Diffstat (limited to 'fs/kernfs/file.c')
-rw-r--r--	fs/kernfs/file.c	434
1 file changed, 252 insertions, 182 deletions
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 9414a7a60a9f..9adf36e6364b 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -18,25 +18,13 @@
 
 #include "kernfs-internal.h"
 
-/*
- * There's one kernfs_open_file for each open file and one kernfs_open_node
- * for each kernfs_node with one or more open files.
- *
- * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
- * protected by kernfs_open_node_lock.
- *
- * filp->private_data points to seq_file whose ->private points to
- * kernfs_open_file.  kernfs_open_files are chained at
- * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
- */
-static DEFINE_SPINLOCK(kernfs_open_node_lock);
-static DEFINE_MUTEX(kernfs_open_file_mutex);
-
 struct kernfs_open_node {
-	atomic_t		refcnt;
+	struct rcu_head		rcu_head;
 	atomic_t		event;
 	wait_queue_head_t	poll;
 	struct list_head	files; /* goes through kernfs_open_file.list */
+	unsigned int		nr_mmapped;
+	unsigned int		nr_to_release;
 };
 
 /*
@@ -45,13 +33,85 @@ struct kernfs_open_node {
  * pending queue is implemented as a singly linked list of kernfs_nodes.
  * The list is terminated with the self pointer so that whether a
  * kernfs_node is on the list or not can be determined by testing the next
- * pointer for NULL.
+ * pointer for %NULL.
  */
 #define KERNFS_NOTIFY_EOL			((void *)&kernfs_notify_list)
 
 static DEFINE_SPINLOCK(kernfs_notify_lock);
 static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
 
+static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
+{
+	int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);
+
+	return &kernfs_locks->open_file_mutex[idx];
+}
+
+static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
+{
+	struct mutex *lock;
+
+	lock = kernfs_open_file_mutex_ptr(kn);
+
+	mutex_lock(lock);
+
+	return lock;
+}
+
+/**
+ * of_on - Get the kernfs_open_node of the specified kernfs_open_file
+ * @of: target kernfs_open_file
+ *
+ * Return: the kernfs_open_node of the kernfs_open_file
+ */
+static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
+{
+	return rcu_dereference_protected(of->kn->attr.open,
+					 !list_empty(&of->list));
+}
+
+/* Get active reference to kernfs node for an open file */
+static struct kernfs_open_file *kernfs_get_active_of(struct kernfs_open_file *of)
+{
+	/* Skip if file was already released */
+	if (unlikely(of->released))
+		return NULL;
+
+	if (!kernfs_get_active(of->kn))
+		return NULL;
+
+	return of;
+}
+
+static void kernfs_put_active_of(struct kernfs_open_file *of)
+{
+	return kernfs_put_active(of->kn);
+}
+
+/**
+ * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
+ *
+ * @kn: target kernfs_node.
+ *
+ * Fetch and return ->attr.open of @kn when caller holds the
+ * kernfs_open_file_mutex_ptr(kn).
+ *
+ * Update of ->attr.open happens under kernfs_open_file_mutex_ptr(kn). So when
+ * the caller guarantees that this mutex is being held, other updaters can't
+ * change ->attr.open and this means that we can safely deref ->attr.open
+ * outside RCU read-side critical section.
+ *
+ * The caller needs to make sure that kernfs_open_file_mutex is held.
+ *
+ * Return: @kn->attr.open when kernfs_open_file_mutex is held.
+ */
+static struct kernfs_open_node *
+kernfs_deref_open_node_locked(struct kernfs_node *kn)
+{
+	return rcu_dereference_protected(kn->attr.open,
+			lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
+}
+
 static struct kernfs_open_file *kernfs_of(struct file *file)
 {
 	return ((struct seq_file *)file->private_data)->private;
@@ -97,7 +157,7 @@ static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
 	if (ops->seq_stop)
 		ops->seq_stop(sf, v);
-	kernfs_put_active(of->kn);
+	kernfs_put_active_of(of);
 }
 
@@ -110,7 +170,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
 	 * the ops aren't called concurrently for the same open file.
 	 */
 	mutex_lock(&of->mutex);
-	if (!kernfs_get_active(of->kn))
+	if (!kernfs_get_active_of(of))
 		return ERR_PTR(-ENODEV);
 
 	ops = kernfs_ops(of->kn);
@@ -120,13 +180,8 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
 		if (next == ERR_PTR(-ENODEV))
 			kernfs_seq_stop_active(sf, next);
 		return next;
-	} else {
-		/*
-		 * The same behavior and code as single_open().  Returns
-		 * !NULL if pos is at the beginning; otherwise, NULL.
-		 */
-		return NULL + !*ppos;
 	}
+	return single_start(sf, ppos);
 }
 
 static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
@@ -163,7 +218,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
 {
 	struct kernfs_open_file *of = sf->private;
 
-	of->event = atomic_read(&of->kn->attr.open->event);
+	of->event = atomic_read(&of_on(of)->event);
 
 	return of->kn->attr.ops->seq_show(sf, v);
 }
@@ -201,20 +256,21 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 	 * the ops aren't called concurrently for the same open file.
 	 */
 	mutex_lock(&of->mutex);
-	if (!kernfs_get_active(of->kn)) {
+	if (!kernfs_get_active_of(of)) {
 		len = -ENODEV;
 		mutex_unlock(&of->mutex);
 		goto out_free;
 	}
 
-	of->event = atomic_read(&of->kn->attr.open->event);
+	of->event = atomic_read(&of_on(of)->event);
+
 	ops = kernfs_ops(of->kn);
 	if (ops->read)
 		len = ops->read(of, buf, len, iocb->ki_pos);
 	else
 		len = -EINVAL;
 
-	kernfs_put_active(of->kn);
+	kernfs_put_active_of(of);
 	mutex_unlock(&of->mutex);
 
 	if (len < 0)
@@ -249,7 +305,7 @@ static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
  * There is no easy way for us to know if userspace is only doing a partial
  * write, so we don't support them. We expect the entire buffer to come on
  * the first write.  Hint: if you're writing a value, first read the file,
- * modify only the the value you're changing, then write entire buffer
+ * modify only the value you're changing, then write entire buffer
  * back.
  */
 static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
@@ -285,7 +341,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 	 * the ops aren't called concurrently for the same open file.
 	 */
 	mutex_lock(&of->mutex);
-	if (!kernfs_get_active(of->kn)) {
+	if (!kernfs_get_active_of(of)) {
 		mutex_unlock(&of->mutex);
 		len = -ENODEV;
 		goto out_free;
 	}
@@ -297,7 +353,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 	else
 		len = -EINVAL;
 
-	kernfs_put_active(of->kn);
+	kernfs_put_active_of(of);
 	mutex_unlock(&of->mutex);
 
 	if (len > 0)
@@ -319,13 +375,13 @@ static void kernfs_vma_open(struct vm_area_struct *vma)
 	if (!of->vm_ops)
 		return;
 
-	if (!kernfs_get_active(of->kn))
+	if (!kernfs_get_active_of(of))
 		return;
 
 	if (of->vm_ops->open)
 		of->vm_ops->open(vma);
 
-	kernfs_put_active(of->kn);
+	kernfs_put_active_of(of);
 }
 
 static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
@@ -337,14 +393,14 @@ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
 	if (!of->vm_ops)
 		return VM_FAULT_SIGBUS;
 
-	if (!kernfs_get_active(of->kn))
+	if (!kernfs_get_active_of(of))
 		return VM_FAULT_SIGBUS;
 
 	ret = VM_FAULT_SIGBUS;
 	if (of->vm_ops->fault)
 		ret = of->vm_ops->fault(vmf);
 
-	kernfs_put_active(of->kn);
+	kernfs_put_active_of(of);
 	return ret;
 }
 
@@ -357,7 +413,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
 	if (!of->vm_ops)
 		return VM_FAULT_SIGBUS;
 
-	if (!kernfs_get_active(of->kn))
+	if (!kernfs_get_active_of(of))
 		return VM_FAULT_SIGBUS;
 
 	ret = 0;
@@ -366,7 +422,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
 	else
 		file_update_time(file);
 
-	kernfs_put_active(of->kn);
+	kernfs_put_active_of(of);
 	return ret;
 }
 
@@ -380,71 +436,22 @@ static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
 	if (!of->vm_ops)
 		return -EINVAL;
 
-	if (!kernfs_get_active(of->kn))
+	if (!kernfs_get_active_of(of))
 		return -EINVAL;
 
 	ret = -EINVAL;
 	if (of->vm_ops->access)
 		ret = of->vm_ops->access(vma, addr, buf, len, write);
 
-	kernfs_put_active(of->kn);
+	kernfs_put_active_of(of);
 	return ret;
 }
 
-#ifdef CONFIG_NUMA
-static int kernfs_vma_set_policy(struct vm_area_struct *vma,
-				 struct mempolicy *new)
-{
-	struct file *file = vma->vm_file;
-	struct kernfs_open_file *of = kernfs_of(file);
-	int ret;
-
-	if (!of->vm_ops)
-		return 0;
-
-	if (!kernfs_get_active(of->kn))
-		return -EINVAL;
-
-	ret = 0;
-	if (of->vm_ops->set_policy)
-		ret = of->vm_ops->set_policy(vma, new);
-
-	kernfs_put_active(of->kn);
-	return ret;
-}
-
-static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
-					       unsigned long addr)
-{
-	struct file *file = vma->vm_file;
-	struct kernfs_open_file *of = kernfs_of(file);
-	struct mempolicy *pol;
-
-	if (!of->vm_ops)
-		return vma->vm_policy;
-
-	if (!kernfs_get_active(of->kn))
-		return vma->vm_policy;
-
-	pol = vma->vm_policy;
-	if (of->vm_ops->get_policy)
-		pol = of->vm_ops->get_policy(vma, addr);
-
-	kernfs_put_active(of->kn);
-	return pol;
-}
-
-#endif
-
 static const struct vm_operations_struct kernfs_vm_ops = {
 	.open		= kernfs_vma_open,
 	.fault		= kernfs_vma_fault,
 	.page_mkwrite	= kernfs_vma_page_mkwrite,
 	.access		= kernfs_vma_access,
-#ifdef CONFIG_NUMA
-	.set_policy	= kernfs_vma_set_policy,
-	.get_policy	= kernfs_vma_get_policy,
-#endif
 };
 
 static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
@@ -458,7 +465,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
 	 * warnings and we don't want to add spurious locking dependency
 	 * between the two.  Check whether mmap is actually implemented
 	 * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
-	 * comment in kernfs_file_open() for more details.
+	 * comment in kernfs_fop_open() for more details.
 	 */
 	if (!(of->kn->flags & KERNFS_HAS_MMAP))
 		return -ENODEV;
@@ -466,7 +473,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
 	mutex_lock(&of->mutex);
 
 	rc = -ENODEV;
-	if (!kernfs_get_active(of->kn))
+	if (!kernfs_get_active_of(of))
 		goto out_unlock;
 
 	ops = kernfs_ops(of->kn);
@@ -490,16 +497,18 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
 	 * It is not possible to successfully wrap close.
 	 * So error if someone is trying to use close.
 	 */
-	rc = -EINVAL;
 	if (vma->vm_ops && vma->vm_ops->close)
 		goto out_put;
 
 	rc = 0;
-	of->mmapped = true;
-	of->vm_ops = vma->vm_ops;
+	if (!of->mmapped) {
+		of->mmapped = true;
+		of_on(of)->nr_mmapped++;
+		of->vm_ops = vma->vm_ops;
+	}
 	vma->vm_ops = &kernfs_vm_ops;
 out_put:
-	kernfs_put_active(of->kn);
+	kernfs_put_active_of(of);
 out_unlock:
 	mutex_unlock(&of->mutex);
 
@@ -514,84 +523,88 @@ out_unlock:
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one.  @of is chained to the files list.
 *
- * LOCKING:
+ * Locking:
 * Kernel thread context (may sleep).
 *
- * RETURNS:
- * 0 on success, -errno on failure.
+ * Return:
+ * %0 on success, -errno on failure.
 */
 static int kernfs_get_open_node(struct kernfs_node *kn,
 				struct kernfs_open_file *of)
 {
-	struct kernfs_open_node *on, *new_on = NULL;
-
- retry:
-	mutex_lock(&kernfs_open_file_mutex);
-	spin_lock_irq(&kernfs_open_node_lock);
+	struct kernfs_open_node *on;
+	struct mutex *mutex;
 
-	if (!kn->attr.open && new_on) {
-		kn->attr.open = new_on;
-		new_on = NULL;
-	}
+	mutex = kernfs_open_file_mutex_lock(kn);
+	on = kernfs_deref_open_node_locked(kn);
 
-	on = kn->attr.open;
-	if (on) {
-		atomic_inc(&on->refcnt);
-		list_add_tail(&of->list, &on->files);
+	if (!on) {
+		/* not there, initialize a new one */
+		on = kzalloc(sizeof(*on), GFP_KERNEL);
+		if (!on) {
+			mutex_unlock(mutex);
+			return -ENOMEM;
+		}
+		atomic_set(&on->event, 1);
+		init_waitqueue_head(&on->poll);
+		INIT_LIST_HEAD(&on->files);
+		rcu_assign_pointer(kn->attr.open, on);
 	}
 
-	spin_unlock_irq(&kernfs_open_node_lock);
-	mutex_unlock(&kernfs_open_file_mutex);
+	list_add_tail(&of->list, &on->files);
+	if (kn->flags & KERNFS_HAS_RELEASE)
+		on->nr_to_release++;
 
-	if (on) {
-		kfree(new_on);
-		return 0;
-	}
-
-	/* not there, initialize a new one and retry */
-	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
-	if (!new_on)
-		return -ENOMEM;
-
-	atomic_set(&new_on->refcnt, 0);
-	atomic_set(&new_on->event, 1);
-	init_waitqueue_head(&new_on->poll);
-	INIT_LIST_HEAD(&new_on->files);
-	goto retry;
+	mutex_unlock(mutex);
+	return 0;
 }
 
 /**
- * kernfs_put_open_node - put kernfs_open_node
- * @kn: target kernfs_nodet
+ * kernfs_unlink_open_file - Unlink @of from @kn.
+ *
+ * @kn: target kernfs_node
 * @of: associated kernfs_open_file
+ * @open_failed: ->open() failed, cancel ->release()
 *
- * Put @kn->attr.open and unlink @of from the files list.  If
- * reference count reaches zero, disassociate and free it.
+ * Unlink @of from list of @kn's associated open files. If list of
+ * associated open files becomes empty, disassociate and free
+ * kernfs_open_node.
 *
 * LOCKING:
 * None.
 */
-static void kernfs_put_open_node(struct kernfs_node *kn,
-				 struct kernfs_open_file *of)
+static void kernfs_unlink_open_file(struct kernfs_node *kn,
+				    struct kernfs_open_file *of,
+				    bool open_failed)
 {
-	struct kernfs_open_node *on = kn->attr.open;
-	unsigned long flags;
+	struct kernfs_open_node *on;
+	struct mutex *mutex;
 
-	mutex_lock(&kernfs_open_file_mutex);
-	spin_lock_irqsave(&kernfs_open_node_lock, flags);
+	mutex = kernfs_open_file_mutex_lock(kn);
 
-	if (of)
-		list_del(&of->list);
+	on = kernfs_deref_open_node_locked(kn);
+	if (!on) {
+		mutex_unlock(mutex);
+		return;
+	}
 
-	if (atomic_dec_and_test(&on->refcnt))
-		kn->attr.open = NULL;
-	else
-		on = NULL;
+	if (of) {
+		if (kn->flags & KERNFS_HAS_RELEASE) {
+			WARN_ON_ONCE(of->released == open_failed);
+			if (open_failed)
+				on->nr_to_release--;
+		}
+		if (of->mmapped)
+			on->nr_mmapped--;
+		list_del(&of->list);
+	}
 
-	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
-	mutex_unlock(&kernfs_open_file_mutex);
+	if (list_empty(&on->files)) {
+		rcu_assign_pointer(kn->attr.open, NULL);
+		kfree_rcu(on, rcu_head);
+	}
 
-	kfree(on);
+	mutex_unlock(mutex);
 }
 
 static int kernfs_fop_open(struct inode *inode, struct file *file)
@@ -641,11 +654,18 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
 	 * each file a separate locking class.  Let's differentiate on
 	 * whether the file has mmap or not for now.
 	 *
-	 * Both paths of the branch look the same.  They're supposed to
+	 * For similar reasons, writable and readonly files are given different
+	 * lockdep keys, because the writable file /sys/power/resume may call vfs
+	 * lookup helpers for arbitrary paths and readonly files can be read by
+	 * overlayfs from vfs helpers when sysfs is a lower layer of overlayfs.
+	 *
+	 * All three cases look the same. They're supposed to
 	 * look that way and give @of->mutex different static lockdep keys.
 	 */
 	if (has_mmap)
 		mutex_init(&of->mutex);
+	else if (file->f_mode & FMODE_WRITE)
+		mutex_init(&of->mutex);
 	else
 		mutex_init(&of->mutex);
 
@@ -711,7 +731,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
 	return 0;
 
 err_put_node:
-	kernfs_put_open_node(kn, of);
+	kernfs_unlink_open_file(kn, of, true);
 err_seq_release:
 	seq_release(inode, file);
 err_free:
@@ -729,11 +749,11 @@ static void kernfs_release_file(struct kernfs_node *kn,
 	/*
 	 * @of is guaranteed to have no other file operations in flight and
 	 * we just want to synchronize release and drain paths.
-	 * @kernfs_open_file_mutex is enough.  @of->mutex can't be used
+	 * @kernfs_open_file_mutex_ptr(kn) is enough. @of->mutex can't be used
 	 * here because drain path may be called from places which can
 	 * cause circular dependency.
 	 */
-	lockdep_assert_held(&kernfs_open_file_mutex);
+	lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));
 
 	if (!of->released) {
 		/*
@@ -743,6 +763,7 @@ static void kernfs_release_file(struct kernfs_node *kn,
 		 */
 		kn->attr.ops->release(of);
 		of->released = true;
+		of_on(of)->nr_to_release--;
 	}
 }
 
@@ -752,12 +773,14 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
 	struct kernfs_open_file *of = kernfs_of(filp);
 
 	if (kn->flags & KERNFS_HAS_RELEASE) {
-		mutex_lock(&kernfs_open_file_mutex);
+		struct mutex *mutex;
+
+		mutex = kernfs_open_file_mutex_lock(kn);
 		kernfs_release_file(kn, of);
-		mutex_unlock(&kernfs_open_file_mutex);
+		mutex_unlock(mutex);
 	}
 
-	kernfs_put_open_node(kn, of);
+	kernfs_unlink_open_file(kn, of, false);
 	seq_release(inode, filp);
 	kfree(of->prealloc_buf);
 	kfree(of);
@@ -765,37 +788,54 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+bool kernfs_should_drain_open_files(struct kernfs_node *kn)
+{
+	struct kernfs_open_node *on;
+	bool ret;
+
+	/*
+	 * @kn being deactivated guarantees that @kn->attr.open can't change
+	 * beneath us making the lockless test below safe.
+	 * Callers post kernfs_unbreak_active_protection may be counted in
+	 * kn->active by now, do not WARN_ON because of them.
+	 */
+
+	rcu_read_lock();
+	on = rcu_dereference(kn->attr.open);
+	ret = on && (on->nr_mmapped || on->nr_to_release);
+	rcu_read_unlock();
+
+	return ret;
+}
+
 void kernfs_drain_open_files(struct kernfs_node *kn)
 {
 	struct kernfs_open_node *on;
 	struct kernfs_open_file *of;
+	struct mutex *mutex;
 
-	if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
+	mutex = kernfs_open_file_mutex_lock(kn);
+	on = kernfs_deref_open_node_locked(kn);
+	if (!on) {
+		mutex_unlock(mutex);
 		return;
-
-	spin_lock_irq(&kernfs_open_node_lock);
-	on = kn->attr.open;
-	if (on)
-		atomic_inc(&on->refcnt);
-	spin_unlock_irq(&kernfs_open_node_lock);
-	if (!on)
-		return;
-
-	mutex_lock(&kernfs_open_file_mutex);
+	}
 
 	list_for_each_entry(of, &on->files, list) {
 		struct inode *inode = file_inode(of->file);
 
-		if (kn->flags & KERNFS_HAS_MMAP)
+		if (of->mmapped) {
 			unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+			of->mmapped = false;
+			on->nr_mmapped--;
+		}
 
 		if (kn->flags & KERNFS_HAS_RELEASE)
 			kernfs_release_file(kn, of);
 	}
 
-	mutex_unlock(&kernfs_open_file_mutex);
-
-	kernfs_put_open_node(kn, NULL);
+	WARN_ON_ONCE(on->nr_mmapped || on->nr_to_release);
+	mutex_unlock(mutex);
 }
 
 /*
@@ -814,8 +854,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
 */
 __poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
 {
-	struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
-	struct kernfs_open_node *on = kn->attr.open;
+	struct kernfs_open_node *on = of_on(of);
 
 	poll_wait(of->file, &on->poll, wait);
 
@@ -831,7 +870,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
 	struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
 	__poll_t ret;
 
-	if (!kernfs_get_active(kn))
+	if (!kernfs_get_active_of(of))
 		return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
 
 	if (kn->attr.ops->poll)
@@ -839,7 +878,34 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
 	else
 		ret = kernfs_generic_poll(of, wait);
 
-	kernfs_put_active(kn);
+	kernfs_put_active_of(of);
+	return ret;
+}
+
+static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
+{
+	struct kernfs_open_file *of = kernfs_of(file);
+	const struct kernfs_ops *ops;
+	loff_t ret;
+
+	/*
+	 * @of->mutex nests outside active ref and is primarily to ensure that
+	 * the ops aren't called concurrently for the same open file.
+	 */
+	mutex_lock(&of->mutex);
+	if (!kernfs_get_active_of(of)) {
+		mutex_unlock(&of->mutex);
+		return -ENODEV;
+	}
+
+	ops = kernfs_ops(of->kn);
+	if (ops->llseek)
+		ret = ops->llseek(of, offset, whence);
+	else
+		ret = generic_file_llseek(file, offset, whence);
+
+	kernfs_put_active_of(of);
+	mutex_unlock(&of->mutex);
 	return ret;
 }
 
@@ -862,11 +928,13 @@ repeat:
 	root = kernfs_root(kn);
 
 	/* kick fsnotify */
-	down_write(&root->kernfs_rwsem);
+	down_read(&root->kernfs_supers_rwsem);
+	down_read(&root->kernfs_rwsem);
 
 	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
 		struct kernfs_node *parent;
 		struct inode *p_inode = NULL;
+		const char *kn_name;
 		struct inode *inode;
 		struct qstr name;
 
@@ -880,7 +948,8 @@ repeat:
 		if (!inode)
 			continue;
 
-		name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
+		kn_name = kernfs_rcu_name(kn);
+		name = QSTR(kn_name);
 		parent = kernfs_get_parent(kn);
 		if (parent) {
 			p_inode = ilookup(info->sb, kernfs_ino(parent));
@@ -900,7 +969,8 @@ repeat:
 		iput(inode);
 	}
 
-	up_write(&root->kernfs_rwsem);
+	up_read(&root->kernfs_rwsem);
+	up_read(&root->kernfs_supers_rwsem);
 	kernfs_put(kn);
 	goto repeat;
 }
@@ -922,13 +992,13 @@ void kernfs_notify(struct kernfs_node *kn)
 		return;
 
 	/* kick poll immediately */
-	spin_lock_irqsave(&kernfs_open_node_lock, flags);
-	on = kn->attr.open;
+	rcu_read_lock();
+	on = rcu_dereference(kn->attr.open);
 	if (on) {
 		atomic_inc(&on->event);
 		wake_up_interruptible(&on->poll);
 	}
-	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
+	rcu_read_unlock();
 
 	/* schedule work to kick fsnotify */
 	spin_lock_irqsave(&kernfs_notify_lock, flags);
@@ -945,13 +1015,13 @@ EXPORT_SYMBOL_GPL(kernfs_notify);
 const struct file_operations kernfs_file_fops = {
 	.read_iter	= kernfs_fop_read_iter,
 	.write_iter	= kernfs_fop_write_iter,
-	.llseek		= generic_file_llseek,
+	.llseek		= kernfs_fop_llseek,
 	.mmap		= kernfs_fop_mmap,
 	.open		= kernfs_fop_open,
 	.release	= kernfs_fop_release,
 	.poll		= kernfs_fop_poll,
 	.fsync		= noop_fsync,
-	.splice_read	= generic_file_splice_read,
+	.splice_read	= copy_splice_read,
 	.splice_write	= iter_file_splice_write,
 };
 
@@ -968,7 +1038,7 @@ const struct file_operations kernfs_file_fops = {
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
- * Returns the created node on success, ERR_PTR() value on error.
+ * Return: the created node on success, ERR_PTR() value on error.
 */
 struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
 					 const char *name,
@@ -1002,7 +1072,7 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
 #endif
 
 	/*
-	 * kn->attr.ops is accesible only while holding active ref.  We
+	 * kn->attr.ops is accessible only while holding active ref.  We
 	 * need to know whether some ops are implemented outside active
 	 * ref.  Cache their existence in flags.
 	 */
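
The biggest structural change in this diff is the lifetime management of kernfs_open_node: the refcount (refcnt) and the global kernfs_open_node_lock spinlock are gone, replaced by an RCU-published pointer. Writers install and clear kn->attr.open under the per-node mutex, lockless readers such as kernfs_notify() and kernfs_should_drain_open_files() only hold rcu_read_lock(), and the node is freed with kfree_rcu() once every in-flight reader has left its critical section. Below is a condensed kernel-style sketch of that pattern; it will not build outside a kernel tree, and struct node, node_publish(), node_unpublish() and node_kick() are illustrative stand-ins, not kernfs API.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct open_node {
	struct rcu_head	rcu_head;	/* used by kfree_rcu() */
	atomic_t	event;
};

struct node {				/* stand-in for kernfs_node */
	struct open_node __rcu *open;
	struct mutex	lock;		/* serializes the writer side */
};

/* Writer: allocate and publish under the node's mutex. */
static int node_publish(struct node *n)
{
	struct open_node *on;

	lockdep_assert_held(&n->lock);
	on = kzalloc(sizeof(*on), GFP_KERNEL);
	if (!on)
		return -ENOMEM;
	atomic_set(&on->event, 1);
	rcu_assign_pointer(n->open, on);	/* pairs with rcu_dereference() */
	return 0;
}

/* Writer: unpublish; in-flight readers stay valid until a grace period. */
static void node_unpublish(struct node *n)
{
	struct open_node *on;

	on = rcu_dereference_protected(n->open, lockdep_is_held(&n->lock));
	if (!on)
		return;
	rcu_assign_pointer(n->open, NULL);
	kfree_rcu(on, rcu_head);		/* freed after readers drain */
}

/* Lockless reader, shaped like kernfs_notify(): RCU only, no mutex. */
static void node_kick(struct node *n)
{
	struct open_node *on;

	rcu_read_lock();
	on = rcu_dereference(n->open);
	if (on)
		atomic_inc(&on->event);
	rcu_read_unlock();
}

The rcu_dereference_protected() call documents, and with lockdep verifies, that the writer side is already serialized by the mutex and may therefore dereference the pointer without an RCU read-side critical section -- exactly what kernfs_deref_open_node_locked() does in the diff.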
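The other half of the locking change is lock striping: kernfs_open_file_mutex_ptr() replaces the single global kernfs_open_file_mutex with a hashed pool of mutexes (kernfs_locks->open_file_mutex, sized by NR_KERNFS_LOCK_BITS). Each node's pointer value picks one mutex, so unrelated nodes mostly take different locks while all openers of the same node still serialize on the same one. The same idea in portable C with pthreads, as a hedged sketch -- obj_mutex_ptr(), obj_mutex_lock() and NR_LOCK_BITS are invented names, and the multiplicative hash only mimics the spirit of the kernel's hash_ptr():

#include <pthread.h>
#include <stdint.h>

#define NR_LOCK_BITS	6
#define NR_LOCKS	(1u << NR_LOCK_BITS)

/* GCC/Clang range-designator initializer. */
static pthread_mutex_t lock_pool[NR_LOCKS] = {
	[0 ... NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER,
};

/* Map an object's address to one mutex in the pool. */
static pthread_mutex_t *obj_mutex_ptr(const void *obj)
{
	uint64_t v = (uint64_t)(uintptr_t)obj;

	v *= 0x61c8864680b583ebULL;	/* 64-bit golden-ratio multiply */
	return &lock_pool[v >> (64 - NR_LOCK_BITS)];
}

/* Mirror of kernfs_open_file_mutex_lock(): lock it and hand it back. */
static pthread_mutex_t *obj_mutex_lock(const void *obj)
{
	pthread_mutex_t *lock = obj_mutex_ptr(obj);

	pthread_mutex_lock(lock);
	return lock;
}

Returning the locked mutex from the lock helper, as kernfs_open_file_mutex_lock() does, lets the caller later unlock exactly the instance it acquired without recomputing the hash.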
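In kernfs_seq_start(), the old `return NULL + !*ppos` trick emulated single_open(): produce a non-NULL token on the first call (so exactly one ->seq_show() runs) and NULL on every later call. The diff swaps it for the seq_file helper single_start(); in recent kernels that helper reduces to roughly the following (paraphrased from fs/seq_file.c and include/linux/seq_file.h, so treat the exact body as an approximation):

/* include/linux/seq_file.h */
#define SEQ_START_TOKEN ((void *)1)

/* fs/seq_file.c -- approximate body */
void *single_start(struct seq_file *p, loff_t *pos)
{
	return *pos ? NULL : SEQ_START_TOKEN;
}

Using the named helper makes the intent explicit and removes the pointer-arithmetic-on-NULL construct, which is undefined behavior in standard C.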
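Finally, the three literally identical mutex_init(&of->mutex) branches in kernfs_fop_open() only make sense given how the kernel's mutex_init() works: it is a macro that defines a static lock_class_key at each call site, so each textual occurrence assigns the mutex a distinct lockdep class. Paraphrasing include/linux/mutex.h (the exact form varies by kernel version):

#define mutex_init(mutex)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__mutex_init((mutex), #mutex, &__key);			\
} while (0)

Folding the branches into one call would merge mmap-capable, writable, and read-only kernfs files into a single lockdep class, reintroducing the false-positive dependency reports that the comment in the diff describes.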
