Diffstat (limited to 'fs/gfs2/file.c')
-rw-r--r--	fs/gfs2/file.c	401
1 file changed, 216 insertions(+), 185 deletions(-)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 3e718cfc19a7..b2d23c98c996 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
@@ -154,7 +155,7 @@ static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
return fsflags;
}
-int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+int gfs2_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
@@ -235,7 +236,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
goto out;
if (!IS_IMMUTABLE(inode)) {
- error = gfs2_permission(&init_user_ns, inode, MAY_WRITE);
+ error = gfs2_permission(&nop_mnt_idmap, inode, MAY_WRITE);
if (error)
goto out;
}
@@ -250,6 +251,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
error = filemap_fdatawait(inode->i_mapping);
if (error)
goto out;
+ truncate_inode_pages(inode->i_mapping, 0);
if (new_flags & GFS2_DIF_JDATA)
gfs2_ordered_del_inode(ip);
}
@@ -259,7 +261,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
goto out_trans_end;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
gfs2_trans_add_meta(ip->i_gl, bh);
ip->i_diskflags = new_flags;
gfs2_dinode_out(ip, bh->b_data);
@@ -273,8 +275,8 @@ out:
return error;
}
-int gfs2_fileattr_set(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa)
+int gfs2_fileattr_set(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa)
{
struct inode *inode = d_inode(dentry);
u32 fsflags = fa->flags, gfsflags = 0;
@@ -375,23 +377,23 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
}
/**
- * gfs2_allocate_page_backing - Allocate blocks for a write fault
- * @page: The (locked) page to allocate backing for
+ * gfs2_allocate_folio_backing - Allocate blocks for a write fault
+ * @folio: The (locked) folio to allocate backing for
* @length: Size of the allocation
*
- * We try to allocate all the blocks required for the page in one go. This
+ * We try to allocate all the blocks required for the folio in one go. This
* might fail for various reasons, so we keep trying until all the blocks to
- * back this page are allocated. If some of the blocks are already allocated,
+ * back this folio are allocated. If some of the blocks are already allocated,
* that is ok too.
*/
-static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
+static int gfs2_allocate_folio_backing(struct folio *folio, size_t length)
{
- u64 pos = page_offset(page);
+ u64 pos = folio_pos(folio);
do {
struct iomap iomap = { };
- if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
+ if (gfs2_iomap_alloc(folio->mapping->host, pos, length, &iomap))
return -EIO;
if (length < iomap.length)
@@ -413,16 +415,16 @@ static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
- u64 offset = page_offset(page);
+ struct gfs2_alloc_parms ap = {};
+ u64 pos = folio_pos(folio);
unsigned int data_blocks, ind_blocks, rblocks;
vm_fault_t ret = VM_FAULT_LOCKED;
struct gfs2_holder gh;
- unsigned int length;
+ size_t length;
loff_t size;
int err;
@@ -431,27 +433,27 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
err = gfs2_glock_nq(&gh);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_uninit;
}
- /* Check page index against inode size */
+ /* Check folio index against inode size */
size = i_size_read(inode);
- if (offset >= size) {
+ if (pos >= size) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
- /* Update file times before taking page lock */
+ /* Update file times before taking folio lock */
file_update_time(vmf->vma->vm_file);
- /* page is wholly or partially inside EOF */
- if (size - offset < PAGE_SIZE)
- length = size - offset;
+ /* folio is wholly or partially inside EOF */
+ if (size - pos < folio_size(folio))
+ length = size - pos;
else
- length = PAGE_SIZE;
+ length = folio_size(folio);
- gfs2_size_hint(vmf->vma->vm_file, offset, length);
+ gfs2_size_hint(vmf->vma->vm_file, pos, length);
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -462,18 +464,19 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
*/
if (!gfs2_is_stuffed(ip) &&
- !gfs2_write_alloc_required(ip, offset, length)) {
- lock_page(page);
- if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+ !gfs2_write_alloc_required(ip, pos, length)) {
+ folio_lock(folio);
+ if (!folio_test_uptodate(folio) ||
+ folio->mapping != inode->i_mapping) {
ret = VM_FAULT_NOPAGE;
- unlock_page(page);
+ folio_unlock(folio);
}
goto out_unlock;
}
err = gfs2_rindex_update(sdp);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_unlock;
}
@@ -481,12 +484,12 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
ap.target = data_blocks + ind_blocks;
err = gfs2_quota_lock_check(ip, &ap);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_unlock;
}
err = gfs2_inplace_reserve(ip, &ap);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_quota_unlock;
}
@@ -499,35 +502,35 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
}
err = gfs2_trans_begin(sdp, rblocks, 0);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_trans_fail;
}
- /* Unstuff, if required, and allocate backing blocks for page */
+ /* Unstuff, if required, and allocate backing blocks for folio */
if (gfs2_is_stuffed(ip)) {
err = gfs2_unstuff_dinode(ip);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_trans_end;
}
}
- lock_page(page);
+ folio_lock(folio);
/* If truncated, we must retry the operation; we may have raced
* with the glock demotion code.
*/
- if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+ if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) {
ret = VM_FAULT_NOPAGE;
goto out_page_locked;
}
- err = gfs2_allocate_page_backing(page, length);
+ err = gfs2_allocate_folio_backing(folio, length);
if (err)
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
out_page_locked:
if (ret != VM_FAULT_LOCKED)
- unlock_page(page);
+ folio_unlock(folio);
out_trans_end:
gfs2_trans_end(sdp);
out_trans_fail:
@@ -539,8 +542,8 @@ out_unlock:
out_uninit:
gfs2_holder_uninit(&gh);
if (ret == VM_FAULT_LOCKED) {
- set_page_dirty(page);
- wait_for_stable_page(page);
+ folio_mark_dirty(folio);
+ folio_wait_stable(folio);
}
sb_end_pagefault(inode->i_sb);
return ret;
@@ -557,7 +560,7 @@ static vm_fault_t gfs2_fault(struct vm_fault *vmf)
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
err = gfs2_glock_nq(&gh);
if (err) {
- ret = block_page_mkwrite_return(err);
+ ret = vmf_fs_error(err);
goto out_uninit;
}
ret = filemap_fault(vmf);
@@ -629,6 +632,9 @@ int gfs2_open_common(struct inode *inode, struct file *file)
ret = generic_file_open(inode, file);
if (ret)
return ret;
+
+ if (!gfs2_is_jdata(GFS2_I(inode)))
+ file->f_mode |= FMODE_CAN_ODIRECT;
}
fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
@@ -704,10 +710,11 @@ static int gfs2_release(struct inode *inode, struct file *file)
kfree(file->private_data);
file->private_data = NULL;
- if (gfs2_rs_active(&ip->i_res))
- gfs2_rs_delete(ip, &inode->i_writecount);
- if (file->f_mode & FMODE_WRITE)
+ if (file->f_mode & FMODE_WRITE) {
+ if (gfs2_rs_active(&ip->i_res))
+ gfs2_rs_delete(ip);
gfs2_qa_put(ip);
+ }
return 0;
}
@@ -737,7 +744,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
{
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- int sync_state = inode->i_state & I_DIRTY;
+ int sync_state = inode_state_read_once(inode) & I_DIRTY;
struct gfs2_inode *ip = GFS2_I(inode);
int ret = 0, ret1 = 0;
@@ -769,33 +776,35 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
return ret ? ret : ret1;
}
-static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
+static inline bool should_fault_in_pages(struct iov_iter *i,
+ struct kiocb *iocb,
size_t *prev_count,
size_t *window_size)
{
size_t count = iov_iter_count(i);
- char __user *p;
- int pages = 1;
+ size_t size, offs;
- if (likely(!count))
+ if (!count)
return false;
- if (ret <= 0 && ret != -EFAULT)
- return false;
- if (!iter_is_iovec(i))
+ if (!user_backed_iter(i))
return false;
- if (*prev_count != count || !*window_size) {
- int pages, nr_dirtied;
+ /*
+ * Try to fault in multiple pages initially. When that doesn't result
+ * in any progress, fall back to a single page.
+ */
+ size = PAGE_SIZE;
+ offs = offset_in_page(iocb->ki_pos);
+ if (*prev_count != count) {
+ size_t nr_dirtied;
- pages = min_t(int, BIO_MAX_VECS, DIV_ROUND_UP(count, PAGE_SIZE));
nr_dirtied = max(current->nr_dirtied_pause -
- current->nr_dirtied, 1);
- pages = min(pages, nr_dirtied);
+ current->nr_dirtied, 8);
+ size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
}
*prev_count = count;
- p = i->iov[0].iov_base + i->iov_offset;
- *window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
+ *window_size = size - offs;
return true;
}
@@ -805,13 +814,13 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
struct file *file = iocb->ki_filp;
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
size_t prev_count = 0, window_size = 0;
- size_t written = 0;
+ size_t read = 0;
ssize_t ret;
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*
* Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger
@@ -833,35 +842,33 @@ retry:
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
-retry_under_glock:
pagefault_disable();
to->nofault = true;
ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
- IOMAP_DIO_PARTIAL, written);
+ IOMAP_DIO_PARTIAL, NULL, read);
to->nofault = false;
pagefault_enable();
+ if (ret <= 0 && ret != -EFAULT)
+ goto out_unlock;
+ /* No increment (+=) because iomap_dio_rw returns a cumulative value. */
if (ret > 0)
- written = ret;
-
- if (should_fault_in_pages(ret, to, &prev_count, &window_size)) {
- size_t leftover;
+ read = ret;
- gfs2_holder_allow_demote(gh);
- leftover = fault_in_iov_iter_writeable(to, window_size);
- gfs2_holder_disallow_demote(gh);
- if (leftover != window_size) {
- if (!gfs2_holder_queued(gh))
- goto retry;
- goto retry_under_glock;
- }
+ if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(gh);
+ window_size -= fault_in_iov_iter_writeable(to, window_size);
+ if (window_size)
+ goto retry;
}
+out_unlock:
if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
+ /* User space doesn't expect partial success. */
if (ret < 0)
return ret;
- return written;
+ return read;
}
static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
@@ -871,13 +878,14 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
struct inode *inode = file->f_mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
size_t prev_count = 0, window_size = 0;
- size_t read = 0;
+ size_t written = 0;
+ bool enough_retries;
ssize_t ret;
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*
* For writes, iomap_dio_rw only triggers manual page faults, so we
@@ -897,41 +905,45 @@ retry:
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
-retry_under_glock:
/* Silently fall back to buffered I/O when writing beyond EOF */
if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
- goto out;
+ goto out_unlock;
from->nofault = true;
ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
- IOMAP_DIO_PARTIAL, read);
+ IOMAP_DIO_PARTIAL, NULL, written);
from->nofault = false;
-
- if (ret == -ENOTBLK)
- ret = 0;
+ if (ret <= 0) {
+ if (ret == -ENOTBLK)
+ ret = 0;
+ if (ret != -EFAULT)
+ goto out_unlock;
+ }
+ /* No increment (+=) because iomap_dio_rw returns a cumulative value. */
if (ret > 0)
- read = ret;
-
- if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
- size_t leftover;
+ written = ret;
- gfs2_holder_allow_demote(gh);
- leftover = fault_in_iov_iter_readable(from, window_size);
- gfs2_holder_disallow_demote(gh);
- if (leftover != window_size) {
- if (!gfs2_holder_queued(gh))
+ enough_retries = prev_count == iov_iter_count(from) &&
+ window_size <= PAGE_SIZE;
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(gh);
+ window_size -= fault_in_iov_iter_readable(from, window_size);
+ if (window_size) {
+ if (!enough_retries)
goto retry;
- goto retry_under_glock;
+ /* fall back to buffered I/O */
+ ret = 0;
}
}
-out:
+out_unlock:
if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
+ /* User space doesn't expect partial success. */
if (ret < 0)
return ret;
- return read;
+ return written;
}
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -939,30 +951,29 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct gfs2_inode *ip;
struct gfs2_holder gh;
size_t prev_count = 0, window_size = 0;
- size_t written = 0;
+ size_t read = 0;
ssize_t ret;
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*/
- if (iocb->ki_flags & IOCB_DIRECT) {
- ret = gfs2_file_direct_read(iocb, to, &gh);
- if (likely(ret != -ENOTBLK))
- return ret;
- iocb->ki_flags &= ~IOCB_DIRECT;
- }
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return gfs2_file_direct_read(iocb, to, &gh);
+
+ pagefault_disable();
iocb->ki_flags |= IOCB_NOIO;
ret = generic_file_read_iter(iocb, to);
iocb->ki_flags &= ~IOCB_NOIO;
+ pagefault_enable();
if (ret >= 0) {
if (!iov_iter_count(to))
return ret;
- written = ret;
- } else {
+ read = ret;
+ } else if (ret != -EFAULT) {
if (ret != -EAGAIN)
return ret;
if (iocb->ki_flags & IOCB_NOWAIT)
@@ -974,33 +985,26 @@ retry:
ret = gfs2_glock_nq(&gh);
if (ret)
goto out_uninit;
-retry_under_glock:
pagefault_disable();
ret = generic_file_read_iter(iocb, to);
pagefault_enable();
+ if (ret <= 0 && ret != -EFAULT)
+ goto out_unlock;
if (ret > 0)
- written += ret;
-
- if (should_fault_in_pages(ret, to, &prev_count, &window_size)) {
- size_t leftover;
+ read += ret;
- gfs2_holder_allow_demote(&gh);
- leftover = fault_in_iov_iter_writeable(to, window_size);
- gfs2_holder_disallow_demote(&gh);
- if (leftover != window_size) {
- if (!gfs2_holder_queued(&gh)) {
- if (written)
- goto out_uninit;
- goto retry;
- }
- goto retry_under_glock;
- }
+ if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(&gh);
+ window_size -= fault_in_iov_iter_writeable(to, window_size);
+ if (window_size)
+ goto retry;
}
+out_unlock:
if (gfs2_holder_queued(&gh))
gfs2_glock_dq(&gh);
out_uninit:
gfs2_holder_uninit(&gh);
- return written ? written : ret;
+ return read ? read : ret;
}
static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
@@ -1014,13 +1018,13 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
struct gfs2_holder *statfs_gh = NULL;
size_t prev_count = 0, window_size = 0;
size_t orig_count = iov_iter_count(from);
- size_t read = 0;
+ size_t written = 0;
ssize_t ret;
/*
* In this function, we disable page faults when we're holding the
* inode glock while doing I/O. If a page fault occurs, we indicate
- * that the inode glock may be dropped, fault in the pages manually,
+ * that the inode glock should be dropped, fault in the pages manually,
* and retry.
*/
@@ -1031,11 +1035,19 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
}
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
retry:
+ window_size -= fault_in_iov_iter_readable(from, window_size);
+ if (!window_size) {
+ ret = -EFAULT;
+ goto out_uninit;
+ }
+ from->count = min(from->count, window_size);
+ }
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
-retry_under_glock:
+
if (inode == sdp->sd_rindex) {
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -1045,44 +1057,32 @@ retry_under_glock:
goto out_unlock;
}
- current->backing_dev_info = inode_to_bdi(inode);
pagefault_disable();
- ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops,
+ &gfs2_iomap_write_ops, NULL);
pagefault_enable();
- current->backing_dev_info = NULL;
- if (ret > 0) {
- iocb->ki_pos += ret;
- read += ret;
- }
+ if (ret > 0)
+ written += ret;
if (inode == sdp->sd_rindex)
gfs2_glock_dq_uninit(statfs_gh);
- from->count = orig_count - read;
- if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
- size_t leftover;
-
- gfs2_holder_allow_demote(gh);
- leftover = fault_in_iov_iter_readable(from, window_size);
- gfs2_holder_disallow_demote(gh);
- if (leftover != window_size) {
- from->count = min(from->count, window_size - leftover);
- if (!gfs2_holder_queued(gh)) {
- if (read)
- goto out_uninit;
- goto retry;
- }
- goto retry_under_glock;
- }
+ if (ret <= 0 && ret != -EFAULT)
+ goto out_unlock;
+
+ from->count = orig_count - written;
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(gh);
+ goto retry;
}
out_unlock:
if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
- if (statfs_gh)
- kfree(statfs_gh);
- return read ? read : ret;
+ kfree(statfs_gh);
+ from->count = orig_count - written;
+ return written ? written : ret;
}
/**
@@ -1123,14 +1123,16 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret)
goto out_unlock;
- ret = file_update_time(file);
- if (ret)
- goto out_unlock;
-
if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = file->f_mapping;
ssize_t buffered, ret2;
+ /*
+ * Note that under direct I/O, we don't allow any inode
+ * timestamp updates, so we're not calling file_update_time()
+ * here.
+ */
+
ret = gfs2_file_direct_write(iocb, from, &gh);
if (ret < 0 || !iov_iter_count(from))
goto out_unlock;
@@ -1157,6 +1159,10 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!ret || ret2 > 0)
ret += ret2;
} else {
+ ret = file_update_time(file);
+ if (ret)
+ goto out_unlock;
+
ret = gfs2_file_buffered_write(iocb, from, &gh);
if (likely(ret > 0))
ret = generic_write_sync(iocb, ret);
@@ -1248,7 +1254,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
struct inode *inode = file_inode(file);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
+ struct gfs2_alloc_parms ap = {};
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes, max_blks;
int error;
@@ -1436,25 +1442,44 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ int ret;
- if (!(fl->fl_flags & FL_POSIX))
+ if (!(fl->c.flc_flags & FL_POSIX))
return -ENOLCK;
- if (cmd == F_CANCELLK) {
- /* Hack: */
- cmd = F_SETLK;
- fl->fl_type = F_UNLCK;
- }
- if (unlikely(gfs2_withdrawn(sdp))) {
- if (fl->fl_type == F_UNLCK)
+ if (gfs2_withdrawn(sdp)) {
+ if (lock_is_unlock(fl))
locks_lock_file_wait(file, fl);
return -EIO;
}
- if (IS_GETLK(cmd))
- return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
- else if (fl->fl_type == F_UNLCK)
- return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
- else
- return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+ down_read(&ls->ls_sem);
+ ret = -ENODEV;
+ if (likely(ls->ls_dlm != NULL)) {
+ if (cmd == F_CANCELLK)
+ ret = dlm_posix_cancel(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else if (IS_GETLK(cmd))
+ ret = dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else if (lock_is_unlock(fl))
+ ret = dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else
+ ret = dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
+ }
+ up_read(&ls->ls_sem);
+ return ret;
+}
+
+static void __flock_holder_uninit(struct file *file, struct gfs2_holder *fl_gh)
+{
+ struct gfs2_glock *gl = gfs2_glock_hold(fl_gh->gh_gl);
+
+ /*
+ * Make sure gfs2_glock_put() won't sleep under the file->f_lock
+ * spinlock.
+ */
+
+ spin_lock(&file->f_lock);
+ gfs2_holder_uninit(fl_gh);
+ spin_unlock(&file->f_lock);
+ gfs2_glock_put(gl);
}
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
@@ -1468,8 +1493,10 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
int error = 0;
int sleeptime;
- state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
- flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
+ state = lock_is_write(fl) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
+ flags = GL_EXACT | GL_NOPID;
+ if (!IS_SETLKW(cmd))
+ flags |= LM_FLAG_TRY_1CB;
mutex_lock(&fp->f_fl_mutex);
@@ -1478,8 +1505,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
if (fl_gh->gh_state == state)
goto out;
locks_init_lock(&request);
- request.fl_type = F_UNLCK;
- request.fl_flags = FL_FLOCK;
+ request.c.flc_type = F_UNLCK;
+ request.c.flc_flags = FL_FLOCK;
locks_lock_file_wait(file, &request);
gfs2_glock_dq(fl_gh);
gfs2_holder_reinit(state, flags, fl_gh);
@@ -1488,19 +1515,21 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
&gfs2_flock_glops, CREATE, &gl);
if (error)
goto out;
+ spin_lock(&file->f_lock);
gfs2_holder_init(gl, state, flags, fl_gh);
+ spin_unlock(&file->f_lock);
gfs2_glock_put(gl);
}
for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
error = gfs2_glock_nq(fl_gh);
if (error != GLR_TRYFAILED)
break;
- fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
- fl_gh->gh_error = 0;
+ fl_gh->gh_flags &= ~LM_FLAG_TRY_1CB;
+ fl_gh->gh_flags |= LM_FLAG_TRY;
msleep(sleeptime);
}
if (error) {
- gfs2_holder_uninit(fl_gh);
+ __flock_holder_uninit(file, fl_gh);
if (error == GLR_TRYFAILED)
error = -EAGAIN;
} else {
@@ -1522,7 +1551,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
locks_lock_file_wait(file, fl);
if (gfs2_holder_initialized(fl_gh)) {
gfs2_glock_dq(fl_gh);
- gfs2_holder_uninit(fl_gh);
+ __flock_holder_uninit(file, fl_gh);
}
mutex_unlock(&fp->f_fl_mutex);
}
@@ -1538,10 +1567,10 @@ static void do_unflock(struct file *file, struct file_lock *fl)
static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
- if (!(fl->fl_flags & FL_FLOCK))
+ if (!(fl->c.flc_flags & FL_FLOCK))
return -ENOLCK;
- if (fl->fl_type == F_UNLCK) {
+ if (lock_is_unlock(fl)) {
do_unflock(file, fl);
return 0;
} else {
@@ -1562,10 +1591,11 @@ const struct file_operations gfs2_file_fops = {
.fsync = gfs2_fsync,
.lock = gfs2_lock,
.flock = gfs2_flock,
- .splice_read = generic_file_splice_read,
+ .splice_read = copy_splice_read,
.splice_write = gfs2_file_splice_write,
.setlease = simple_nosetlease,
.fallocate = gfs2_fallocate,
+ .fop_flags = FOP_ASYNC_LOCK,
};
const struct file_operations gfs2_dir_fops = {
@@ -1578,6 +1608,7 @@ const struct file_operations gfs2_dir_fops = {
.lock = gfs2_lock,
.flock = gfs2_flock,
.llseek = default_llseek,
+ .fop_flags = FOP_ASYNC_LOCK,
};
#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
@@ -1593,7 +1624,7 @@ const struct file_operations gfs2_file_fops_nolock = {
.open = gfs2_open,
.release = gfs2_release,
.fsync = gfs2_fsync,
- .splice_read = generic_file_splice_read,
+ .splice_read = copy_splice_read,
.splice_write = gfs2_file_splice_write,
.setlease = generic_setlease,
.fallocate = gfs2_fallocate,