author		Ross Zwisler <ross.zwisler@linux.intel.com>	2016-11-08 11:33:26 +1100
committer	Dave Chinner <david@fromorbit.com>	2016-11-08 11:33:26 +1100
commit		1550290b08012637e8d741a6a298ec6320dadda2 (patch)
tree		077a7b7353dfbefb1ff83411f75db6a594f8668e /fs/dax.c
parent		333ccc978e1e09af2690e459b6f97d8e91cc01fa (diff)
dax: dax_iomap_fault() needs to call iomap_end()
Currently iomap_end() doesn't do anything for DAX page faults in either ext2 or XFS: ext2_iomap_end() just checks for a write underrun, and xfs_file_iomap_end() checks whether it needs to finish a delayed allocation. However, in the future iomap_end() calls might be needed to make sure we have balanced allocations, locks, etc. So, add calls to iomap_end() with appropriate error handling to dax_iomap_fault().

Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Suggested-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dave Chinner <david@fromorbit.com>
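The rule being enforced is that every successful ->iomap_begin() call is balanced by an ->iomap_end() call, and that an iomap_end() failure must not mask an earlier fault error. A minimal sketch of that pairing, assuming the iomap_begin()/iomap_end() signatures used by this series (example_fault_path is a hypothetical name, not code from this patch; the argument and error-propagation choices mirror the hunks below):

#include <linux/fs.h>
#include <linux/iomap.h>

/*
 * Hypothetical sketch (not from this patch) of the begin/end pairing
 * that dax_iomap_fault() follows after this change.
 */
static int example_fault_path(struct inode *inode, loff_t pos,
		struct iomap_ops *ops)
{
	struct iomap iomap = { 0 };
	unsigned flags = 0;
	int error;

	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		return error;	/* begin failed: nothing to balance */

	/* ... fault handling that may set 'error' ... */

	if (ops->iomap_end) {
		if (error)
			/* report the fault's error, not iomap_end()'s */
			ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
					&iomap);
		else
			error = ops->iomap_end(inode, pos, PAGE_SIZE,
					PAGE_SIZE, flags, &iomap);
	}
	return error;
}

On success the full PAGE_SIZE is reported as written; on failure 0 is passed so the filesystem can unwind anything iomap_begin() set up.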
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	37
1 file changed, 29 insertions, 8 deletions
diff --git a/fs/dax.c b/fs/dax.c
index 77379546433e..6edd89b3b69c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1165,6 +1165,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	struct iomap iomap = { 0 };
 	unsigned flags = 0;
 	int error, major = 0;
+	int locked_status = 0;
 	void *entry;
 
 	/*
@@ -1194,7 +1195,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		goto unlock_entry;
 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
 		error = -EIO;	/* fs corruption? */
-		goto unlock_entry;
+		goto finish_iomap;
 	}
 
 	sector = dax_iomap_sector(&iomap, pos);
@@ -1216,13 +1217,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		}
 
 		if (error)
-			goto unlock_entry;
+			goto finish_iomap;
 		if (!radix_tree_exceptional_entry(entry)) {
 			vmf->page = entry;
-			return VM_FAULT_LOCKED;
+			locked_status = VM_FAULT_LOCKED;
+		} else {
+			vmf->entry = entry;
+			locked_status = VM_FAULT_DAX_LOCKED;
 		}
-		vmf->entry = entry;
-		return VM_FAULT_DAX_LOCKED;
+		goto finish_iomap;
 	}
 
 	switch (iomap.type) {
@@ -1237,8 +1240,10 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		break;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
-		if (!(vmf->flags & FAULT_FLAG_WRITE))
-			return dax_load_hole(mapping, entry, vmf);
+		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
+			locked_status = dax_load_hole(mapping, entry, vmf);
+			break;
+		}
 		/*FALLTHRU*/
 	default:
 		WARN_ON_ONCE(1);
@@ -1246,14 +1251,30 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		break;
 	}
 
+ finish_iomap:
+	if (ops->iomap_end) {
+		if (error) {
+			/* keep previous error */
+			ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
+					&iomap);
+		} else {
+			error = ops->iomap_end(inode, pos, PAGE_SIZE,
+					PAGE_SIZE, flags, &iomap);
+		}
+	}
  unlock_entry:
-	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	if (!locked_status || error)
+		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
 	/* -EBUSY is fine, somebody else faulted on the same PTE */
 	if (error < 0 && error != -EBUSY)
 		return VM_FAULT_SIGBUS | major;
+	if (locked_status) {
+		WARN_ON_ONCE(error);	/* -EBUSY from ops->iomap_end? */
+		return locked_status;
+	}
 	return VM_FAULT_NOPAGE | major;
 }
 EXPORT_SYMBOL_GPL(dax_iomap_fault);
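For reference, the ->iomap_end() hook that now runs on every exit from dax_iomap_fault() is supplied per filesystem. A hypothetical sketch (example_iomap_end is an illustrative name, not from this patch), modeled on the write-underrun check the commit message attributes to ext2_iomap_end():

#include <linux/fs.h>
#include <linux/iomap.h>

/*
 * Hypothetical ->iomap_end() (illustrative only; not from this patch).
 * 'written' is the byte count actually consumed: dax_iomap_fault()
 * above passes 0 on error and PAGE_SIZE on success.
 */
static int example_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned flags, struct iomap *iomap)
{
	/*
	 * A write underrun (written < length) can leave blocks allocated
	 * past what was actually used; a real filesystem would trim the
	 * unused range here, which is the case ext2_iomap_end() checks for.
	 */
	if ((flags & IOMAP_WRITE) && written < length) {
		/* filesystem-specific cleanup of the unused range */
	}
	return 0;
}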