author     Chengguang Xu <cgxu519@mykernel.net>    2019-11-01 20:35:51 +0800
committer  Miklos Szeredi <mszeredi@redhat.com>    2020-01-22 20:11:41 +0100
commit     b504c6540d1752c73e16548062c49bc9f447cb12 (patch)
tree       4e18011188b62d98bcaa07164cd24dafc69338a3 /fs/overlayfs
parent     b1f9d3858f724ed45b279b689fb5b400d91352e3 (diff)
ovl: improving copy-up efficiency for big sparse file
Current copy-up is not efficient for big sparse files: it is not only slow, it also wastes disk space when the lower file contains large holes. This patch recognizes file holes and skips them during copy-up.

The hole-detection logic is as follows: when the next data position is larger than the current position, the hole in between is skipped; otherwise data is copied in chunks of OVL_COPY_UP_CHUNK_SIZE. This may not recognize every kind of hole, and sometimes only part of a hole is skipped, but it is enough for most use cases.

This optimization relies on the lseek(2) SEEK_DATA implementation, so on filesystems that do not support SEEK_DATA, copy-up behaves as before.

Reviewed-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Chengguang Xu <cgxu519@mykernel.net>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
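For background, here is a minimal userspace sketch (not part of the patch) of the lseek(2) SEEK_DATA/SEEK_HOLE interface that this optimization relies on: it creates a sparse file and walks its data extents. The path /tmp/sparse-demo and the 1 MiB hole size are arbitrary choices for illustration only.

/* Build with: cc -o sparse-demo sparse-demo.c */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/sparse-demo";	/* arbitrary demo path */
	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Create a sparse file: 1 byte of data, a 1 MiB hole, 1 more byte. */
	if (write(fd, "A", 1) != 1 ||
	    lseek(fd, 1024 * 1024, SEEK_CUR) < 0 ||
	    write(fd, "B", 1) != 1) {
		perror("create sparse file");
		close(fd);
		return 1;
	}

	/* Walk the file, printing each data extent. */
	off_t pos = 0;
	for (;;) {
		off_t data = lseek(fd, pos, SEEK_DATA);
		if (data < 0) {
			if (errno == ENXIO)	/* no data past pos: done */
				break;
			perror("SEEK_DATA");	/* e.g. EINVAL on old kernels */
			break;
		}
		off_t hole = lseek(fd, data, SEEK_HOLE);
		printf("data extent: [%lld, %lld)\n",
		       (long long)data, (long long)hole);
		pos = hole;
	}

	close(fd);
	unlink(path);
	return 0;
}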
Diffstat (limited to 'fs/overlayfs')
-rw-r--r--  fs/overlayfs/copy_up.c  41
1 file changed, 39 insertions(+), 2 deletions(-)
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index b168c65666e2..9fc47c2e078d 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -123,6 +123,9 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
loff_t old_pos = 0;
loff_t new_pos = 0;
loff_t cloned;
+ loff_t data_pos = -1;
+ loff_t hole_len;
+ bool skip_hole = false;
int error = 0;
if (len == 0)
@@ -144,7 +147,11 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
goto out;
/* Couldn't clone, so now we try to copy the data */
- /* FIXME: copy up sparse files efficiently */
+ /* Check if lower fs supports seek operation */
+ if (old_file->f_mode & FMODE_LSEEK &&
+ old_file->f_op->llseek)
+ skip_hole = true;
+
while (len) {
size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
long bytes;
@@ -157,6 +164,36 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
break;
}
+ /*
+ * Filling holes with zeroes wastes disk space and slows
+ * down copy-up, so try to skip holes during copy-up.
+ * This relies on the SEEK_DATA implementation in the
+ * lower fs; if the lower fs does not support it, copy-up
+ * behaves as before.
+ *
+ * Hole detection works as follows: when the next data
+ * position is larger than the current position, skip the
+ * hole; otherwise copy data in chunks of
+ * OVL_COPY_UP_CHUNK_SIZE. This may not recognize every
+ * kind of hole and sometimes only skips part of a hole,
+ * but it is enough for most use cases.
+ */
+
+ if (skip_hole && data_pos < old_pos) {
+ data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
+ if (data_pos > old_pos) {
+ hole_len = data_pos - old_pos;
+ len -= hole_len;
+ old_pos = new_pos = data_pos;
+ continue;
+ } else if (data_pos == -ENXIO) {
+ break;
+ } else if (data_pos < 0) {
+ skip_hole = false;
+ }
+ }
+
bytes = do_splice_direct(old_file, &old_pos,
new_file, &new_pos,
this_len, SPLICE_F_MOVE);
@@ -480,7 +517,7 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
}
inode_lock(temp->d_inode);
- if (c->metacopy)
+ if (S_ISREG(c->stat.mode))
err = ovl_set_size(temp, &c->stat);
if (!err)
err = ovl_set_attr(temp, &c->stat);
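For illustration, a hedged userspace sketch of the same strategy the patch applies in ovl_copy_up_data(): before copying each chunk, ask SEEK_DATA where the next data begins and skip the intervening hole; if SEEK_DATA fails, fall back to a plain chunked copy. The function name sparse_copy, the CHUNK constant, and the pread/pwrite transfer are illustrative assumptions, not the kernel code (which uses do_splice_direct and OVL_COPY_UP_CHUNK_SIZE). The final ftruncate() plays a role similar to the S_ISREG() change in the second hunk: it sets the destination size explicitly so a trailing hole is preserved rather than silently shortening the copy.

#define _GNU_SOURCE
#include <errno.h>
#include <unistd.h>

#define CHUNK (1 << 20)		/* stand-in for OVL_COPY_UP_CHUNK_SIZE */

/* Copy len bytes from src to dst, skipping holes where possible. */
static int sparse_copy(int src, int dst, off_t len)
{
	static char buf[CHUNK];
	off_t pos = 0;
	int skip_hole = 1;	/* cleared if SEEK_DATA is unsupported */

	while (pos < len) {
		if (skip_hole) {
			off_t data = lseek(src, pos, SEEK_DATA);
			if (data > pos) {
				pos = data;	/* skip the hole entirely */
				continue;
			} else if (data < 0 && errno == ENXIO) {
				break;		/* only a trailing hole left */
			} else if (data < 0) {
				skip_hole = 0;	/* fall back to a plain copy */
			}
		}

		size_t want = (size_t)(len - pos < CHUNK ? len - pos : CHUNK);
		ssize_t n = pread(src, buf, want, pos);
		if (n < 0)
			return -1;
		if (n == 0)
			break;
		if (pwrite(dst, buf, (size_t)n, pos) != n)
			return -1;
		pos += n;
	}

	/*
	 * Set the final size explicitly so an unskipped trailing hole
	 * still results in a destination of the correct length.
	 */
	return ftruncate(dst, len);
}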