diff options
| -rw-r--r-- | fs/netfs/misc.c | 50 | 
 1 file changed, 35 insertions(+), 15 deletions(-)
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 69324761fcf7..c1f321cf5999 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -97,10 +97,20 @@ EXPORT_SYMBOL(netfs_clear_inode_writeback);
 void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 {
 	struct netfs_folio *finfo;
+	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
 	size_t flen = folio_size(folio);
 
 	_enter("{%lx},%zx,%zx", folio->index, offset, length);
 
+	if (offset == 0 && length == flen) {
+		unsigned long long i_size = i_size_read(&ctx->inode);
+		unsigned long long fpos = folio_pos(folio), end;
+
+		end = umin(fpos + flen, i_size);
+		if (fpos < i_size && end > ctx->zero_point)
+			ctx->zero_point = end;
+	}
+
 	folio_wait_private_2(folio); /* [DEPRECATED] */
 
 	if (!folio_test_private(folio))
@@ -115,18 +125,34 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 		/* We have a partially uptodate page from a streaming write. */
 		unsigned int fstart = finfo->dirty_offset;
 		unsigned int fend = fstart + finfo->dirty_len;
-		unsigned int end = offset + length;
+		unsigned int iend = offset + length;
 
 		if (offset >= fend)
 			return;
-		if (end <= fstart)
+		if (iend <= fstart)
+			return;
+
+		/* The invalidation region overlaps the data.  If the region
+		 * covers the start of the data, we either move along the start
+		 * or just erase the data entirely.
+		 */
+		if (offset <= fstart) {
+			if (iend >= fend)
+				goto erase_completely;
+			/* Move the start of the data. */
+			finfo->dirty_len = fend - iend;
+			finfo->dirty_offset = offset;
+			return;
+		}
+
+		/* Reduce the length of the data if the invalidation region
+		 * covers the tail part.
+		 */
+		if (iend >= fend) {
+			finfo->dirty_len = offset - fstart;
 			return;
-		if (offset <= fstart && end >= fend)
-			goto erase_completely;
-		if (offset <= fstart && end > fstart)
-			goto reduce_len;
-		if (offset > fstart && end >= fend)
-			goto move_start;
+		}
+
 		/* A partial write was split.  The caller has already zeroed
 		 * it, so just absorb the hole.
 		 */
@@ -139,12 +165,6 @@ erase_completely:
 	folio_clear_uptodate(folio);
 	kfree(finfo);
 	return;
-reduce_len:
-	finfo->dirty_len = offset + length - finfo->dirty_offset;
-	return;
-move_start:
-	finfo->dirty_len -= offset - finfo->dirty_offset;
-	finfo->dirty_offset = offset;
 }
 
 EXPORT_SYMBOL(netfs_invalidate_folio);
@@ -164,7 +184,7 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
 	if (folio_test_dirty(folio))
 		return false;
 
-	end = folio_pos(folio) + folio_size(folio);
+	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
 	if (end > ctx->zero_point)
 		ctx->zero_point = end;
