author	Curt Wohlgemuth <curtw@google.com>	2011-04-30 13:26:26 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2011-04-30 13:26:26 -0400
commit	39db00f1c45e770856264bdb3ceca27980b01965 (patch)
tree	a06fcf532d9bbb06395016a653840efa7436c6bf /fs/ext4/page-io.c
parent	2035e776050aea57fb5255557216473e82793f2c (diff)
ext4: don't set PageUptodate in ext4_end_bio()
In the bio completion routine, we should not be setting PageUptodate at all -- it's set at sys_write() time, and is unaffected by the success or failure of the write to disk.

This can cause a page corruption bug when the file system's block size is less than the architecture's VM page size: if we have only written a single block, we might still end up setting the page's PageUptodate flag, indicating that the page is completely read into memory, which may not be true. This could cause subsequent reads to get bad data.

This commit also takes the opportunity to clean up error handling in ext4_end_bio() and remove some extraneous code:

   - fix ext4_end_bio() to set AS_EIO in the page->mapping->flags on
     error, which was left out by mistake.  This is needed so that
     fsync() will return an error if there was an I/O error.
   - remove the clear_buffer_dirty() call on unmapped buffers for each
     page.
   - consolidate page/buffer error handling in a single section.

Signed-off-by: Curt Wohlgemuth <curtw@google.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Reported-by: Jim Meyering <jim@meyering.net>
Reported-by: Hugh Dickins <hughd@google.com>
Cc: Mingming Cao <cmm@us.ibm.com>
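For background on the AS_EIO point: fsync() only learns about write-back failures through error bits recorded on the address_space, so a completion path that forgets to set them silently turns a failed write into a successful fsync(). The sketch below is illustrative only and not part of the patch; example_end_write() is a hypothetical helper, and it uses the generic mapping_set_error() helper where the patch sets the AS_EIO bit directly.

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical sketch: a write-back completion path recording an
 * I/O error so that a later fsync() on the file can report it.
 */
static void example_end_write(struct page *page, int error)
{
	if (error) {
		SetPageError(page);
		/*
		 * mapping_set_error() sets AS_EIO (or AS_ENOSPC for
		 * -ENOSPC) in page->mapping->flags; the patch below
		 * sets the bit directly with set_bit(AS_EIO, ...).
		 */
		mapping_set_error(page->mapping, error);
	}
}

The bit is later tested and cleared when fsync() waits for write-back to complete, at which point the stored error is returned to user space.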
Diffstat (limited to 'fs/ext4/page-io.c')
-rw-r--r--	fs/ext4/page-io.c	39
1 file changed, 11 insertions(+), 28 deletions(-)
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index b6dbd056fcb1..7bb8f76d470a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -203,46 +203,29 @@ static void ext4_end_bio(struct bio *bio, int error)
 	for (i = 0; i < io_end->num_io_pages; i++) {
 		struct page *page = io_end->pages[i]->p_page;
 		struct buffer_head *bh, *head;
-		int partial_write = 0;
+		loff_t offset;
+		loff_t io_end_offset;
 
-		head = page_buffers(page);
-		if (error)
+		if (error) {
 			SetPageError(page);
-		BUG_ON(!head);
-		if (head->b_size != PAGE_CACHE_SIZE) {
-			loff_t offset;
-			loff_t io_end_offset = io_end->offset + io_end->size;
+			set_bit(AS_EIO, &page->mapping->flags);
+			head = page_buffers(page);
+			BUG_ON(!head);
+
+			io_end_offset = io_end->offset + io_end->size;
 
 			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
 			bh = head;
 			do {
 				if ((offset >= io_end->offset) &&
-				    (offset+bh->b_size <= io_end_offset)) {
-					if (error)
-						buffer_io_error(bh);
-
-				}
-				if (buffer_delay(bh))
-					partial_write = 1;
-				else if (!buffer_mapped(bh))
-					clear_buffer_dirty(bh);
-				else if (buffer_dirty(bh))
-					partial_write = 1;
+				    (offset+bh->b_size <= io_end_offset))
+					buffer_io_error(bh);
+
 				offset += bh->b_size;
 				bh = bh->b_this_page;
 			} while (bh != head);
 		}
 
-		/*
-		 * If this is a partial write which happened to make
-		 * all buffers uptodate then we can optimize away a
-		 * bogus readpage() for the next read().  Here we
-		 * 'discover' whether the page went uptodate as a
-		 * result of this (potentially partial) write.
-		 */
-		if (!partial_write)
-			SetPageUptodate(page);
-
 		put_io_page(io_end->pages[i]);
 	}
 	io_end->num_io_pages = 0;
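As background for the removed SetPageUptodate() call: with a block size smaller than the page size, a page carries several buffer_heads, and it may only be marked uptodate once every one of them is uptodate. A completion handler for a bio that wrote back a single block has no such knowledge. The sketch below is illustrative only, not ext4's actual code; the helper name is made up and buffer/page locking is omitted.

#include <linux/buffer_head.h>
#include <linux/mm.h>

/*
 * Hypothetical sketch: mark a page uptodate only when every attached
 * buffer_head is uptodate.  Locking is intentionally omitted.
 */
static void example_check_page_uptodate(struct page *page)
{
	struct buffer_head *bh, *head;
	int all_uptodate = 1;

	bh = head = page_buffers(page);
	do {
		if (!buffer_uptodate(bh))
			all_uptodate = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	if (all_uptodate)
		SetPageUptodate(page);
}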