Diffstat (limited to 'include/linux/buffer_head.h')
-rw-r--r--  include/linux/buffer_head.h  13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 9b6556d3f110..33fa5e94aa80 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -138,6 +138,17 @@ BUFFER_FNS(Defer_Completion, defer_completion)
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
/*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with IO errors or whatever that
+ * might clear the bit) has to come from other state (eg BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
+ /*
* make it consistent with folio_mark_uptodate
* pairs with smp_load_acquire in buffer_uptodate
*/
@@ -226,7 +237,7 @@ void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
-int submit_bh(blk_opf_t, struct buffer_head *);
+void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
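The pattern the new comment relies on (smp_mb__before_atomic in the setter pairing with smp_load_acquire in buffer_uptodate) is the usual publish/subscribe use of a flag bit: the writer makes the buffer contents visible before the Uptodate bit, and a reader that observes the bit with acquire ordering is guaranteed to see those contents. Below is a minimal userspace sketch of that pattern using C11 atomics in place of the kernel's smp_* primitives; all names are illustrative and not taken from buffer_head.h.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
	int data;              /* payload, published before the flag */
	atomic_uint state;     /* bit 0 plays the role of BH_Uptodate */
};

#define UPTODATE 0x1u

/* Writer side: the caller has already filled b->data, just as I/O completion
 * fills the buffer before set_buffer_uptodate() is called in the kernel. */
static void sketch_set_uptodate(struct buf *b)
{
	/* If somebody already set the flag, they already provided the
	 * release ordering, so a reader will see some valid payload. */
	if (atomic_load_explicit(&b->state, memory_order_relaxed) & UPTODATE)
		return;

	/* Release pairs with the acquire load in sketch_uptodate(): the
	 * payload store is ordered before the flag becomes visible. */
	atomic_fetch_or_explicit(&b->state, UPTODATE, memory_order_release);
}

/* Reader side: only touch the payload after seeing the flag. */
static bool sketch_uptodate(struct buf *b, int *out)
{
	if (atomic_load_explicit(&b->state, memory_order_acquire) & UPTODATE) {
		*out = b->data;
		return true;
	}
	return false;
}

int main(void)
{
	struct buf b;
	int v;

	atomic_init(&b.state, 0);
	b.data = 42;            /* "I/O" fills the buffer first */
	sketch_set_uptodate(&b);

	if (sketch_uptodate(&b, &v))
		printf("uptodate, data=%d\n", v);
	return 0;
}

As in the patch, the setter returns early when the bit is already set: whoever set it first already supplied the release ordering, so repeating the barrier and the atomic read-modify-write gains nothing, and any serialization against paths that clear the bit (I/O errors and the like) has to come from other state such as BH_Lock.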