Diffstat (limited to 'fs/ext4/mmp.c')
-rw-r--r--  fs/ext4/mmp.c  223
1 file changed, 116 insertions(+), 107 deletions(-)
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 214461e42a05..6f57c181ff77 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/buffer_head.h>
@@ -13,56 +14,57 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
int offset = offsetof(struct mmp_struct, mmp_checksum);
__u32 csum;
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);
+ csum = ext4_chksum(sbi->s_csum_seed, (char *)mmp, offset);
return cpu_to_le32(csum);
}
-int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
{
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+ if (!ext4_has_feature_metadata_csum(sb))
return 1;
return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
}
-void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
{
- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+ if (!ext4_has_feature_metadata_csum(sb))
return;
mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
}
/*
- * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+ * Write the MMP block using REQ_SYNC to try to get the block on-disk
* faster.
*/
-static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
+static int write_mmp_block_thawed(struct super_block *sb,
+ struct buffer_head *bh)
{
struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
- /*
- * We protect against freezing so that we don't create dirty buffers
- * on frozen filesystem.
- */
- sb_start_write(sb);
ext4_mmp_csum_set(sb, mmp);
- mark_buffer_dirty(bh);
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
- submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
+ submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
- sb_end_write(sb);
if (unlikely(!buffer_uptodate(bh)))
- return 1;
-
+ return -EIO;
return 0;
}
+static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
+{
+ /*
+ * We protect against freezing so that we don't create dirty buffers
+ * on frozen filesystem.
+ */
+ scoped_guard(super_write, sb)
+ return write_mmp_block_thawed(sb, bh);
+}
+
/*
* Read the MMP block. It _must_ be read from disk and hence we clear the
* uptodate flag on the buffer.
@@ -71,6 +73,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
ext4_fsblk_t mmp_block)
{
struct mmp_struct *mmp;
+ int ret;
if (*bh)
clear_buffer_uptodate(*bh);
@@ -78,33 +81,35 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
/* This would be sb_bread(sb, mmp_block), except we need to be sure
* that the MD RAID device cache has been bypassed, and that the read
* is not blocked in the elevator. */
- if (!*bh)
+ if (!*bh) {
*bh = sb_getblk(sb, mmp_block);
- if (!*bh)
- return -ENOMEM;
- if (*bh) {
- get_bh(*bh);
- lock_buffer(*bh);
- (*bh)->b_end_io = end_buffer_read_sync;
- submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
- wait_on_buffer(*bh);
- if (!buffer_uptodate(*bh)) {
- brelse(*bh);
- *bh = NULL;
+ if (!*bh) {
+ ret = -ENOMEM;
+ goto warn_exit;
}
}
- if (unlikely(!*bh)) {
- ext4_warning(sb, "Error while reading MMP block %llu",
- mmp_block);
- return -EIO;
- }
- mmp = (struct mmp_struct *)((*bh)->b_data);
- if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
- !ext4_mmp_csum_verify(sb, mmp))
- return -EINVAL;
+ lock_buffer(*bh);
+ ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL, false);
+ if (ret)
+ goto warn_exit;
+ mmp = (struct mmp_struct *)((*bh)->b_data);
+ if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
+ ret = -EFSCORRUPTED;
+ goto warn_exit;
+ }
+ if (!ext4_mmp_csum_verify(sb, mmp)) {
+ ret = -EFSBADCRC;
+ goto warn_exit;
+ }
return 0;
+warn_exit:
+ brelse(*bh);
+ *bh = NULL;
+ ext4_warning(sb, "Error %d while reading MMP block %llu",
+ ret, mmp_block);
+ return ret;
}
/*
@@ -113,12 +118,12 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
const char *function, unsigned int line, const char *msg)
{
- __ext4_warning(sb, function, line, msg);
+ __ext4_warning(sb, function, line, "%s", msg);
__ext4_warning(sb, function, line,
- "MMP failure info: last update time: %llu, last update "
- "node: %s, last update device: %s\n",
- (long long unsigned int) le64_to_cpu(mmp->mmp_time),
- mmp->mmp_nodename, mmp->mmp_bdevname);
+ "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
+ (unsigned long long)le64_to_cpu(mmp->mmp_time),
+ (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
+ (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
}
/*
@@ -126,9 +131,9 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
*/
static int kmmpd(void *data)
{
- struct super_block *sb = ((struct mmpd_data *) data)->sb;
- struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
+ struct super_block *sb = data;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ struct buffer_head *bh = EXT4_SB(sb)->s_mmp_bh;
struct mmp_struct *mmp;
ext4_fsblk_t mmp_block;
u32 seq = 0;
@@ -137,11 +142,11 @@ static int kmmpd(void *data)
unsigned mmp_check_interval;
unsigned long last_update_time;
unsigned long diff;
- int retval;
+ int retval = 0;
mmp_block = le64_to_cpu(es->s_mmp_block);
mmp = (struct mmp_struct *)(bh->b_data);
- mmp->mmp_time = cpu_to_le64(get_seconds());
+ mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
/*
* Start with the higher mmp_check_interval and reduce it if
* the MMP block is being updated on time.
@@ -149,17 +154,21 @@ static int kmmpd(void *data)
mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
EXT4_MMP_MIN_CHECK_INTERVAL);
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
- bdevname(bh->b_bdev, mmp->mmp_bdevname);
memcpy(mmp->mmp_nodename, init_utsname()->nodename,
sizeof(mmp->mmp_nodename));
- while (!kthread_should_stop()) {
+ while (!kthread_should_stop() && !ext4_emergency_state(sb)) {
+ if (!ext4_has_feature_mmp(sb)) {
+ ext4_warning(sb, "kmmpd being stopped since MMP feature"
+ " has been disabled.");
+ goto wait_to_exit;
+ }
if (++seq > EXT4_MMP_SEQ_MAX)
seq = 1;
mmp->mmp_seq = cpu_to_le32(seq);
- mmp->mmp_time = cpu_to_le64(get_seconds());
+ mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
last_update_time = jiffies;
retval = write_mmp_block(sb, bh);
@@ -168,26 +177,13 @@ static int kmmpd(void *data)
* (s_mmp_update_interval * 60) seconds.
*/
if (retval) {
- if ((failed_writes % 60) == 0)
- ext4_error(sb, "Error writing to MMP block");
+ if ((failed_writes % 60) == 0) {
+ ext4_error_err(sb, -retval,
+ "Error writing to MMP block");
+ }
failed_writes++;
}
- if (!(le32_to_cpu(es->s_feature_incompat) &
- EXT4_FEATURE_INCOMPAT_MMP)) {
- ext4_warning(sb, "kmmpd being stopped since MMP feature"
- " has been disabled.");
- EXT4_SB(sb)->s_mmp_tsk = NULL;
- goto failed;
- }
-
- if (sb->s_flags & MS_RDONLY) {
- ext4_warning(sb, "kmmpd being stopped since filesystem "
- "has been remounted as readonly.");
- EXT4_SB(sb)->s_mmp_tsk = NULL;
- goto failed;
- }
-
diff = jiffies - last_update_time;
if (diff < mmp_update_interval * HZ)
schedule_timeout_interruptible(mmp_update_interval *
@@ -205,11 +201,10 @@ static int kmmpd(void *data)
retval = read_mmp_block(sb, &bh_check, mmp_block);
if (retval) {
- ext4_error(sb, "error reading MMP data: %d",
- retval);
-
- EXT4_SB(sb)->s_mmp_tsk = NULL;
- goto failed;
+ ext4_error_err(sb, -retval,
+ "error reading MMP data: %d",
+ retval);
+ goto wait_to_exit;
}
mmp_check = (struct mmp_struct *)(bh_check->b_data);
@@ -220,8 +215,10 @@ static int kmmpd(void *data)
"Error while updating MMP info. "
"The filesystem seems to have been"
" multiply mounted.");
- ext4_error(sb, "abort");
- goto failed;
+ ext4_error_err(sb, EBUSY, "abort");
+ put_bh(bh_check);
+ retval = -EBUSY;
+ goto wait_to_exit;
}
put_bh(bh_check);
}
@@ -230,9 +227,9 @@ static int kmmpd(void *data)
* Adjust the mmp_check_interval depending on how much time
* it took for the MMP block to be written.
*/
- mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
- EXT4_MMP_MAX_CHECK_INTERVAL),
- EXT4_MMP_MIN_CHECK_INTERVAL);
+ mmp_check_interval = clamp(EXT4_MMP_CHECK_MULT * diff / HZ,
+ EXT4_MMP_MIN_CHECK_INTERVAL,
+ EXT4_MMP_MAX_CHECK_INTERVAL);
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
}
@@ -240,29 +237,36 @@ static int kmmpd(void *data)
* Unmount seems to be clean.
*/
mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
- mmp->mmp_time = cpu_to_le64(get_seconds());
+ mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
retval = write_mmp_block(sb, bh);
-failed:
- kfree(data);
- brelse(bh);
+wait_to_exit:
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop())
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
return retval;
}
+void ext4_stop_mmpd(struct ext4_sb_info *sbi)
+{
+ if (sbi->s_mmp_tsk) {
+ kthread_stop(sbi->s_mmp_tsk);
+ brelse(sbi->s_mmp_bh);
+ sbi->s_mmp_tsk = NULL;
+ }
+}
+
/*
* Get a random new sequence number but make sure it is not greater than
* EXT4_MMP_SEQ_MAX.
*/
static unsigned int mmp_new_seq(void)
{
- u32 new_seq;
-
- do {
- get_random_bytes(&new_seq, sizeof(u32));
- } while (new_seq > EXT4_MMP_SEQ_MAX);
-
- return new_seq;
+ return get_random_u32_below(EXT4_MMP_SEQ_MAX + 1);
}
/*
@@ -274,7 +278,6 @@ int ext4_multi_mount_protect(struct super_block *sb,
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
struct buffer_head *bh = NULL;
struct mmp_struct *mmp = NULL;
- struct mmpd_data *mmpd_data;
u32 seq;
unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
unsigned int wait_time = 0;
@@ -283,6 +286,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
mmp_block >= ext4_blocks_count(es)) {
ext4_warning(sb, "Invalid MMP block in superblock");
+ retval = -EINVAL;
goto failed;
}
@@ -308,6 +312,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
if (seq == EXT4_MMP_SEQ_FSCK) {
dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
+ retval = -EBUSY;
goto failed;
}
@@ -321,6 +326,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
ext4_warning(sb, "MMP startup interrupted, failing mount\n");
+ retval = -ETIMEDOUT;
goto failed;
}
@@ -331,6 +337,7 @@ int ext4_multi_mount_protect(struct super_block *sb,
if (seq != le32_to_cpu(mmp->mmp_seq)) {
dump_mmp_msg(sb, mmp,
"Device is already active on another node.");
+ retval = -EBUSY;
goto failed;
}
@@ -341,7 +348,11 @@ skip:
seq = mmp_new_seq();
mmp->mmp_seq = cpu_to_le32(seq);
- retval = write_mmp_block(sb, bh);
+ /*
+ * On mount / remount we are protected against fs freezing (by s_umount
+ * semaphore) and grabbing freeze protection upsets lockdep
+ */
+ retval = write_mmp_block_thawed(sb, bh);
if (retval)
goto failed;
@@ -349,7 +360,8 @@ skip:
* wait for MMP interval and check mmp_seq.
*/
if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
- ext4_warning(sb, "MMP startup interrupted, failing mount\n");
+ ext4_warning(sb, "MMP startup interrupted, failing mount");
+ retval = -ETIMEDOUT;
goto failed;
}
@@ -360,28 +372,27 @@ skip:
if (seq != le32_to_cpu(mmp->mmp_seq)) {
dump_mmp_msg(sb, mmp,
"Device is already active on another node.");
+ retval = -EBUSY;
goto failed;
}
- mmpd_data = kmalloc(sizeof(struct mmpd_data), GFP_KERNEL);
- if (!mmpd_data) {
- ext4_warning(sb, "not enough memory for mmpd_data");
- goto failed;
- }
- mmpd_data->sb = sb;
- mmpd_data->bh = bh;
+ EXT4_SB(sb)->s_mmp_bh = bh;
+
+ BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
+ snprintf(mmp->mmp_bdevname, sizeof(mmp->mmp_bdevname),
+ "%pg", bh->b_bdev);
/*
* Start a kernel thread to update the MMP block periodically.
*/
- EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
- bdevname(bh->b_bdev,
- mmp->mmp_bdevname));
+ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%.*s",
+ (int)sizeof(mmp->mmp_bdevname),
+ mmp->mmp_bdevname);
if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
EXT4_SB(sb)->s_mmp_tsk = NULL;
- kfree(mmpd_data);
ext4_warning(sb, "Unable to create kmmpd thread for %s.",
sb->s_id);
+ retval = -ENOMEM;
goto failed;
}
@@ -389,7 +400,5 @@ skip:
failed:
brelse(bh);
- return 1;
+ return retval;
}
-
-
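
For readers skimming the patch, the sketch below is a minimal, hypothetical userspace model of the handshake that ext4_multi_mount_protect() and kmmpd() implement: read the MMP block, fail if fsck owns it, otherwise publish a fresh sequence number and re-check it after the advertised interval before claiming the device. The helpers (read_mmp, write_mmp, new_seq, multi_mount_protect) and the fake in-memory "disk" are illustrative stand-ins, not kernel APIs; only the sequence-marker values mirror ext4's EXT4_MMP_SEQ_* constants, and the wait logic is deliberately simplified.

/*
 * Simplified, userspace-only model of ext4 multi-mount protection.
 * Not part of the patch above; all helpers are illustrative.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#define MMP_SEQ_CLEAN 0xFF4D4D50u   /* clean unmount marker        */
#define MMP_SEQ_FSCK  0xE24D4D50u   /* "fsck is running" marker    */
#define MMP_SEQ_MAX   0xE24D4D4Fu   /* largest regular seq value   */

struct mmp_block {
	uint32_t seq;               /* bumped periodically by the owner  */
	uint16_t check_interval;    /* seconds a challenger should wait  */
};

/* Stand-in for reading/writing the on-disk MMP block. */
static struct mmp_block fake_disk = { MMP_SEQ_CLEAN, 5 };
static struct mmp_block read_mmp(void)        { return fake_disk; }
static void write_mmp(struct mmp_block m)     { fake_disk = m; }

static uint32_t new_seq(void)
{
	/* Any regular (non-marker) value in [1, MMP_SEQ_MAX]. */
	return 1 + (uint32_t)(rand() % MMP_SEQ_MAX);
}

/* Returns 0 if we may mount, -1 if another node appears to own the device. */
static int multi_mount_protect(void)
{
	struct mmp_block mmp = read_mmp();
	uint32_t seq = mmp.seq;

	if (seq == MMP_SEQ_FSCK) {
		fprintf(stderr, "fsck is running on the filesystem\n");
		return -1;
	}

	if (seq != MMP_SEQ_CLEAN) {
		/* Unclean: wait and see whether the sequence advances. */
		sleep(mmp.check_interval);
		if (read_mmp().seq != seq) {
			fprintf(stderr, "device active on another node\n");
			return -1;
		}
	}

	/* Publish our own sequence number and re-check once more. */
	mmp.seq = new_seq();
	write_mmp(mmp);
	sleep(mmp.check_interval);
	if (read_mmp().seq != mmp.seq) {
		fprintf(stderr, "device active on another node\n");
		return -1;
	}

	/* In the kernel, kmmpd now keeps bumping mmp.seq periodically. */
	return 0;
}

int main(void)
{
	return multi_mount_protect() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The real code additionally verifies the MMP checksum, scales the wait and check intervals, and keeps the sequence number moving from the kmmpd kthread; restructuring that kthread's lifetime and error handling is what most of the diff above is about.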