author     Al Viro <viro@zeniv.linux.org.uk>	2010-03-22 19:36:35 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>	2010-05-21 18:31:14 -0400
commit     551de6f34dfeefbeeadb32909c387d393114ecc8 (patch)
tree       822af803e7e75ed476b0a176639c162e0395910d /fs
parent     1712ac8fda7d8bc4dc921f5777b7423aacad7263 (diff)
Leave superblocks on s_list until the end
We used to remove from s_list and s_instances at the same time. So let's *not* do the former, and instead skip superblocks that have an empty s_instances in the loops over s_list. The next step, of course, will be to get rid of the rescan logic in those loops.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
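
For orientation, the sketch below shows the walker pattern this patch converges on. It is a composite assembled from the loops touched in the hunks that follow, not a copy of any single function; it assumes the usual kernel list/locking primitives and stands in an arbitrary per-superblock step for the real work:

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		/*
		 * sb now stays on s_list until its last reference is gone,
		 * so an entry with an empty s_instances is a superblock
		 * that has already been shut down: skip it.
		 */
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;			/* pin it across the lock drop */
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		/* ... per-superblock work ... */
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		/* drop the pin; restart the walk if sb went away meanwhile */
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
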
Diffstat (limited to 'fs')
 fs/buffer.c      |  2
 fs/dcache.c      |  2
 fs/drop_caches.c |  2
 fs/quota/quota.c |  2
 fs/super.c       | 16
 fs/sync.c        |  5
 6 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index c9c266db0624..021ec4da9932 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -568,6 +568,8 @@ static void do_thaw_all(struct work_struct *work)
 	spin_lock(&sb_lock);
 restart:
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 		down_read(&sb->s_umount);
diff --git a/fs/dcache.c b/fs/dcache.c
index 2b6f09af13ab..5afc4994bb27 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -552,6 +552,8 @@ restart:
 	prune_ratio = unused / count;
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		if (sb->s_nr_dentry_unused == 0)
 			continue;
 		sb->s_count++;
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 31f4b0e6d72c..9cd4e4a70f56 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -40,6 +40,8 @@ static void drop_pagecache(void)
 	spin_lock(&sb_lock);
 restart:
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 		down_read(&sb->s_umount);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index cfc78826da90..4669e7e639bd 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -59,6 +59,8 @@ static int quota_sync_all(int type)
 	spin_lock(&sb_lock);
 restart:
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		if (!sb->s_qcop || !sb->s_qcop->quota_sync)
 			continue;
diff --git a/fs/super.c b/fs/super.c
index 157657b32798..0390461dfca0 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -135,6 +135,7 @@ static int __put_super(struct super_block *sb)
 	int ret = 0;
 
 	if (!--sb->s_count) {
+		list_del_init(&sb->s_list);
 		destroy_super(sb);
 		ret = 1;
 	}
@@ -151,7 +152,7 @@ static int __put_super(struct super_block *sb)
 int __put_super_and_need_restart(struct super_block *sb)
 {
 	/* check for race with generic_shutdown_super() */
-	if (list_empty(&sb->s_list)) {
+	if (list_empty(&sb->s_instances)) {
 		/* super block is removed, need to restart... */
 		__put_super(sb);
 		return 1;
@@ -308,8 +309,7 @@ void generic_shutdown_super(struct super_block *sb)
 	}
 	spin_lock(&sb_lock);
 	/* should be initialized for __put_super_and_need_restart() */
-	list_del_init(&sb->s_list);
-	list_del(&sb->s_instances);
+	list_del_init(&sb->s_instances);
 	spin_unlock(&sb_lock);
 	up_write(&sb->s_umount);
 }
@@ -400,6 +400,8 @@ void sync_supers(void)
 	spin_lock(&sb_lock);
 restart:
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		if (sb->s_op->write_super && sb->s_dirt) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
@@ -435,6 +437,8 @@ struct super_block * get_super(struct block_device *bdev)
 	spin_lock(&sb_lock);
 rescan:
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		if (sb->s_bdev == bdev) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
@@ -471,6 +475,8 @@ struct super_block *get_active_super(struct block_device *bdev)
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		if (sb->s_bdev != bdev)
 			continue;
@@ -490,6 +496,8 @@ struct super_block * user_get_super(dev_t dev)
 	spin_lock(&sb_lock);
 rescan:
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		if (sb->s_dev == dev) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
@@ -600,6 +608,8 @@ static void do_emergency_remount(struct work_struct *work)
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 		down_write(&sb->s_umount);
diff --git a/fs/sync.c b/fs/sync.c
index 92b228176f7c..ad6691bae370 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -99,10 +99,13 @@ static void sync_filesystems(int wait)
 	mutex_lock(&mutex);		/* Could be down_interruptible */
 	spin_lock(&sb_lock);
 	list_for_each_entry(sb, &super_blocks, s_list)
-		sb->s_need_sync = 1;
+		if (!list_empty(&sb->s_instances))
+			sb->s_need_sync = 1;
 
 restart:
 	list_for_each_entry(sb, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
 		if (!sb->s_need_sync)
 			continue;
 		sb->s_need_sync = 0;
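
The commit message's "next step" can be sketched as follows: once superblocks stay on s_list until the final __put_super(), a walker that keeps a reference on the entry it is standing on never loses its place, so the restart/rescan labels become unnecessary. This is only an illustration of that direction, assuming __put_super() may be called directly with sb_lock held; it is not part of this patch:

	struct super_block *sb, *prev = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;			/* keeps sb (our cursor) on s_list */
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		/* ... per-superblock work ... */
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (prev)
			__put_super(prev);	/* safe: sb itself is still pinned */
		prev = sb;
	}
	if (prev)
		__put_super(prev);
	spin_unlock(&sb_lock);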