summaryrefslogtreecommitdiff
path: root/drivers/md/raid5.h
diff options
context:
space:
mode:
authorShaohua Li <shli@fb.com>2017-03-03 22:06:12 -0800
committerShaohua Li <shli@fb.com>2017-03-16 16:55:52 -0700
commitaaf9f12ebfafd1ea603d61ead6dbcf456a86e0f3 (patch)
tree6034be2d67702bae9057856bdc8f236636715d76 /drivers/md/raid5.h
parent84890c03b6c5d7e8d76ea5e20b6aaf7e7ad410f0 (diff)
md/raid5: sort bios
Previous patch (raid5: only dispatch IO from raid5d for harddisk raid) defers IO dispatching. The goal is to create better IO pattern. At that time, we don't sort the deferred IO and hope the block layer can do IO merge and sort. Now the raid5-cache writeback could create a large amount of bios. And if we enable multi-thread for stripe handling, we can't control when to dispatch IO to raid disks. A lot of the time, we are dispatching IO which the block layer can't merge effectively. This patch moves further for the IO dispatching defer. We accumulate bios, but we don't dispatch all the bios after a threshold is met. This 'dispatch partial portion of bios' strategy allows bios coming in a large time window to be sent to disks together. At the dispatching time, there is a large chance the block layer can merge the bios. To make this more effective, we dispatch IO in ascending order. This increases request merge chance and reduces disk seek. Signed-off-by: Shaohua Li <shli@fb.com>
Diffstat (limited to 'drivers/md/raid5.h')
-rw-r--r--drivers/md/raid5.h14
1 files changed, 13 insertions, 1 deletions
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 6b9d2e839e6d..985cdc4850c2 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -572,6 +572,14 @@ enum r5_cache_state {
*/
};
+#define PENDING_IO_MAX 512
+#define PENDING_IO_ONE_FLUSH 128
+struct r5pending_data {
+ struct list_head sibling;
+ sector_t sector; /* stripe sector */
+ struct bio_list bios;
+};
+
struct r5conf {
struct hlist_head *stripe_hashtbl;
/* only protect corresponding hash list and inactive_list */
@@ -689,9 +697,13 @@ struct r5conf {
int worker_cnt_per_group;
struct r5l_log *log;
- struct bio_list pending_bios;
spinlock_t pending_bios_lock;
bool batch_bio_dispatch;
+ struct r5pending_data *pending_data;
+ struct list_head free_list;
+ struct list_head pending_list;
+ int pending_data_cnt;
+ struct r5pending_data *next_pending_data;
};