author    Linus Torvalds <torvalds@linux-foundation.org>  2019-01-05 18:29:13 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-01-05 18:29:13 -0800
commit    d7252d0d36375fe8c544098469a21d03fa267a55 (patch)
tree      0912357cdfa9ab2031d342d27b7e828b158c8bd7
parent    0fe4e2d5cd931ad2ff99d61cfdd5c6dc0c3ec60b (diff)
parent    59f75fd0e31532bdcf65f754516cac2954d5ddc7 (diff)
Merge tag 'for-linus-20190104' of git://git.kernel.dk/linux-block
Pull block updates and fixes from Jens Axboe:

 - Pulled in MD changes that Shaohua had queued up for 4.21.
   Unfortunately we lost Shaohua late 2018; I'm sending these in on his
   behalf.

 - In conjunction with the above, I added a CREDITS entry for Shaohua.

 - sunvdc queue restart fix (Ming)

* tag 'for-linus-20190104' of git://git.kernel.dk/linux-block:
  Add CREDITS entry for Shaohua Li
  block: sunvdc: don't run hw queue synchronously from irq context
  md: fix raid10 hang issue caused by barrier
  raid10: refactor common wait code from regular read/write request
  md: remvoe redundant condition check
  lib/raid6: add option to skip algo benchmarking
  lib/raid6: sort algos in rough performance order
  lib/raid6: check for assembler SSSE3 support
  lib/raid6: avoid __attribute_const__ redefinition
  lib/raid6: add missing include for raid6test
  md: remove set but not used variable 'bi_rdev'
-rw-r--r--  CREDITS                  |  6
-rw-r--r--  drivers/block/sunvdc.c   |  2
-rw-r--r--  drivers/md/md.c          | 14
-rw-r--r--  drivers/md/raid10.c      | 76
-rw-r--r--  include/linux/raid/pq.h  |  8
-rw-r--r--  lib/Kconfig              |  8
-rw-r--r--  lib/raid6/algos.c        | 81
-rw-r--r--  lib/raid6/test/Makefile  |  3
8 files changed, 101 insertions(+), 97 deletions(-)
diff --git a/CREDITS b/CREDITS
index 7d397ee67524..e818eb6a3e71 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2208,6 +2208,12 @@ N: Christopher Li
E: sparse@chrisli.org
D: Sparse maintainer 2009 - 2018
+N: Shaohua Li
+D: Worked on many parts of the kernel, from core x86, ACPI, PCI, KVM, MM,
+D: and much more. He was the maintainer of MD from 2016 to 2018. Shaohua
+D: passed away late 2018, he will be greatly missed.
+W: https://www.spinics.net/lists/raid/msg61993.html
+
N: Stephan Linz
E: linz@mazet.de
E: Stephan.Linz@gmx.de
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 0ff27e2d98c4..26937ba28f78 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -181,7 +181,7 @@ static void vdc_blk_queue_start(struct vdc_port *port)
* allocated a disk.
*/
if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
- blk_mq_start_hw_queues(port->disk->queue);
+ blk_mq_start_stopped_hw_queues(port->disk->queue, true);
}
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9a0a1e0934d5..fd4af4de03b4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2147,14 +2147,12 @@ EXPORT_SYMBOL(md_integrity_register);
*/
int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
- struct blk_integrity *bi_rdev;
struct blk_integrity *bi_mddev;
char name[BDEVNAME_SIZE];
if (!mddev->gendisk)
return 0;
- bi_rdev = bdev_get_integrity(rdev->bdev);
bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
@@ -5693,14 +5691,10 @@ int md_run(struct mddev *mddev)
return 0;
abort:
- if (mddev->flush_bio_pool) {
- mempool_destroy(mddev->flush_bio_pool);
- mddev->flush_bio_pool = NULL;
- }
- if (mddev->flush_pool){
- mempool_destroy(mddev->flush_pool);
- mddev->flush_pool = NULL;
- }
+ mempool_destroy(mddev->flush_bio_pool);
+ mddev->flush_bio_pool = NULL;
+ mempool_destroy(mddev->flush_pool);
+ mddev->flush_pool = NULL;
return err;
}
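
The simplified md_run() error path above works because mempool_destroy() is a no-op when passed a NULL pool, so the surrounding if checks add nothing. A minimal userspace sketch of the same idiom, using free() (which is likewise NULL-safe) and a hypothetical demo_ctx in place of struct mddev:

    /* Sketch only: free(), like mempool_destroy(), accepts NULL safely. */
    #include <stdlib.h>

    struct demo_ctx {                /* hypothetical stand-in for struct mddev */
        void *flush_bio_pool;
        void *flush_pool;
    };

    static void demo_abort(struct demo_ctx *ctx)
    {
        free(ctx->flush_bio_pool);   /* no NULL check needed */
        ctx->flush_bio_pool = NULL;
        free(ctx->flush_pool);
        ctx->flush_pool = NULL;
    }

    int main(void)
    {
        struct demo_ctx ctx = { 0 };
        demo_abort(&ctx);            /* both pointers NULL: still fine */
        return 0;
    }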
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b98e746e7fc4..abb5d382f64d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1124,6 +1124,29 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
+/*
+ * 1. Register the new request and wait if the reconstruction thread has put
+ * up a bar for new requests. Continue immediately if no resync is active
+ * currently.
+ * 2. If IO spans the reshape position. Need to wait for reshape to pass.
+ */
+static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
+ struct bio *bio, sector_t sectors)
+{
+ wait_barrier(conf);
+ while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ bio->bi_iter.bi_sector < conf->reshape_progress &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+ raid10_log(conf->mddev, "wait reshape");
+ allow_barrier(conf);
+ wait_event(conf->wait_barrier,
+ conf->reshape_progress <= bio->bi_iter.bi_sector ||
+ conf->reshape_progress >= bio->bi_iter.bi_sector +
+ sectors);
+ wait_barrier(conf);
+ }
+}
+
static void raid10_read_request(struct mddev *mddev, struct bio *bio,
struct r10bio *r10_bio)
{
@@ -1132,7 +1155,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
const int op = bio_op(bio);
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
int max_sectors;
- sector_t sectors;
struct md_rdev *rdev;
char b[BDEVNAME_SIZE];
int slot = r10_bio->read_slot;
@@ -1166,30 +1188,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
}
rcu_read_unlock();
}
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
- wait_barrier(conf);
-
- sectors = r10_bio->sectors;
- while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio->bi_iter.bi_sector < conf->reshape_progress &&
- bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
- /*
- * IO spans the reshape position. Need to wait for reshape to
- * pass
- */
- raid10_log(conf->mddev, "wait reshape");
- allow_barrier(conf);
- wait_event(conf->wait_barrier,
- conf->reshape_progress <= bio->bi_iter.bi_sector ||
- conf->reshape_progress >= bio->bi_iter.bi_sector +
- sectors);
- wait_barrier(conf);
- }
+ regular_request_wait(mddev, conf, bio, r10_bio->sectors);
rdev = read_balance(conf, r10_bio, &max_sectors);
if (!rdev) {
if (err_rdev) {
@@ -1209,7 +1209,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
bio_chain(split, bio);
+ allow_barrier(conf);
generic_make_request(bio);
+ wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
r10_bio->sectors = max_sectors;
@@ -1332,30 +1334,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
finish_wait(&conf->wait_barrier, &w);
}
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
- wait_barrier(conf);
-
sectors = r10_bio->sectors;
- while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio->bi_iter.bi_sector < conf->reshape_progress &&
- bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
- /*
- * IO spans the reshape position. Need to wait for reshape to
- * pass
- */
- raid10_log(conf->mddev, "wait reshape");
- allow_barrier(conf);
- wait_event(conf->wait_barrier,
- conf->reshape_progress <= bio->bi_iter.bi_sector ||
- conf->reshape_progress >= bio->bi_iter.bi_sector +
- sectors);
- wait_barrier(conf);
- }
-
+ regular_request_wait(mddev, conf, bio, sectors);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
(mddev->reshape_backwards
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
@@ -1514,7 +1494,9 @@ retry_write:
struct bio *split = bio_split(bio, r10_bio->sectors,
GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
+ allow_barrier(conf);
generic_make_request(bio);
+ wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
}
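
The loop condition that regular_request_wait() factors out is a plain interval test: the request [bi_sector, bi_sector + sectors) only needs to wait while it straddles reshape_progress. (The allow_barrier()/wait_barrier() pair added around generic_make_request() is the actual hang fix; the sketch below covers only the reshape-overlap test.) A standalone sketch of that predicate, with hypothetical names and sector_t modelled as unsigned long long:

    #include <assert.h>

    typedef unsigned long long sector_t;   /* stand-in for the kernel typedef */

    /* True while [sector, sector + sectors) straddles the reshape position. */
    static int spans_reshape(sector_t sector, sector_t sectors,
                             sector_t reshape_progress)
    {
        return sector < reshape_progress &&
               sector + sectors > reshape_progress;
    }

    int main(void)
    {
        assert(spans_reshape(90, 20, 100));    /* 90..110 crosses 100: wait  */
        assert(!spans_reshape(100, 20, 100));  /* starts at the boundary: go */
        assert(!spans_reshape(70, 30, 100));   /* ends at the boundary: go   */
        return 0;
    }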
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index ea8505204fdf..605cf46c17bd 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -35,6 +35,7 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#include <limits.h>
#include <stddef.h>
#include <sys/mman.h>
+#include <sys/time.h>
#include <sys/types.h>
/* Not standard, but glibc defines it */
@@ -52,7 +53,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#define __init
#define __exit
-#define __attribute_const__ __attribute__((const))
+#ifndef __attribute_const__
+# define __attribute_const__ __attribute__((const))
+#endif
#define noinline __attribute__((noinline))
#define preempt_enable()
@@ -67,6 +70,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#define MODULE_DESCRIPTION(desc)
#define subsys_initcall(x)
#define module_exit(x)
+
+#define IS_ENABLED(x) (x)
+#define CONFIG_RAID6_PQ_BENCHMARK 1
#endif /* __KERNEL__ */
/* Routine choices */
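
The IS_ENABLED()/CONFIG_RAID6_PQ_BENCHMARK fallbacks exist so the userspace raid6 test can compile the same check that algos.c gains further down; outside the kernel there is no Kconfig, so IS_ENABLED(x) simply expands to its argument and the symbol is pinned to 1. A standalone illustration of that expansion (a hypothetical test file, not part of the patch):

    #include <stdio.h>

    /* Userspace stand-ins mirroring the pq.h fallbacks above. */
    #define IS_ENABLED(x) (x)
    #define CONFIG_RAID6_PQ_BENCHMARK 1

    int main(void)
    {
        /* Preprocesses to: if (!(1)) ... so the benchmark path stays enabled. */
        if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK))
            printf("benchmarking skipped\n");
        else
            printf("benchmarking enabled\n");
        return 0;
    }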
diff --git a/lib/Kconfig b/lib/Kconfig
index 79bc2eef9c14..a9e56539bd11 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -10,6 +10,14 @@ menu "Library routines"
config RAID6_PQ
tristate
+config RAID6_PQ_BENCHMARK
+ bool "Automatically choose fastest RAID6 PQ functions"
+ depends on RAID6_PQ
+ default y
+ help
+ Benchmark all available RAID6 PQ functions on init and choose the
+ fastest one.
+
config BITREVERSE
tristate
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 5065b1e7e327..7e4f7a8ffa8e 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -34,64 +34,64 @@ struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
const struct raid6_calls * const raid6_algos[] = {
-#if defined(__ia64__)
- &raid6_intx16,
- &raid6_intx32,
-#endif
#if defined(__i386__) && !defined(__arch_um__)
- &raid6_mmxx1,
- &raid6_mmxx2,
- &raid6_sse1x1,
- &raid6_sse1x2,
- &raid6_sse2x1,
- &raid6_sse2x2,
-#ifdef CONFIG_AS_AVX2
- &raid6_avx2x1,
- &raid6_avx2x2,
-#endif
#ifdef CONFIG_AS_AVX512
- &raid6_avx512x1,
&raid6_avx512x2,
+ &raid6_avx512x1,
#endif
-#endif
-#if defined(__x86_64__) && !defined(__arch_um__)
- &raid6_sse2x1,
- &raid6_sse2x2,
- &raid6_sse2x4,
#ifdef CONFIG_AS_AVX2
- &raid6_avx2x1,
&raid6_avx2x2,
- &raid6_avx2x4,
+ &raid6_avx2x1,
+#endif
+ &raid6_sse2x2,
+ &raid6_sse2x1,
+ &raid6_sse1x2,
+ &raid6_sse1x1,
+ &raid6_mmxx2,
+ &raid6_mmxx1,
#endif
+#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
- &raid6_avx512x1,
- &raid6_avx512x2,
&raid6_avx512x4,
+ &raid6_avx512x2,
+ &raid6_avx512x1,
#endif
+#ifdef CONFIG_AS_AVX2
+ &raid6_avx2x4,
+ &raid6_avx2x2,
+ &raid6_avx2x1,
+#endif
+ &raid6_sse2x4,
+ &raid6_sse2x2,
+ &raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
- &raid6_altivec1,
- &raid6_altivec2,
- &raid6_altivec4,
- &raid6_altivec8,
- &raid6_vpermxor1,
- &raid6_vpermxor2,
- &raid6_vpermxor4,
&raid6_vpermxor8,
+ &raid6_vpermxor4,
+ &raid6_vpermxor2,
+ &raid6_vpermxor1,
+ &raid6_altivec8,
+ &raid6_altivec4,
+ &raid6_altivec2,
+ &raid6_altivec1,
#endif
#if defined(CONFIG_S390)
&raid6_s390vx8,
#endif
- &raid6_intx1,
- &raid6_intx2,
- &raid6_intx4,
- &raid6_intx8,
#ifdef CONFIG_KERNEL_MODE_NEON
- &raid6_neonx1,
- &raid6_neonx2,
- &raid6_neonx4,
&raid6_neonx8,
+ &raid6_neonx4,
+ &raid6_neonx2,
+ &raid6_neonx1,
#endif
+#if defined(__ia64__)
+ &raid6_intx32,
+ &raid6_intx16,
+#endif
+ &raid6_intx8,
+ &raid6_intx4,
+ &raid6_intx2,
+ &raid6_intx1,
NULL
};
@@ -163,6 +163,11 @@ static inline const struct raid6_calls *raid6_choose_gen(
if ((*algo)->valid && !(*algo)->valid())
continue;
+ if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+ best = *algo;
+ break;
+ }
+
perf = 0;
preempt_disable();
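
Because the table is now ordered roughly fastest-first, building with CONFIG_RAID6_PQ_BENCHMARK=n turns raid6_choose_gen() into "take the first entry whose ->valid() check passes". A small userspace sketch of that selection pattern, using a hypothetical table rather than the kernel structures:

    #include <stdio.h>

    /* Hypothetical mirror of the raid6_calls idea: a name plus a validity hook. */
    struct algo {
        const char *name;
        int (*valid)(void);            /* NULL means always usable */
    };

    static int have_avx2(void) { return 0; }   /* pretend AVX2 is unavailable */

    static const struct algo algos[] = {       /* sorted fastest first */
        { "avx2x4", have_avx2 },
        { "sse2x4", NULL },
        { "int8",   NULL },
        { NULL,     NULL },
    };

    int main(void)
    {
        const struct algo *best = NULL;

        for (const struct algo *a = algos; a->name; a++) {
            if (a->valid && !a->valid())
                continue;                      /* mirrors the ->valid() skip */
            best = a;                          /* first valid entry wins */
            break;
        }
        printf("chosen: %s\n", best ? best->name : "none");
        return 0;
    }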
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 79777645cac9..3ab8720aa2f8 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -34,6 +34,9 @@ endif
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
+ CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \
+ gcc -c -x assembler - >&/dev/null && \
+ rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX2=1)