author    Linus Torvalds <torvalds@linux-foundation.org>  2020-01-17 05:54:18 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-01-17 05:54:18 -0800
commit    5ffdff81cff9ae697b1d57fea9a86b6c6ede6078 (patch)
tree      97f0dca0492ab5e8bf5fa37c2b88078cab4cd453
parent    575966e080270b7574175da35f7f7dd5ecd89ff4 (diff)
parent    ad6bf88a6c19a39fb3b0045d78ea880325dfcf15 (diff)
Merge tag 'block-5.5-2020-01-16' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Three fixes that should go into this release:

   - The 32-bit segment size fix that I mentioned last week (Ming)

   - Use uint for the block size (Mikulas)

   - A null_blk zone write handling fix (Damien)"

* tag 'block-5.5-2020-01-16' of git://git.kernel.dk/linux-block:
  block: fix an integer overflow in logical block size
  null_blk: Fix zone write handling
  block: fix get_max_segment_size() overflow on 32bit arch
-rw-r--r--  block/blk-merge.c                9
-rw-r--r--  block/blk-settings.c             2
-rw-r--r--  drivers/block/null_blk_zoned.c   4
-rw-r--r--  drivers/md/dm-snap-persistent.c  2
-rw-r--r--  drivers/md/raid0.c               2
-rw-r--r--  include/linux/blkdev.h           8
6 files changed, 17 insertions, 10 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 347782a24a35..1534ed736363 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -164,8 +164,13 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
unsigned long mask = queue_segment_boundary(q);
offset = mask & (page_to_phys(start_page) + offset);
- return min_t(unsigned long, mask - offset + 1,
- queue_max_segment_size(q));
+
+ /*
+ * overflow may be triggered in case of zero page physical address
+ * on 32bit arch, use queue's max segment size when that happens.
+ */
+ return min_not_zero(mask - offset + 1,
+ (unsigned long)queue_max_segment_size(q));
}
/**
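
The new comment describes the failure mode: with the default segment boundary mask of ~0UL and a page whose physical address (plus offset) masks to 0, mask - offset + 1 wraps to 0 on a 32-bit arch, and min_t() then returns a zero segment size. A minimal standalone sketch of that wrap, not kernel code: unsigned int stands in for a 32-bit unsigned long, and min_not_zero_u32() mimics the kernel's min_not_zero().

#include <stdio.h>

/* Prefer the smaller value, but ignore a zero operand. */
static unsigned int min_not_zero_u32(unsigned int a, unsigned int b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned int mask = 0xffffffffu;   /* default segment boundary: none */
	unsigned int phys_plus_off = 0;    /* page physical address 0, offset 0 */
	unsigned int max_seg = 65536;      /* queue's max segment size */

	unsigned int offset = mask & phys_plus_off;   /* 0 */
	unsigned int len = mask - offset + 1;         /* 0x100000000 wraps to 0 */

	/* Old code: min_t() happily returns the bogus 0. */
	printf("min_t style:        %u\n", len < max_seg ? len : max_seg);
	/* New code: the zero operand is ignored, so the queue limit wins. */
	printf("min_not_zero style: %u\n", min_not_zero_u32(len, max_seg));
	return 0;
}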
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5f6dcc7a47bd..c8eda2e7b91e 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -328,7 +328,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
* storage device can address. The default of 512 covers most
* hardware.
**/
-void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
q->limits.logical_block_size = size;
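
Widening the prototype matters because the limit used to be stored as an unsigned short: a logical block size of 64 KiB or more no longer fits in 16 bits and silently truncates to 0. A standalone sketch of that truncation (the 64 KiB figure is just an example of a value that does not fit):

#include <stdio.h>

int main(void)
{
	unsigned int size = 65536;          /* a 64 KiB logical block size */
	unsigned short as_short = size;     /* old field type: truncates to 0 */
	unsigned int as_int = size;         /* new field type: stored intact */

	printf("unsigned short: %u\n", (unsigned int)as_short);  /* 0 */
	printf("unsigned int:   %u\n", as_int);                  /* 65536 */
	return 0;
}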
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 5cf49d9db95e..ed34785dd64b 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return BLK_STS_IOERR;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
/* Writes must be at the write pointer position */
if (sector != zone->wp)
return BLK_STS_IOERR;
- if (zone->cond == BLK_ZONE_COND_EMPTY)
+ if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
zone->wp += nr_sectors;
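
The hunk extends null_blk's zone write path to also accept zones in the explicitly-open and closed conditions, and only moves a zone to implicitly-open when it is not already explicitly open. A condensed standalone sketch of that condition handling, with illustrative types and names rather than the kernel's:

#include <stdbool.h>

enum zone_cond { EMPTY, IMP_OPEN, EXP_OPEN, CLOSED, FULL, READONLY, OFFLINE };

struct zone {
	enum zone_cond cond;
	unsigned long long wp;      /* write pointer, in sectors */
};

/* Returns true if a write of nr_sectors at 'sector' is accepted. */
static bool zone_write_ok(struct zone *zone, unsigned long long sector,
			  unsigned int nr_sectors)
{
	switch (zone->cond) {
	case EMPTY:
	case IMP_OPEN:
	case EXP_OPEN:      /* newly accepted by the fix */
	case CLOSED:        /* newly accepted by the fix */
		/* Writes must land exactly on the write pointer. */
		if (sector != zone->wp)
			return false;
		/*
		 * An explicitly opened zone stays explicitly open; any
		 * other accepted condition becomes implicitly open.
		 */
		if (zone->cond != EXP_OPEN)
			zone->cond = IMP_OPEN;
		zone->wp += nr_sectors;
		return true;
	default:            /* FULL, READONLY, OFFLINE: reject */
		return false;
	}
}

int main(void)
{
	struct zone z = { .cond = CLOSED, .wp = 1024 };
	return zone_write_ok(&z, 1024, 8) ? 0 : 1;   /* accepted; z.cond == IMP_OPEN */
}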
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3c50c4e4da8f..963d3774c93e 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -17,7 +17,7 @@
#include <linux/dm-bufio.h>
#define DM_MSG_PREFIX "persistent snapshot"
-#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */
+#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */
#define DM_PREFETCH_CHUNKS 12
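
The U suffix is presumably needed so the constant stays unsigned int, matching expressions such as bdev_logical_block_size(...) >> 9 (now unsigned int after the widening) inside the kernel's type-checked min()/max() macros, which reject signed/unsigned mixes. A standalone sketch of that kind of check, using a hypothetical checked_min() loosely modeled on the kernel macro (GNU C: statement expressions, __typeof__):

#include <stdio.h>

/* checked_min() evaluates its arguments twice; fine for this sketch. */
#define checked_min(a, b) ({						\
	_Static_assert(__builtin_types_compatible_p(__typeof__(a),	\
						    __typeof__(b)),	\
		       "checked_min: mismatched types");		\
	(a) < (b) ? (a) : (b); })

#define CHUNK_DEFAULT_PLAIN 32      /* int: would trip the assertion */
#define CHUNK_DEFAULT_U     32U     /* unsigned int: matches 'sectors' */

int main(void)
{
	unsigned int sectors = 128;     /* e.g. a chunk size expressed in sectors */

	/* checked_min(sectors, CHUNK_DEFAULT_PLAIN) would fail to compile. */
	printf("%u\n", checked_min(sectors, CHUNK_DEFAULT_U));   /* 32 */
	return 0;
}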
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b7c20979bd19..322386ff5d22 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
char b[BDEVNAME_SIZE];
char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- unsigned short blksize = 512;
+ unsigned blksize = 512;
*private_conf = ERR_PTR(-ENOMEM);
if (!conf)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 47eb22a3b7f9..4c636c42ad68 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -328,6 +328,7 @@ struct queue_limits {
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned int physical_block_size;
+ unsigned int logical_block_size;
unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
@@ -338,7 +339,6 @@ struct queue_limits {
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
@@ -1077,7 +1077,7 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
@@ -1291,7 +1291,7 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned short queue_logical_block_size(const struct request_queue *q)
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;
@@ -1301,7 +1301,7 @@ static inline unsigned short queue_logical_block_size(const struct request_queue
return retval;
}
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
return queue_logical_block_size(bdev_get_queue(bdev));
}