author     Linus Torvalds <torvalds@linux-foundation.org>   2018-04-25 21:05:15 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-04-25 21:05:15 -0700
commit     8fba70b0850a0163f1018a122200ec11b854135c (patch)
tree       390ae1ce5cdb36c96fb850e35eb65648d5e4a51d
parent     c6dc3e711a28b6b4c5e63f2bca34a5cfd35e9a22 (diff)
parent     4412efecf7fda3b8f9f18feed7938f2281f5ccbc (diff)
Merge tag 'for-linus-20180425' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "I ended up sitting on this about a week longer than I wanted to, since
  we were hashing out details with a timeout change. I've now killed
  that patch, so we can flush the existing queue in due time.

  This contains:

   - Fix for an old regression, where entering the queue can be
     disturbed by a signal to the process. This can cause spurious EIO.
     Fix from Alan Jenkins.

   - cdrom information leak fix from Dan.

   - Trivial helper for testing queue FUA from Dave Chinner, part of his
     O_DIRECT FUA series.

   - Series of swim fixes from Finn that actually makes it work again.

   - Loop O_DIRECT corruption fix, which caused data corruption in
     production for us. From me.

   - BFQ crash fix from me.

   - bcache maintainer update. Michael no longer has the time to do it,
     Coly has stepped up to serve as the new maintainer.

   - blkcg locking fixes from Jiang Biao.

   - Revert of a change from this merge window from Ming, that causes an
     issue on some hardware.

   - Minor clarification doc addition from Linus Walleij"

* tag 'for-linus-20180425' of git://git.kernel.dk/linux-block: (22 commits)
  Revert "blk-mq: remove code for dealing with remapping queue"
  block: mq: Add some minor doc for core structs
  bcache: mark Coly Li as bcache maintainer
  MAINTAINERS: Remove me as maintainer of bcache
  blkcg: init root blkcg_gq under lock
  blkcg: small fix on comment in blkcg_init_queue
  blkcg: don't hold blkcg lock when deactivating policy
  block: add blk_queue_fua() helper function
  cdrom: information leak in cdrom_ioctl_media_changed()
  bfq-iosched: ensure to clear bic/bfqq pointers when preparing request
  blk-mq: start request gstate with gen 1
  block/swim: Select appropriate drive on device open
  block/swim: Fix IO error at end of medium
  block/swim: Check drive type
  block/swim: Rename macros to avoid inconsistent inverted logic
  block/swim: Don't log an error message for an invalid ioctl
  block/swim: Remove extra put_disk() call from error path
  block/swim: Fix array bounds check
  m68k/mac: Don't remap SWIM MMIO region
  loop: handle short DIO reads
  ...
-rw-r--r--  MAINTAINERS              |  2
-rw-r--r--  block/bfq-iosched.c      | 10
-rw-r--r--  block/blk-cgroup.c       | 28
-rw-r--r--  block/blk-core.c         | 15
-rw-r--r--  block/blk-mq.c           | 41
-rw-r--r--  block/blk-mq.h           |  3
-rw-r--r--  drivers/block/loop.c     | 64
-rw-r--r--  drivers/block/loop.h     |  1
-rw-r--r--  drivers/block/swim.c     | 49
-rw-r--r--  drivers/block/swim3.c    |  6
-rw-r--r--  drivers/cdrom/cdrom.c    |  2
-rw-r--r--  include/linux/blk-mq.h   |  3
-rw-r--r--  include/linux/blkdev.h   |  1
13 files changed, 144 insertions(+), 81 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 92be777d060a..dd66ae9a847e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2617,7 +2617,7 @@ S: Maintained
F: drivers/net/hamradio/baycom*
BCACHE (BLOCK LAYER CACHE)
-M: Michael Lyle <mlyle@lyle.org>
+M: Coly Li <colyli@suse.de>
M: Kent Overstreet <kent.overstreet@gmail.com>
L: linux-bcache@vger.kernel.org
W: http://bcache.evilpiepirate.org
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f0ecd98509d8..771ae9730ac6 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4934,8 +4934,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
bool new_queue = false;
bool bfqq_already_existing = false, split = false;
- if (!rq->elv.icq)
+ /*
+ * Even if we don't have an icq attached, we should still clear
+ * the scheduler pointers, as they might point to previously
+ * allocated bic/bfqq structs.
+ */
+ if (!rq->elv.icq) {
+ rq->elv.priv[0] = rq->elv.priv[1] = NULL;
return;
+ }
+
bic = icq_to_bic(rq->elv.icq);
spin_lock_irq(&bfqd->lock);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1c16694ae145..eb85cb87c40f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1177,26 +1177,20 @@ int blkcg_init_queue(struct request_queue *q)
preloaded = !radix_tree_preload(GFP_KERNEL);
- /*
- * Make sure the root blkg exists and count the existing blkgs. As
- * @q is bypassing at this point, blkg_lookup_create() can't be
- * used. Open code insertion.
- */
+ /* Make sure the root blkg exists. */
rcu_read_lock();
spin_lock_irq(q->queue_lock);
blkg = blkg_create(&blkcg_root, q, new_blkg);
+ if (IS_ERR(blkg))
+ goto err_unlock;
+ q->root_blkg = blkg;
+ q->root_rl.blkg = blkg;
spin_unlock_irq(q->queue_lock);
rcu_read_unlock();
if (preloaded)
radix_tree_preload_end();
- if (IS_ERR(blkg))
- return PTR_ERR(blkg);
-
- q->root_blkg = blkg;
- q->root_rl.blkg = blkg;
-
ret = blk_throtl_init(q);
if (ret) {
spin_lock_irq(q->queue_lock);
@@ -1204,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
}
return ret;
+
+err_unlock:
+ spin_unlock_irq(q->queue_lock);
+ rcu_read_unlock();
+ if (preloaded)
+ radix_tree_preload_end();
+ return PTR_ERR(blkg);
}
/**
@@ -1410,9 +1411,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
__clear_bit(pol->plid, q->blkcg_pols);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
- /* grab blkcg lock too while removing @pd from @blkg */
- spin_lock(&blkg->blkcg->lock);
-
if (blkg->pd[pol->plid]) {
if (!blkg->pd[pol->plid]->offline &&
pol->pd_offline_fn) {
@@ -1422,8 +1420,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
-
- spin_unlock(&blkg->blkcg->lock);
}
spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-core.c b/block/blk-core.c
index 806ce2442819..85909b431eb0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -201,6 +201,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->part = NULL;
seqcount_init(&rq->gstate_seq);
u64_stats_init(&rq->aborted_gstate_sync);
+ /*
+ * See comment of blk_mq_init_request
+ */
+ WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
}
EXPORT_SYMBOL(blk_rq_init);
@@ -915,7 +919,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
while (true) {
bool success = false;
- int ret;
rcu_read_lock();
if (percpu_ref_tryget_live(&q->q_usage_counter)) {
@@ -947,14 +950,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
*/
smp_rmb();
- ret = wait_event_interruptible(q->mq_freeze_wq,
- (atomic_read(&q->mq_freeze_depth) == 0 &&
- (preempt || !blk_queue_preempt_only(q))) ||
- blk_queue_dying(q));
+ wait_event(q->mq_freeze_wq,
+ (atomic_read(&q->mq_freeze_depth) == 0 &&
+ (preempt || !blk_queue_preempt_only(q))) ||
+ blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
- if (ret)
- return ret;
}
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0dc9e341c2a7..c3621453ad87 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2042,6 +2042,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
seqcount_init(&rq->gstate_seq);
u64_stats_init(&rq->aborted_gstate_sync);
+ /*
+ * start gstate with gen 1 instead of 0, otherwise it will be equal
+ * to aborted_gstate, and be identified timed out by
+ * blk_mq_terminate_expired.
+ */
+ WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
+
return 0;
}
@@ -2329,7 +2336,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
static void blk_mq_map_swqueue(struct request_queue *q)
{
- unsigned int i;
+ unsigned int i, hctx_idx;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,8 +2353,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
/*
* Map software to hardware queues.
+ *
+ * If the cpu isn't present, the cpu is mapped to first hctx.
*/
for_each_possible_cpu(i) {
+ hctx_idx = q->mq_map[i];
+ /* unmapped hw queue can be remapped after CPU topo changed */
+ if (!set->tags[hctx_idx] &&
+ !__blk_mq_alloc_rq_map(set, hctx_idx)) {
+ /*
+ * If tags initialization fail for some hctx,
+ * that hctx won't be brought online. In this
+ * case, remap the current ctx to hctx[0] which
+ * is guaranteed to always have tags allocated
+ */
+ q->mq_map[i] = 0;
+ }
+
ctx = per_cpu_ptr(q->queue_ctx, i);
hctx = blk_mq_map_queue(q, i);
@@ -2359,8 +2381,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
mutex_unlock(&q->sysfs_lock);
queue_for_each_hw_ctx(q, hctx, i) {
- /* every hctx should get mapped by at least one CPU */
- WARN_ON(!hctx->nr_ctx);
+ /*
+ * If no software queues are mapped to this hardware queue,
+ * disable it and free the request entries.
+ */
+ if (!hctx->nr_ctx) {
+ /* Never unmap queue 0. We need it as a
+ * fallback in case of a new remap fails
+ * allocation
+ */
+ if (i && set->tags[i])
+ blk_mq_free_map_and_requests(set, i);
+
+ hctx->tags = NULL;
+ continue;
+ }
hctx->tags = set->tags[i];
WARN_ON(!hctx->tags);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 88c558f71819..89b5cd3a6c70 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,6 +7,9 @@
struct blk_mq_tag_set;
+/**
+ * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
+ */
struct blk_mq_ctx {
struct {
spinlock_t lock;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9d04497a415..5d4e31655d96 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
static void lo_complete_rq(struct request *rq)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ blk_status_t ret = BLK_STS_OK;
- if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
- cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
- struct bio *bio = cmd->rq->bio;
-
- bio_advance(bio, cmd->ret);
- zero_fill_bio(bio);
+ if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+ req_op(rq) != REQ_OP_READ) {
+ if (cmd->ret < 0)
+ ret = BLK_STS_IOERR;
+ goto end_io;
}
- blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
+ /*
+ * Short READ - if we got some data, advance our request and
+ * retry it. If we got no data, end the rest with EIO.
+ */
+ if (cmd->ret) {
+ blk_update_request(rq, BLK_STS_OK, cmd->ret);
+ cmd->ret = 0;
+ blk_mq_requeue_request(rq, true);
+ } else {
+ if (cmd->use_aio) {
+ struct bio *bio = rq->bio;
+
+ while (bio) {
+ zero_fill_bio(bio);
+ bio = bio->bi_next;
+ }
+ }
+ ret = BLK_STS_IOERR;
+end_io:
+ blk_mq_end_request(rq, ret);
+ }
}
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
if (!atomic_dec_and_test(&cmd->ref))
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(rq);
}
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
{
struct iov_iter iter;
struct bio_vec *bvec;
- struct request *rq = cmd->rq;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
struct bio *bio = rq->bio;
struct file *file = lo->lo_backing_file;
unsigned int offset;
@@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer);
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
- struct loop_device *lo = cmd->rq->q->queuedata;
+ struct request *rq = bd->rq;
+ struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct loop_device *lo = rq->q->queuedata;
- blk_mq_start_request(bd->rq);
+ blk_mq_start_request(rq);
if (lo->lo_state != Lo_bound)
return BLK_STS_IOERR;
- switch (req_op(cmd->rq)) {
+ switch (req_op(rq)) {
case REQ_OP_FLUSH:
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
@@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
- if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) {
- cmd->css = cmd->rq->bio->bi_css;
+ if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
+ cmd->css = rq->bio->bi_css;
css_get(cmd->css);
} else
#endif
@@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
- const bool write = op_is_write(req_op(cmd->rq));
- struct loop_device *lo = cmd->rq->q->queuedata;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ const bool write = op_is_write(req_op(rq));
+ struct loop_device *lo = rq->q->queuedata;
int ret = 0;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
@@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
goto failed;
}
- ret = do_req_filebacked(lo, cmd->rq);
+ ret = do_req_filebacked(lo, rq);
failed:
/* complete non-aio request */
if (!cmd->use_aio || ret) {
cmd->ret = ret ? -EIO : 0;
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(rq);
}
}
@@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->rq = rq;
kthread_init_work(&cmd->work, loop_queue_work);
-
return 0;
}
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 0f45416e4fcf..b78de9879f4f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -66,7 +66,6 @@ struct loop_device {
struct loop_cmd {
struct kthread_work work;
- struct request *rq;
bool use_aio; /* use AIO interface to handle I/O */
atomic_t ref; /* only for aio */
long ret;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 64e066eba72e..0e31884a9519 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -110,7 +110,7 @@ struct iwm {
/* Select values for swim_select and swim_readbit */
#define READ_DATA_0 0x074
-#define TWOMEG_DRIVE 0x075
+#define ONEMEG_DRIVE 0x075
#define SINGLE_SIDED 0x076
#define DRIVE_PRESENT 0x077
#define DISK_IN 0x170
@@ -118,9 +118,9 @@ struct iwm {
#define TRACK_ZERO 0x172
#define TACHO 0x173
#define READ_DATA_1 0x174
-#define MFM_MODE 0x175
+#define GCR_MODE 0x175
#define SEEK_COMPLETE 0x176
-#define ONEMEG_MEDIA 0x177
+#define TWOMEG_MEDIA 0x177
/* Bits in handshake register */
@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
struct floppy_struct *g;
fs->disk_in = 1;
fs->write_protected = swim_readbit(base, WRITE_PROT);
- fs->type = swim_readbit(base, ONEMEG_MEDIA);
if (swim_track00(base))
printk(KERN_ERR
@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
swim_track00(base);
+ fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
+ HD_MEDIA : DD_MEDIA;
+ fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
get_floppy_geometry(fs, 0, &g);
fs->total_secs = g->size;
fs->secpercyl = g->head * g->sect;
@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
udelay(10);
- swim_drive(base, INTERNAL_DRIVE);
+ swim_drive(base, fs->location);
swim_motor(base, ON);
swim_action(base, SETMFM);
if (fs->ejected)
@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
goto out;
}
+ set_capacity(fs->disk, fs->total_secs);
+
if (mode & FMODE_NDELAY)
return 0;
@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
if (copy_to_user((void __user *) param, (void *) &floppy_type,
sizeof(struct floppy_struct)))
return -EFAULT;
- break;
-
- default:
- printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
- cmd);
- return -ENOSYS;
+ return 0;
}
- return 0;
+ return -ENOTTY;
}
static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
struct swim_priv *swd = data;
int drive = (*part & 3);
- if (drive > swd->floppy_count)
+ if (drive >= swd->floppy_count)
return NULL;
*part = 0;
@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
swim_motor(base, OFF);
- if (swim_readbit(base, SINGLE_SIDED))
- fs->head_number = 1;
- else
- fs->head_number = 2;
+ fs->type = HD_MEDIA;
+ fs->head_number = 2;
+
fs->ref_count = 0;
fs->ejected = 1;
@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
/* scan floppy drives */
swim_drive(base, INTERNAL_DRIVE);
- if (swim_readbit(base, DRIVE_PRESENT))
+ if (swim_readbit(base, DRIVE_PRESENT) &&
+ !swim_readbit(base, ONEMEG_DRIVE))
swim_add_floppy(swd, INTERNAL_DRIVE);
swim_drive(base, EXTERNAL_DRIVE);
- if (swim_readbit(base, DRIVE_PRESENT))
+ if (swim_readbit(base, DRIVE_PRESENT) &&
+ !swim_readbit(base, ONEMEG_DRIVE))
swim_add_floppy(swd, EXTERNAL_DRIVE);
/* register floppy drives */
@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
&swd->lock);
if (!swd->unit[drive].disk->queue) {
err = -ENOMEM;
- put_disk(swd->unit[drive].disk);
goto exit_put_disks;
}
blk_queue_bounce_limit(swd->unit[drive].disk->queue,
@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
goto out;
}
- swim_base = ioremap(res->start, resource_size(res));
+ swim_base = (struct swim __iomem *)res->start;
if (!swim_base) {
ret = -ENOMEM;
goto out_release_io;
@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
if (!get_swim_mode(swim_base)) {
printk(KERN_INFO "SWIM device not found !\n");
ret = -ENODEV;
- goto out_iounmap;
+ goto out_release_io;
}
/* set platform driver data */
@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
if (!swd) {
ret = -ENOMEM;
- goto out_iounmap;
+ goto out_release_io;
}
platform_set_drvdata(dev, swd);
@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
out_kfree:
kfree(swd);
-out_iounmap:
- iounmap(swim_base);
out_release_io:
release_mem_region(res->start, resource_size(res));
out:
@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
for (drive = 0; drive < swd->floppy_count; drive++)
floppy_eject(&swd->unit[drive]);
- iounmap(swd->base);
-
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index af51015d056e..469541c1e51e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -148,7 +148,7 @@ struct swim3 {
#define MOTOR_ON 2
#define RELAX 3 /* also eject in progress */
#define READ_DATA_0 4
-#define TWOMEG_DRIVE 5
+#define ONEMEG_DRIVE 5
#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
#define DRIVE_PRESENT 7
#define DISK_IN 8
@@ -156,9 +156,9 @@ struct swim3 {
#define TRACK_ZERO 10
#define TACHO 11
#define READ_DATA_1 12
-#define MFM_MODE 13
+#define GCR_MODE 13
#define SEEK_COMPLETE 14
-#define ONEMEG_MEDIA 15
+#define TWOMEG_MEDIA 15
/* Definitions of values used in writing and formatting */
#define DATA_ESCAPE 0x99
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8327478effd0..bfc566d3f31a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
- if ((unsigned int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
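The cdrom hunk above removes a narrowing cast from a bounds check. As a hedged illustration of why that cast matters (variable values below are hypothetical, not taken from the driver): 'arg' is an unsigned long while the capacity field is an int, so truncating arg to 32 bits before the comparison can let a large index slip past the check and be used to read beyond the slot array.

    /* Hypothetical sketch of the truncation problem the fix addresses. */
    unsigned long arg = 0x100000001UL;    /* low 32 bits are just 1 */
    int capacity = 4;

    if ((unsigned int)arg >= capacity)    /* 1 >= 4 is false, so no -EINVAL */
            return -EINVAL;
    /* ... a later slots[arg] access would then index far out of bounds ... */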
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3986f4b3461..ebc34a5686dc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -9,6 +9,9 @@
struct blk_mq_tags;
struct blk_flush_queue;
+/**
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
+ */
struct blk_mq_hw_ctx {
struct {
spinlock_t lock;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9af3e0f430bc..c362aadfe036 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -737,6 +737,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_preempt_only(q) \
test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
extern int blk_set_preempt_only(struct request_queue *q);
extern void blk_clear_preempt_only(struct request_queue *q);
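The new blk_queue_fua() helper simply tests QUEUE_FLAG_FUA on a request queue. A minimal sketch of how a caller might use it (the function below is hypothetical and not part of this series): a direct I/O write path could tag its bio with REQ_FUA when the device honours FUA, and rely on a separate cache flush otherwise.

    /* Hypothetical helper, assuming the usual bio op/flag conventions. */
    static unsigned int dio_write_opf(struct request_queue *q, bool need_durable)
    {
            if (need_durable && blk_queue_fua(q))
                    return REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
            return REQ_OP_WRITE | REQ_SYNC;
    }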