-rw-r--r--  block/bio.c                       |  26
-rw-r--r--  block/blk-lib.c                   |   6
-rw-r--r--  block/blk-map.c                   |  90
-rw-r--r--  block/blk-mq.c                    |  18
-rw-r--r--  block/blk-zoned.c                 |   4
-rw-r--r--  block/fops.c                      |   4
-rw-r--r--  block/ioctl.c                     |  84
-rw-r--r--  drivers/block/loop.c              |   5
-rw-r--r--  drivers/block/zloop.c             |   5
-rw-r--r--  drivers/nvme/host/auth.c          |   2
-rw-r--r--  drivers/nvme/host/fabrics.c       |   2
-rw-r--r--  drivers/nvme/host/fc.c            |   8
-rw-r--r--  drivers/nvme/host/ioctl.c         |   2
-rw-r--r--  drivers/nvme/host/pci.c           |   2
-rw-r--r--  drivers/nvme/host/pr.c            |   6
-rw-r--r--  drivers/nvme/target/admin-cmd.c   |   2
-rw-r--r--  drivers/nvme/target/auth.c        |  18
-rw-r--r--  drivers/nvme/target/core.c        |   5
-rw-r--r--  drivers/nvme/target/fc.c          |  48
-rw-r--r--  drivers/nvme/target/fcloop.c      |   9
-rw-r--r--  drivers/nvme/target/nvmet.h       |   1
-rw-r--r--  drivers/nvme/target/passthru.c    |   2
-rw-r--r--  drivers/nvme/target/pci-epf.c     |  14
-rw-r--r--  drivers/nvme/target/rdma.c        |  12
-rw-r--r--  drivers/nvme/target/tcp.c         |   6
-rw-r--r--  drivers/scsi/sd.c                 |  12
-rw-r--r--  include/linux/blk-mq.h            |  18
-rw-r--r--  include/linux/blk_types.h         |   5
-rw-r--r--  include/uapi/linux/pr.h           |  14
-rw-r--r--  io_uring/rw.c                     |   1
30 files changed, 278 insertions, 153 deletions
diff --git a/block/bio.c b/block/bio.c
index 7b13bdf72de0..fa5ff36b443f 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -517,20 +517,18 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
return NULL;
- if (opf & REQ_ALLOC_CACHE) {
- if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
- bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
- gfp_mask, bs);
- if (bio)
- return bio;
- /*
- * No cached bio available, bio returned below marked with
- * REQ_ALLOC_CACHE to particpate in per-cpu alloc cache.
- */
- } else {
- opf &= ~REQ_ALLOC_CACHE;
- }
- }
+ if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
+ opf |= REQ_ALLOC_CACHE;
+ bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
+ gfp_mask, bs);
+ if (bio)
+ return bio;
+ /*
+ * No cached bio available, bio returned below marked with
+ * REQ_ALLOC_CACHE to participate in per-cpu alloc cache.
+ */
+ } else
+ opf &= ~REQ_ALLOC_CACHE;
/*
* submit_bio_noacct() converts recursion to iteration; this means if
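
With this change REQ_ALLOC_CACHE becomes an internal detail: bio_alloc_bioset() sets the flag itself whenever the bioset has a per-cpu cache and the bio is small enough, and strips it otherwise, instead of requiring callers to opt in. A minimal caller sketch — hypothetical driver code, assuming a bioset created with BIOSET_PERCPU_CACHE:

	/* Hypothetical: my_bioset is assumed initialized with BIOSET_PERCPU_CACHE. */
	static struct bio_set my_bioset;

	static struct bio *my_alloc_read_bio(struct block_device *bdev)
	{
		/*
		 * No REQ_ALLOC_CACHE in opf: after this patch the allocator
		 * adds it when nr_vecs <= BIO_INLINE_VECS and the bioset has
		 * a per-cpu cache, and clears it otherwise.
		 */
		return bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_KERNEL,
					&my_bioset);
	}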
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 19e0203cc18a..9e2cc58f881f 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -202,13 +202,13 @@ static void __blkdev_issue_zero_pages(struct block_device *bdev,
unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
struct bio *bio;
- bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
- bio->bi_iter.bi_sector = sector;
-
if ((flags & BLKDEV_ZERO_KILLABLE) &&
fatal_signal_pending(current))
break;
+ bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
+ bio->bi_iter.bi_sector = sector;
+
do {
unsigned int len;
diff --git a/block/blk-map.c b/block/blk-map.c
index 17a1dc288678..4533094d9458 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -37,6 +37,25 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
return bmd;
}
+static inline void blk_mq_map_bio_put(struct bio *bio)
+{
+ bio_put(bio);
+}
+
+static struct bio *blk_rq_map_bio_alloc(struct request *rq,
+ unsigned int nr_vecs, gfp_t gfp_mask)
+{
+ struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
+ &fs_bio_set);
+ if (!bio)
+ return NULL;
+
+ return bio;
+}
+
/**
* bio_copy_from_iter - copy all pages from iov_iter to bio
* @bio: The &struct bio which describes the I/O as destination
@@ -154,10 +173,9 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
ret = -ENOMEM;
- bio = bio_kmalloc(nr_pages, gfp_mask);
+ bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
if (!bio)
goto out_bmd;
- bio_init_inline(bio, NULL, nr_pages, req_op(rq));
if (map_data) {
nr_pages = 1U << map_data->page_order;
@@ -233,43 +251,12 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
cleanup:
if (!map_data)
bio_free_pages(bio);
- bio_uninit(bio);
- kfree(bio);
+ blk_mq_map_bio_put(bio);
out_bmd:
kfree(bmd);
return ret;
}
-static void blk_mq_map_bio_put(struct bio *bio)
-{
- if (bio->bi_opf & REQ_ALLOC_CACHE) {
- bio_put(bio);
- } else {
- bio_uninit(bio);
- kfree(bio);
- }
-}
-
-static struct bio *blk_rq_map_bio_alloc(struct request *rq,
- unsigned int nr_vecs, gfp_t gfp_mask)
-{
- struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
- struct bio *bio;
-
- if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
- bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
- &fs_bio_set);
- if (!bio)
- return NULL;
- } else {
- bio = bio_kmalloc(nr_vecs, gfp_mask);
- if (!bio)
- return NULL;
- bio_init_inline(bio, bdev, nr_vecs, req_op(rq));
- }
- return bio;
-}
-
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
gfp_t gfp_mask)
{
@@ -318,25 +305,23 @@ static void bio_invalidate_vmalloc_pages(struct bio *bio)
static void bio_map_kern_endio(struct bio *bio)
{
bio_invalidate_vmalloc_pages(bio);
- bio_uninit(bio);
- kfree(bio);
+ blk_mq_map_bio_put(bio);
}
-static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op,
+static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len,
gfp_t gfp_mask)
{
unsigned int nr_vecs = bio_add_max_vecs(data, len);
struct bio *bio;
- bio = bio_kmalloc(nr_vecs, gfp_mask);
+ bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
if (!bio)
return ERR_PTR(-ENOMEM);
- bio_init_inline(bio, NULL, nr_vecs, op);
+
if (is_vmalloc_addr(data)) {
bio->bi_private = data;
if (!bio_add_vmalloc(bio, data, len)) {
- bio_uninit(bio);
- kfree(bio);
+ blk_mq_map_bio_put(bio);
return ERR_PTR(-EINVAL);
}
} else {
@@ -349,8 +334,7 @@ static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op,
static void bio_copy_kern_endio(struct bio *bio)
{
bio_free_pages(bio);
- bio_uninit(bio);
- kfree(bio);
+ blk_mq_map_bio_put(bio);
}
static void bio_copy_kern_endio_read(struct bio *bio)
@@ -369,6 +353,7 @@ static void bio_copy_kern_endio_read(struct bio *bio)
/**
* bio_copy_kern - copy kernel address into bio
+ * @rq: request to fill
* @data: pointer to buffer to copy
* @len: length in bytes
* @op: bio/request operation
@@ -377,9 +362,10 @@ static void bio_copy_kern_endio_read(struct bio *bio)
* copy the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
+static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len,
gfp_t gfp_mask)
{
+ enum req_op op = req_op(rq);
unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = kaddr >> PAGE_SHIFT;
@@ -394,10 +380,9 @@ static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
return ERR_PTR(-EINVAL);
nr_pages = end - start;
- bio = bio_kmalloc(nr_pages, gfp_mask);
+ bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
if (!bio)
return ERR_PTR(-ENOMEM);
- bio_init_inline(bio, NULL, nr_pages, op);
while (len) {
struct page *page;
@@ -431,8 +416,7 @@ static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
cleanup:
bio_free_pages(bio);
- bio_uninit(bio);
- kfree(bio);
+ blk_mq_map_bio_put(bio);
return ERR_PTR(-ENOMEM);
}
@@ -679,18 +663,16 @@ int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
return -EINVAL;
if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf))
- bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask);
+ bio = bio_copy_kern(rq, kbuf, len, gfp_mask);
else
- bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask);
+ bio = bio_map_kern(rq, kbuf, len, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
ret = blk_rq_append_bio(rq, bio);
- if (unlikely(ret)) {
- bio_uninit(bio);
- kfree(bio);
- }
+ if (unlikely(ret))
+ blk_mq_map_bio_put(bio);
return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
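
After this refactor every mapping path in blk-map.c allocates its bio from fs_bio_set via blk_rq_map_bio_alloc(), so blk_mq_map_bio_put() reduces to bio_put() and the scattered bio_uninit()/kfree() pairs disappear. A hedged sketch of how a driver might drive the new blk_rq_map_kern() signature — helper name and queue setup are assumptions, not part of this patch:

	/* Hypothetical passthrough helper; queue setup and naming are assumed. */
	static int my_exec_kbuf(struct request_queue *q, void *kbuf,
				unsigned int len)
	{
		struct request *rq;
		blk_status_t status;
		int ret;

		rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		/* Internally picks bio_copy_kern() or bio_map_kern() as above. */
		ret = blk_rq_map_kern(rq, kbuf, len, GFP_KERNEL);
		if (ret)
			goto out;

		status = blk_execute_rq(rq, false);
		ret = blk_status_to_errno(status);
	out:
		blk_mq_free_request(rq);
		return ret;
	}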
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4e96bb246247..bd8b11c472a2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -23,6 +23,7 @@
#include <linux/cache.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
+#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
@@ -3718,6 +3719,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{
struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
struct blk_mq_hw_ctx, cpuhp_online);
+ int ret = 0;
if (blk_mq_hctx_has_online_cpu(hctx, cpu))
return 0;
@@ -3738,12 +3740,24 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
* frozen and there are no requests.
*/
if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
- while (blk_mq_hctx_has_requests(hctx))
+ while (blk_mq_hctx_has_requests(hctx)) {
+ /*
+ * The wakeup-capable IRQ handler of a block device is not
+ * called during suspend. Bail out of the loop when
+ * pm_wakeup_pending() is set, both to avoid a deadlock and
+ * to improve suspend latency.
+ */
+ if (pm_wakeup_pending()) {
+ clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
+ ret = -EBUSY;
+ break;
+ }
msleep(5);
+ }
percpu_ref_put(&hctx->queue->q_usage_counter);
}
- return 0;
+ return ret;
}
/*
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index dcc295721c2c..394d8d74bba9 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -741,6 +741,8 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
{
struct bio *bio;
+ lockdep_assert_held(&zwplug->lock);
+
if (bio_list_empty(&zwplug->bio_list))
return;
@@ -748,6 +750,8 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
zwplug->disk->disk_name, zwplug->zone_no);
while ((bio = bio_list_pop(&zwplug->bio_list)))
blk_zone_wplug_bio_io_error(zwplug, bio);
+
+ zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
}
/*
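
The new lockdep_assert_held() documents the locking contract of disk_zone_wplug_abort() and, under CONFIG_PROVE_LOCKING, catches any caller that invokes it without zwplug->lock; the second hunk additionally clears BLK_ZONE_WPLUG_PLUGGED once the bio list is drained. A minimal sketch of the assertion pattern, using a hypothetical structure rather than the zone write-plug code itself:

	struct my_plug {
		spinlock_t lock;
		struct bio_list bio_list;
	};

	static void my_plug_abort(struct my_plug *plug)
	{
		struct bio *bio;

		/* Splats under CONFIG_PROVE_LOCKING if a caller forgot the lock. */
		lockdep_assert_held(&plug->lock);

		while ((bio = bio_list_pop(&plug->bio_list)))
			bio_io_error(bio);
	}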
diff --git a/block/fops.c b/block/fops.c
index 4dad9c2d5796..4d32785b31d9 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -184,8 +184,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos = iocb->ki_pos;
int ret = 0;
- if (iocb->ki_flags & IOCB_ALLOC_CACHE)
- opf |= REQ_ALLOC_CACHE;
bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
&blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
@@ -333,8 +331,6 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
loff_t pos = iocb->ki_pos;
int ret = 0;
- if (iocb->ki_flags & IOCB_ALLOC_CACHE)
- opf |= REQ_ALLOC_CACHE;
bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
&blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
diff --git a/block/ioctl.c b/block/ioctl.c
index 2b3ab9bfc413..61feed686418 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -423,6 +423,86 @@ static int blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode,
return ops->pr_clear(bdev, c.key);
}
+static int blkdev_pr_read_keys(struct block_device *bdev, blk_mode_t mode,
+ struct pr_read_keys __user *arg)
+{
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_keys *keys_info;
+ struct pr_read_keys read_keys;
+ u64 __user *keys_ptr;
+ size_t keys_info_len;
+ size_t keys_copy_len;
+ int ret;
+
+ if (!blkdev_pr_allowed(bdev, mode))
+ return -EPERM;
+ if (!ops || !ops->pr_read_keys)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&read_keys, arg, sizeof(read_keys)))
+ return -EFAULT;
+
+ keys_info_len = struct_size(keys_info, keys, read_keys.num_keys);
+ if (keys_info_len == SIZE_MAX)
+ return -EINVAL;
+
+ keys_info = kzalloc(keys_info_len, GFP_KERNEL);
+ if (!keys_info)
+ return -ENOMEM;
+
+ keys_info->num_keys = read_keys.num_keys;
+
+ ret = ops->pr_read_keys(bdev, keys_info);
+ if (ret)
+ goto out;
+
+ /* Copy out individual keys */
+ keys_ptr = u64_to_user_ptr(read_keys.keys_ptr);
+ keys_copy_len = min(read_keys.num_keys, keys_info->num_keys) *
+ sizeof(keys_info->keys[0]);
+
+ if (copy_to_user(keys_ptr, keys_info->keys, keys_copy_len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Copy out the arg struct */
+ read_keys.generation = keys_info->generation;
+ read_keys.num_keys = keys_info->num_keys;
+
+ if (copy_to_user(arg, &read_keys, sizeof(read_keys)))
+ ret = -EFAULT;
+out:
+ kfree(keys_info);
+ return ret;
+}
+
+static int blkdev_pr_read_reservation(struct block_device *bdev,
+ blk_mode_t mode, struct pr_read_reservation __user *arg)
+{
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_held_reservation rsv = {};
+ struct pr_read_reservation out = {};
+ int ret;
+
+ if (!blkdev_pr_allowed(bdev, mode))
+ return -EPERM;
+ if (!ops || !ops->pr_read_reservation)
+ return -EOPNOTSUPP;
+
+ ret = ops->pr_read_reservation(bdev, &rsv);
+ if (ret)
+ return ret;
+
+ out.key = rsv.key;
+ out.generation = rsv.generation;
+ out.type = rsv.type;
+
+ if (copy_to_user(arg, &out, sizeof(out)))
+ return -EFAULT;
+ return 0;
+}
+
static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
unsigned long arg)
{
@@ -645,6 +725,10 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
return blkdev_pr_preempt(bdev, mode, argp, true);
case IOC_PR_CLEAR:
return blkdev_pr_clear(bdev, mode, argp);
+ case IOC_PR_READ_KEYS:
+ return blkdev_pr_read_keys(bdev, mode, argp);
+ case IOC_PR_READ_RESERVATION:
+ return blkdev_pr_read_reservation(bdev, mode, argp);
default:
return blk_get_meta_cap(bdev, cmd, argp);
}
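
blkdev_pr_read_keys() relies on struct_size() saturating to SIZE_MAX when the flexible-array size computation overflows, so a single equality check rejects a hostile num_keys. The same idiom, isolated as a hedged sketch with a hypothetical struct:

	struct my_keys {
		u32 generation;
		u32 num_keys;
		u64 keys[];		/* flexible array, num_keys entries */
	};

	static struct my_keys *my_keys_alloc(u32 num_keys)
	{
		struct my_keys *info;
		size_t len = struct_size(info, keys, num_keys);

		if (len == SIZE_MAX)	/* struct_size() saturates on overflow */
			return NULL;

		info = kzalloc(len, GFP_KERNEL);
		if (info)
			info->num_keys = num_keys;
		return info;
	}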
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ebe751f39742..272bc608e528 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -348,11 +348,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
struct file *file = lo->lo_backing_file;
struct bio_vec tmp;
unsigned int offset;
- int nr_bvec = 0;
+ unsigned int nr_bvec;
int ret;
- rq_for_each_bvec(tmp, rq, rq_iter)
- nr_bvec++;
+ nr_bvec = blk_rq_nr_bvec(rq);
if (rq->bio != rq->biotail) {
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
index 3f50321aa4a7..77bd6081b244 100644
--- a/drivers/block/zloop.c
+++ b/drivers/block/zloop.c
@@ -394,7 +394,7 @@ static void zloop_rw(struct zloop_cmd *cmd)
struct bio_vec tmp;
unsigned long flags;
sector_t zone_end;
- int nr_bvec = 0;
+ unsigned int nr_bvec;
int ret;
atomic_set(&cmd->ref, 2);
@@ -487,8 +487,7 @@ static void zloop_rw(struct zloop_cmd *cmd)
spin_unlock_irqrestore(&zone->wp_lock, flags);
}
- rq_for_each_bvec(tmp, rq, rq_iter)
- nr_bvec++;
+ nr_bvec = blk_rq_nr_bvec(rq);
if (rq->bio != rq->biotail) {
struct bio_vec *bvec;
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index a01178caf15b..8f3ccb317e4d 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -1122,7 +1122,7 @@ void nvme_auth_free(struct nvme_ctrl *ctrl)
if (ctrl->dhchap_ctxs) {
for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
- kfree(ctrl->dhchap_ctxs);
+ kvfree(ctrl->dhchap_ctxs);
}
if (ctrl->host_key) {
nvme_auth_free_key(ctrl->host_key);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 2e58a7ce1090..55a8afd2efd5 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -592,7 +592,7 @@ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
if (status > 0 && (status & NVME_STATUS_DNR))
return false;
- if (status == -EKEYREJECTED)
+ if (status == -EKEYREJECTED || status == -ENOKEY)
return false;
if (ctrl->opts->max_reconnects == -1 ||
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 873954d43b18..bc455fa98246 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -520,6 +520,8 @@ nvme_fc_free_rport(struct kref *ref)
WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
WARN_ON(!list_empty(&rport->ctrl_list));
+ WARN_ON(!list_empty(&rport->ls_req_list));
+ WARN_ON(!list_empty(&rport->ls_rcv_list));
/* remove from lport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
@@ -1468,14 +1470,14 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
{
struct fcnvme_ls_disconnect_assoc_rqst *rqst =
&lsop->rqstbuf->rq_dis_assoc;
- struct nvme_fc_ctrl *ctrl, *ret = NULL;
+ struct nvme_fc_ctrl *ctrl, *tmp, *ret = NULL;
struct nvmefc_ls_rcv_op *oldls = NULL;
u64 association_id = be64_to_cpu(rqst->associd.association_id);
unsigned long flags;
spin_lock_irqsave(&rport->lock, flags);
- list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ list_for_each_entry_safe(ctrl, tmp, &rport->ctrl_list, ctrl_list) {
if (!nvme_fc_ctrl_get(ctrl))
continue;
spin_lock(&ctrl->lock);
@@ -1488,7 +1490,9 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
if (ret)
/* leave the ctrl get reference */
break;
+ spin_unlock_irqrestore(&rport->lock, flags);
nvme_fc_ctrl_put(ctrl);
+ spin_lock_irqsave(&rport->lock, flags);
}
spin_unlock_irqrestore(&rport->lock, flags);
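
The fc.c change drops rport->lock around nvme_fc_ctrl_put(), since the final put may free the controller and must not run under the lock; once the lock can be released mid-walk, a plain list_for_each_entry() cursor may go stale, hence the switch to list_for_each_entry_safe(). The general shape of that pattern, as a hedged sketch with hypothetical types and helpers:

	struct my_entry {
		struct list_head node;
		struct kref ref;
		u64 id;
	};

	static void my_entry_release(struct kref *ref)
	{
		kfree(container_of(ref, struct my_entry, ref));
	}

	static struct my_entry *my_find_and_get(struct list_head *head,
			spinlock_t *lock, u64 id)
	{
		struct my_entry *entry, *tmp, *found = NULL;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		list_for_each_entry_safe(entry, tmp, head, node) {
			if (!kref_get_unless_zero(&entry->ref))
				continue;
			if (entry->id == id) {
				found = entry;	/* keep the reference */
				break;
			}
			/*
			 * The put may free and unlink @entry, and must not
			 * run under the lock; @tmp keeps the cursor valid
			 * across the unlock/relock window.
			 */
			spin_unlock_irqrestore(lock, flags);
			kref_put(&entry->ref, my_entry_release);
			spin_lock_irqsave(lock, flags);
		}
		spin_unlock_irqrestore(lock, flags);
		return found;
	}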
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 4fa8400a5627..a9c097dacad6 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -447,7 +447,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct iov_iter iter;
struct iov_iter *map_iter = NULL;
struct request *req;
- blk_opf_t rq_flags = REQ_ALLOC_CACHE;
+ blk_opf_t rq_flags = 0;
blk_mq_req_flags_t blk_flags = 0;
int ret;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e5ca8301bb8b..0e4caeab739c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2984,6 +2984,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
pci_set_master(pdev);
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
+ dev_dbg(dev->ctrl.device, "reading CSTS register failed\n");
result = -ENODEV;
goto disable;
}
@@ -3609,6 +3610,7 @@ out_uninit_ctrl:
nvme_uninit_ctrl(&dev->ctrl);
out_put_ctrl:
nvme_put_ctrl(&dev->ctrl);
+ dev_err_probe(&pdev->dev, result, "probe failed\n");
return result;
}
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index ca6a74607b13..ad2ecc2f49a9 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -228,7 +228,8 @@ retry:
static int nvme_pr_read_keys(struct block_device *bdev,
struct pr_keys *keys_info)
{
- u32 rse_len, num_keys = keys_info->num_keys;
+ size_t rse_len;
+ u32 num_keys = keys_info->num_keys;
struct nvme_reservation_status_ext *rse;
int ret, i;
bool eds;
@@ -238,6 +239,9 @@ static int nvme_pr_read_keys(struct block_device *bdev,
* enough to get enough keys to fill the return keys buffer.
*/
rse_len = struct_size(rse, regctl_eds, num_keys);
+ if (rse_len > U32_MAX)
+ return -EINVAL;
+
rse = kzalloc(rse_len, GFP_KERNEL);
if (!rse)
return -ENOMEM;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 3e378153a781..3da31bb1183e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -708,7 +708,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/*
* We don't really have a practical limit on the number of abort
- * comands. But we don't do anything useful for abort either, so
+ * commands. But we don't do anything useful for abort either, so
* no point in allowing more abort commands than the spec requires.
*/
id->acl = 3;
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 300d5e032f6d..2eadeb7e06f2 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -381,8 +381,8 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
- ret = crypto_shash_update(shash, ctrl->subsysnqn,
- strlen(ctrl->subsysnqn));
+ ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
+ strlen(ctrl->subsys->subsysnqn));
if (ret)
goto out;
ret = crypto_shash_final(shash, response);
@@ -429,7 +429,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
}
transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
- ctrl->subsysnqn);
+ ctrl->subsys->subsysnqn);
if (IS_ERR(transformed_key)) {
ret = PTR_ERR(transformed_key);
goto out_free_tfm;
@@ -484,8 +484,8 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, "Controller", 10);
if (ret)
goto out;
- ret = crypto_shash_update(shash, ctrl->subsysnqn,
- strlen(ctrl->subsysnqn));
+ ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn,
+ strlen(ctrl->subsys->subsysnqn));
if (ret)
goto out;
ret = crypto_shash_update(shash, buf, 1);
@@ -575,7 +575,7 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
return;
}
ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len,
- sq->ctrl->subsysnqn,
+ sq->ctrl->subsys->subsysnqn,
sq->ctrl->hostnqn, &digest);
if (ret) {
pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n",
@@ -590,8 +590,10 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq)
goto out_free_digest;
}
#ifdef CONFIG_NVME_TARGET_TCP_TLS
- tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, sq->ctrl->subsysnqn,
- sq->ctrl->shash_id, tls_psk, psk_len, digest);
+ tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn,
+ sq->ctrl->subsys->subsysnqn,
+ sq->ctrl->shash_id, tls_psk, psk_len,
+ digest);
if (IS_ERR(tls_key)) {
pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
__func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 5d7d483bfbe3..cc88e5a28c8a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(nvmet_wq);
* - the nvmet_transports array
*
* When updating any of those lists/structures write lock should be obtained,
- * while when reading (popolating discovery log page or checking host-subsystem
+ * while when reading (populating discovery log page or checking host-subsystem
* link) read lock is obtained to allow concurrent reads.
*/
DECLARE_RWSEM(nvmet_config_sem);
@@ -1628,7 +1628,6 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
- memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
kref_init(&ctrl->ref);
@@ -1903,6 +1902,8 @@ static void nvmet_subsys_free(struct kref *ref)
struct nvmet_subsys *subsys =
container_of(ref, struct nvmet_subsys, ref);
+ WARN_ON_ONCE(!list_empty(&subsys->ctrls));
+ WARN_ON_ONCE(!list_empty(&subsys->hosts));
WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
nvmet_debugfs_subsys_free(subsys);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 7d84527d5a43..0d9784004c9b 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -490,8 +490,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
sizeof(*discon_rqst) + sizeof(*discon_acc) +
tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
if (!lsop) {
- dev_info(tgtport->dev,
- "{%d:%d} send Disconnect Association failed: ENOMEM\n",
+ pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n",
tgtport->fc_target_port.port_num, assoc->a_id);
return;
}
@@ -513,8 +512,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
nvmet_fc_disconnect_assoc_done);
if (ret) {
- dev_info(tgtport->dev,
- "{%d:%d} XMT Disconnect Association failed: %d\n",
+ pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n",
tgtport->fc_target_port.port_num, assoc->a_id, ret);
kfree(lsop);
}
@@ -1187,8 +1185,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
if (oldls)
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
ida_free(&tgtport->assoc_cnt, assoc->a_id);
- dev_info(tgtport->dev,
- "{%d:%d} Association freed\n",
+ pr_info("{%d:%d}: Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
kfree(assoc);
}
@@ -1224,8 +1221,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
flush_workqueue(assoc->queues[i]->work_q);
}
- dev_info(tgtport->dev,
- "{%d:%d} Association deleted\n",
+ pr_info("{%d:%d}: Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
nvmet_fc_tgtport_put(tgtport);
@@ -1716,9 +1712,9 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
}
if (ret) {
- dev_err(tgtport->dev,
- "Create Association LS failed: %s\n",
- validation_errors[ret]);
+ pr_err("{%d}: Create Association LS failed: %s\n",
+ tgtport->fc_target_port.port_num,
+ validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd,
FCNVME_RJT_RC_LOGIC,
@@ -1730,8 +1726,7 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
atomic_set(&queue->connected, 1);
queue->sqhd = 0; /* best place to init value */
- dev_info(tgtport->dev,
- "{%d:%d} Association created\n",
+ pr_info("{%d:%d}: Association created\n",
tgtport->fc_target_port.port_num, iod->assoc->a_id);
/* format a response */
@@ -1809,9 +1804,9 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
}
if (ret) {
- dev_err(tgtport->dev,
- "Create Connection LS failed: %s\n",
- validation_errors[ret]);
+ pr_err("{%d}: Create Connection LS failed: %s\n",
+ tgtport->fc_target_port.port_num,
+ validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
@@ -1871,9 +1866,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
}
if (ret || !assoc) {
- dev_err(tgtport->dev,
- "Disconnect LS failed: %s\n",
- validation_errors[ret]);
+ pr_err("{%d}: Disconnect LS failed: %s\n",
+ tgtport->fc_target_port.port_num,
+ validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
@@ -1907,8 +1902,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
spin_unlock_irqrestore(&tgtport->lock, flags);
if (oldls) {
- dev_info(tgtport->dev,
- "{%d:%d} Multiple Disconnect Association LS's "
+ pr_info("{%d:%d}: Multiple Disconnect Association LS's "
"received\n",
tgtport->fc_target_port.port_num, assoc->a_id);
/* overwrite good response with bogus failure */
@@ -2051,8 +2045,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
- dev_info(tgtport->dev,
- "RCV %s LS failed: payload too large (%d)\n",
+ pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n",
+ tgtport->fc_target_port.port_num,
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "",
lsreqbuf_len);
@@ -2060,8 +2054,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
}
if (!nvmet_fc_tgtport_get(tgtport)) {
- dev_info(tgtport->dev,
- "RCV %s LS failed: target deleting\n",
+ pr_info("{%d}: RCV %s LS failed: target deleting\n",
+ tgtport->fc_target_port.port_num,
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
return -ESHUTDOWN;
@@ -2069,8 +2063,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
iod = nvmet_fc_alloc_ls_iod(tgtport);
if (!iod) {
- dev_info(tgtport->dev,
- "RCV %s LS failed: context allocation failed\n",
+ pr_info("{%d}: RCV %s LS failed: context allocation failed\n",
+ tgtport->fc_target_port.port_num,
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
nvmet_fc_tgtport_put(tgtport);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 5dffcc5becae..c30e9a3e014f 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -254,7 +254,6 @@ struct fcloop_nport {
struct fcloop_lsreq {
struct nvmefc_ls_req *lsreq;
struct nvmefc_ls_rsp ls_rsp;
- int lsdir; /* H2T or T2H */
int status;
struct list_head ls_list; /* fcloop_rport->ls_list */
};
@@ -1111,8 +1110,10 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
rport->nport->rport = NULL;
spin_unlock_irqrestore(&fcloop_lock, flags);
- if (put_port)
+ if (put_port) {
+ WARN_ON(!list_empty(&rport->ls_list));
fcloop_nport_put(rport->nport);
+ }
}
static void
@@ -1130,8 +1131,10 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
tport->nport->tport = NULL;
spin_unlock_irqrestore(&fcloop_lock, flags);
- if (put_port)
+ if (put_port) {
+ WARN_ON(!list_empty(&tport->ls_list));
fcloop_nport_put(tport->nport);
+ }
}
#define FCLOOP_HW_QUEUES 4
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index f3b09f4099f0..b664b584fdc8 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -285,7 +285,6 @@ struct nvmet_ctrl {
__le32 *changed_ns_list;
u32 nr_changed_ns;
- char subsysnqn[NVMF_NQN_FIELD_LEN];
char hostnqn[NVMF_NQN_FIELD_LEN];
struct device *p2p_client;
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 0c361b1e3566..96648ec2fadb 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -150,7 +150,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
* code path with duplicate ctrl subsysnqn. In order to prevent that we
* mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
*/
- memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
+ memcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
/* use fabric id-ctrl values */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 2e78397a7373..f858a6c9d7cb 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -320,12 +320,14 @@ static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
nvme_epf->dma_enabled = true;
dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
- dma_chan_name(chan),
- dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+ dma_chan_name(nvme_epf->dma_rx_chan),
+ dma_get_max_seg_size(dmaengine_get_dma_device(
+ nvme_epf->dma_rx_chan)));
dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
- dma_chan_name(chan),
- dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
+ dma_chan_name(nvme_epf->dma_tx_chan),
+ dma_get_max_seg_size(dmaengine_get_dma_device(
+ nvme_epf->dma_tx_chan)));
return;
@@ -2325,6 +2327,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
return ret;
}
+ nvmet_pci_epf_init_dma(nvme_epf);
+
/* Set device ID, class, etc. */
epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
@@ -2422,8 +2426,6 @@ static int nvmet_pci_epf_bind(struct pci_epf *epf)
if (ret)
return ret;
- nvmet_pci_epf_init_dma(nvme_epf);
-
return 0;
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 0485e25ab797..9c12b2361a6d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -367,7 +367,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *cmds;
int ret = -EINVAL, i;
- cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
+ cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
if (!cmds)
goto out;
@@ -382,7 +382,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
out_free:
while (--i >= 0)
nvmet_rdma_free_cmd(ndev, cmds + i, admin);
- kfree(cmds);
+ kvfree(cmds);
out:
return ERR_PTR(ret);
}
@@ -394,7 +394,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
for (i = 0; i < nr_cmds; i++)
nvmet_rdma_free_cmd(ndev, cmds + i, admin);
- kfree(cmds);
+ kvfree(cmds);
}
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
@@ -455,7 +455,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
NUMA_NO_NODE, false, true))
goto out;
- queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
+ queue->rsps = kvcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
GFP_KERNEL);
if (!queue->rsps)
goto out_free_sbitmap;
@@ -473,7 +473,7 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
out_free:
while (--i >= 0)
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
- kfree(queue->rsps);
+ kvfree(queue->rsps);
out_free_sbitmap:
sbitmap_free(&queue->rsp_tags);
out:
@@ -487,7 +487,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
for (i = 0; i < nr_rsps; i++)
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
- kfree(queue->rsps);
+ kvfree(queue->rsps);
sbitmap_free(&queue->rsp_tags);
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d543da09ef8e..15416ff0eac4 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1484,7 +1484,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
struct nvmet_tcp_cmd *cmds;
int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
- cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
+ cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
if (!cmds)
goto out;
@@ -1500,7 +1500,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
out_free:
while (--i >= 0)
nvmet_tcp_free_cmd(cmds + i);
- kfree(cmds);
+ kvfree(cmds);
out:
return ret;
}
@@ -1514,7 +1514,7 @@ static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
nvmet_tcp_free_cmd(cmds + i);
nvmet_tcp_free_cmd(&queue->connect);
- kfree(cmds);
+ kvfree(cmds);
}
static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
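
The kcalloc()-to-kvcalloc() conversions here and in rdma.c above let large per-queue command arrays fall back to vmalloc() when the queue depth makes a physically contiguous allocation unreliable; every allocation converted this way must then be released with kvfree(). A minimal sketch of the pairing, with hypothetical helper names:

	/* Hypothetical helpers: kvcalloc() may return vmalloc()-backed memory. */
	static struct nvmet_tcp_cmd *my_alloc_cmds(int nr_cmds)
	{
		return kvcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd),
				GFP_KERNEL);
	}

	static void my_free_cmds(struct nvmet_tcp_cmd *cmds)
	{
		kvfree(cmds);	/* correct for both kmalloc and vmalloc backing */
	}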
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f2c0744b4480..f50b92e63201 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2004,9 +2004,19 @@ static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info)
{
int result, i, data_offset, num_copy_keys;
u32 num_keys = keys_info->num_keys;
- int data_len = num_keys * 8 + 8;
+ int data_len;
u8 *data;
+ /*
+ * Each reservation key takes 8 bytes and there is an 8-byte header
+ * before the reservation key list. The total size must fit into the
+ * 16-bit ALLOCATION LENGTH field.
+ */
+ if (check_mul_overflow(num_keys, 8, &data_len) ||
+ check_add_overflow(data_len, 8, &data_len) ||
+ data_len > USHRT_MAX)
+ return -EINVAL;
+
data = kzalloc(data_len, GFP_KERNEL);
if (!data)
return -ENOMEM;
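
check_mul_overflow() and check_add_overflow() from <linux/overflow.h> evaluate to true on overflow and otherwise store the result through the last argument, so the chain above rejects any num_keys whose 8-bytes-per-key payload plus 8-byte header cannot fit the 16-bit ALLOCATION LENGTH field. The same guard, isolated as a hedged helper with a hypothetical name:

	static int pr_keys_data_len(u32 num_keys, int *data_len)
	{
		int len;

		if (check_mul_overflow(num_keys, 8, &len) ||	/* 8 bytes per key */
		    check_add_overflow(len, 8, &len) ||		/* + 8-byte header */
		    len > USHRT_MAX)		/* 16-bit ALLOCATION LENGTH */
			return -EINVAL;

		*data_len = len;
		return 0;
	}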
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index eb7254b3dddd..cae9e857aea4 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -1213,6 +1213,24 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
return max_t(unsigned short, rq->nr_phys_segments, 1);
}
+/**
+ * blk_rq_nr_bvec - return number of bvecs in a request
+ * @rq: request to calculate bvecs for
+ *
+ * Returns the number of bvecs.
+ */
+static inline unsigned int blk_rq_nr_bvec(struct request *rq)
+{
+ struct req_iterator rq_iter;
+ struct bio_vec bv;
+ unsigned int nr_bvec = 0;
+
+ rq_for_each_bvec(bv, rq, rq_iter)
+ nr_bvec++;
+
+ return nr_bvec;
+}
+
int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
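
blk_rq_nr_bvec() folds the counting loop that loop.c and zloop.c previously open-coded into one helper. A typical caller sizes a bio_vec array before walking the request, roughly as in this hedged sketch (hypothetical function, modeled on lo_rw_aio()):

	/* Hypothetical driver snippet: flatten a multi-bio request's bvecs. */
	static struct bio_vec *my_collect_bvecs(struct request *rq,
						unsigned int *out_nr)
	{
		unsigned int nr_bvec = blk_rq_nr_bvec(rq);
		struct bio_vec *bvecs, *bv;
		struct req_iterator iter;
		struct bio_vec tmp;

		bvecs = kmalloc_array(nr_bvec, sizeof(*bvecs), GFP_NOIO);
		if (!bvecs)
			return NULL;

		bv = bvecs;
		rq_for_each_bvec(tmp, rq, iter)
			*bv++ = tmp;

		*out_nr = nr_bvec;
		return bvecs;
	}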
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index cbbcb9051ec3..5dc061d318a4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -479,10 +479,7 @@ static inline bool op_is_discard(blk_opf_t op)
}
/*
- * Check if a bio or request operation is a zone management operation, with
- * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
- * due to its different handling in the block layer and device response in
- * case of command failure.
+ * Check if a bio or request operation is a zone management operation.
*/
static inline bool op_is_zone_mgmt(enum req_op op)
{
diff --git a/include/uapi/linux/pr.h b/include/uapi/linux/pr.h
index d8126415966f..847f3051057a 100644
--- a/include/uapi/linux/pr.h
+++ b/include/uapi/linux/pr.h
@@ -56,6 +56,18 @@ struct pr_clear {
__u32 __pad;
};
+struct pr_read_keys {
+ __u32 generation;
+ __u32 num_keys;
+ __u64 keys_ptr;
+};
+
+struct pr_read_reservation {
+ __u64 key;
+ __u32 generation;
+ __u32 type;
+};
+
#define PR_FL_IGNORE_KEY (1 << 0) /* ignore existing key */
#define IOC_PR_REGISTER _IOW('p', 200, struct pr_registration)
@@ -64,5 +76,7 @@ struct pr_clear {
#define IOC_PR_PREEMPT _IOW('p', 203, struct pr_preempt)
#define IOC_PR_PREEMPT_ABORT _IOW('p', 204, struct pr_preempt)
#define IOC_PR_CLEAR _IOW('p', 205, struct pr_clear)
+#define IOC_PR_READ_KEYS _IOWR('p', 206, struct pr_read_keys)
+#define IOC_PR_READ_RESERVATION _IOR('p', 207, struct pr_read_reservation)
#endif /* _UAPI_PR_H */
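
IOC_PR_READ_KEYS is _IOWR because userspace passes in num_keys and keys_ptr and the kernel writes back generation plus the true number of registered keys (copying out at most the requested count, so the caller can retry with a larger buffer), while IOC_PR_READ_RESERVATION only reads. A hedged userspace sketch, with error handling trimmed:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/pr.h>

	int main(int argc, char **argv)
	{
		uint64_t keys[16];
		struct pr_read_keys rk = {
			.num_keys = 16,
			.keys_ptr = (uintptr_t)keys,
		};
		struct pr_read_reservation rr;
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDWR);
		if (fd < 0 || ioctl(fd, IOC_PR_READ_KEYS, &rk) < 0)
			return 1;
		printf("generation %u, %u registered keys\n",
		       rk.generation, rk.num_keys);

		if (ioctl(fd, IOC_PR_READ_RESERVATION, &rr) == 0)
			printf("holder key 0x%llx type %u\n",
			       (unsigned long long)rr.key, rr.type);
		close(fd);
		return 0;
	}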
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 331af6bf4234..70ca88cc1f54 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -855,7 +855,6 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
if (unlikely(ret))
return ret;
- kiocb->ki_flags |= IOCB_ALLOC_CACHE;
/*
* If the file is marked O_NONBLOCK, still allow retry for it if it