Diffstat (limited to 'block')
-rw-r--r--  block/bio-integrity.c       17
-rw-r--r--  block/blk-core.c             2
-rw-r--r--  block/blk-crypto-profile.c   4
-rw-r--r--  block/blk-integrity.c        7
-rw-r--r--  block/blk-iolatency.c        3
-rw-r--r--  block/blk-merge.c           26
-rw-r--r--  block/blk-stat.c             2
-rw-r--r--  block/blk-sysfs.c            1
-rw-r--r--  block/blk-throttle.c         3
-rw-r--r--  block/blk-zoned.c            8
-rw-r--r--  block/elevator.c            19
-rw-r--r--  block/genhd.c               26
-rw-r--r--  block/kyber-iosched.c        2
13 files changed, 65 insertions, 55 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index cb94e9be26dc..10912988c8f5 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -154,10 +154,9 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL(bio_integrity_add_page);
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
- int nr_vecs, unsigned int len,
- unsigned int direction)
+ int nr_vecs, unsigned int len)
{
- bool write = direction == ITER_SOURCE;
+ bool write = op_is_write(bio_op(bio));
struct bio_integrity_payload *bip;
struct iov_iter iter;
void *buf;
@@ -168,7 +167,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
return -ENOMEM;
if (write) {
- iov_iter_bvec(&iter, direction, bvec, nr_vecs, len);
+ iov_iter_bvec(&iter, ITER_SOURCE, bvec, nr_vecs, len);
if (!copy_from_iter_full(buf, len, &iter)) {
ret = -EFAULT;
goto free_buf;
@@ -264,7 +263,7 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
size_t offset, bytes = iter->count;
- unsigned int direction, nr_bvecs;
+ unsigned int nr_bvecs;
int ret, nr_vecs;
bool copy;
@@ -273,11 +272,6 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
return -E2BIG;
- if (bio_data_dir(bio) == READ)
- direction = ITER_DEST;
- else
- direction = ITER_SOURCE;
-
nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS + 1);
if (nr_vecs > BIO_MAX_VECS)
return -E2BIG;
@@ -300,8 +294,7 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
copy = true;
if (copy)
- ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
- direction);
+ ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes);
else
ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes);
if (ret)
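The hunks above drop the explicit direction argument from the call chain: the copy helper can infer read vs. write from the bio itself via op_is_write(bio_op(bio)). A minimal userspace sketch of that refactor, with illustrative types that only mimic the kernel's bio and op helpers:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel types; not the real bio API. */
enum op { OP_READ, OP_WRITE };
struct bio_sketch { enum op op; };

static bool op_is_write(enum op op)
{
	return op == OP_WRITE;
}

/* Before the patch this took an extra 'direction' parameter and
 * computed: bool write = direction == ITER_SOURCE; */
static int integrity_copy_user(struct bio_sketch *bio, size_t len)
{
	bool write = op_is_write(bio->op);

	printf("%s %zu bytes of integrity metadata\n",
	       write ? "copying in" : "will copy out", len);
	return 0;
}

int main(void)
{
	struct bio_sketch bio = { .op = OP_WRITE };

	return integrity_copy_user(&bio, 4096);
}
```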
diff --git a/block/blk-core.c b/block/blk-core.c
index b862c66018f2..fdac48aec5ef 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -381,7 +381,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
static void blk_rq_timed_out_timer(struct timer_list *t)
{
- struct request_queue *q = from_timer(q, t, timeout);
+ struct request_queue *q = timer_container_of(q, t, timeout);
kblockd_schedule_work(&q->timeout_work);
}
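timer_container_of() is a rename of from_timer() with the same container_of() mechanics underneath: recover the enclosing structure from a pointer to its embedded timer_list. The same mechanical conversion appears below in blk-iolatency.c, blk-stat.c, blk-throttle.c, and kyber-iosched.c. A runnable userspace sketch of the pointer arithmetic, with a pared-down queue type standing in for request_queue:

```c
#include <stddef.h>
#include <stdio.h>

/* Userspace re-creation of the kernel's container_of(), which
 * timer_container_of() wraps. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { unsigned long expires; };

struct queue_sketch {
	int id;
	struct timer_list timeout;
};

static void rq_timed_out(struct timer_list *t)
{
	/* timer_container_of(q, t, timeout) expands to this pattern. */
	struct queue_sketch *q = container_of(t, struct queue_sketch, timeout);

	printf("queue %d timed out\n", q->id);
}

int main(void)
{
	struct queue_sketch q = { .id = 42 };

	rq_timed_out(&q.timeout);
	return 0;
}
```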
diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
index 94a155912bf1..81918f6e0cae 100644
--- a/block/blk-crypto-profile.c
+++ b/block/blk-crypto-profile.c
@@ -501,6 +501,7 @@ int blk_crypto_derive_sw_secret(struct block_device *bdev,
blk_crypto_hw_exit(profile);
return err;
}
+EXPORT_SYMBOL_GPL(blk_crypto_derive_sw_secret);
int blk_crypto_import_key(struct blk_crypto_profile *profile,
const u8 *raw_key, size_t raw_key_size,
@@ -520,6 +521,7 @@ int blk_crypto_import_key(struct blk_crypto_profile *profile,
blk_crypto_hw_exit(profile);
return ret;
}
+EXPORT_SYMBOL_GPL(blk_crypto_import_key);
int blk_crypto_generate_key(struct blk_crypto_profile *profile,
u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
@@ -537,6 +539,7 @@ int blk_crypto_generate_key(struct blk_crypto_profile *profile,
blk_crypto_hw_exit(profile);
return ret;
}
+EXPORT_SYMBOL_GPL(blk_crypto_generate_key);
int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
const u8 *lt_key, size_t lt_key_size,
@@ -556,6 +559,7 @@ int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
blk_crypto_hw_exit(profile);
return ret;
}
+EXPORT_SYMBOL_GPL(blk_crypto_prepare_key);
/**
* blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
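The added EXPORT_SYMBOL_GPL() lines make these hardware-wrapped-key helpers callable from GPL-licensed modules. A skeleton of a consumer module, hedged: the init/exit bodies are placeholders, and only the export/licensing mechanics are the point — a module without MODULE_LICENSE("GPL") cannot resolve GPL-only exports:

```c
#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* A real driver would call blk_crypto_import_key(),
	 * blk_crypto_generate_key(), etc. on its blk_crypto_profile here. */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Sketch of a consumer of the blk-crypto GPL exports");
MODULE_LICENSE("GPL"); /* required to link against EXPORT_SYMBOL_GPL symbols */
```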
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index a1678f0a9f81..e4e2567061f9 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -117,13 +117,8 @@ int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
{
int ret;
struct iov_iter iter;
- unsigned int direction;
- if (op_is_write(req_op(rq)))
- direction = ITER_DEST;
- else
- direction = ITER_SOURCE;
- iov_iter_ubuf(&iter, direction, ubuf, bytes);
+ iov_iter_ubuf(&iter, rq_data_dir(rq), ubuf, bytes);
ret = bio_integrity_map_user(rq->bio, &iter);
if (ret)
return ret;
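The simplification leans on an identity in the kernel headers: the data-direction values double as iov_iter directions (READ == ITER_DEST == 0, WRITE == ITER_SOURCE == 1), so rq_data_dir() can be passed straight to iov_iter_ubuf(). A self-contained check of that assumption, with the constants reproduced here rather than pulled from linux/kernel.h and linux/uio.h:

```c
#include <assert.h>

#define READ        0
#define WRITE       1
#define ITER_DEST   0	/* iterator receives data */
#define ITER_SOURCE 1	/* iterator supplies data */

int main(void)
{
	/* rq_data_dir(rq) returns READ or WRITE; it fits where an
	 * iov_iter direction is expected only because these pairs
	 * alias exactly. */
	assert(READ == ITER_DEST);
	assert(WRITE == ITER_SOURCE);
	return 0;
}
```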
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 42c1e0b9a68f..2f8fdecdd7a9 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -658,7 +658,8 @@ static const struct rq_qos_ops blkcg_iolatency_ops = {
static void blkiolatency_timer_fn(struct timer_list *t)
{
- struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
+ struct blk_iolatency *blkiolat = timer_container_of(blkiolat, t,
+ timer);
struct blkcg_gq *blkg;
struct cgroup_subsys_state *pos_css;
u64 now = blk_time_get_ns();
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3af1d284add5..70d704615be5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -998,20 +998,20 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
if (!plug || rq_list_empty(&plug->mq_list))
return false;
- rq_list_for_each(&plug->mq_list, rq) {
- if (rq->q == q) {
- if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
- BIO_MERGE_OK)
- return true;
- break;
- }
+ rq = plug->mq_list.tail;
+ if (rq->q == q)
+ return blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
+ BIO_MERGE_OK;
+ else if (!plug->multiple_queues)
+ return false;
- /*
- * Only keep iterating plug list for merges if we have multiple
- * queues
- */
- if (!plug->multiple_queues)
- break;
+ rq_list_for_each(&plug->mq_list, rq) {
+ if (rq->q != q)
+ continue;
+ if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
+ BIO_MERGE_OK)
+ return true;
+ break;
}
return false;
}
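The rewrite pulls the common case out of the loop: the bio most likely to merge is against the most recently plugged request, so the tail is tried first and the full scan only runs for plugs spanning multiple queues. A userspace sketch of that fast-path/slow-path split, with toy request and plug types standing in for the blk-mq ones:

```c
#include <stdbool.h>
#include <stddef.h>

/* Toy stand-ins: a singly linked plug list of requests, each bound to
 * a queue id. try_merge() is a placeholder for blk_attempt_bio_merge(). */
struct rq { struct rq *next; int q; };
struct plug { struct rq *head; struct rq *tail; bool multiple_queues; };

static bool try_merge(struct rq *rq)
{
	(void)rq;
	return false; /* placeholder: real code checks bio contiguity */
}

static bool attempt_plug_merge(struct plug *plug, int q)
{
	struct rq *rq;

	if (!plug || !plug->head)
		return false;

	/* Fast path: the request most likely to merge is the last one
	 * added, so try the tail before touching the rest of the list. */
	rq = plug->tail;
	if (rq->q == q)
		return try_merge(rq);
	if (!plug->multiple_queues)
		return false;

	/* Slow path: first request on a matching queue, then stop. */
	for (rq = plug->head; rq; rq = rq->next) {
		if (rq->q != q)
			continue;
		return try_merge(rq);
	}
	return false;
}

int main(void)
{
	struct rq r = { .next = NULL, .q = 1 };
	struct plug p = { .head = &r, .tail = &r, .multiple_queues = false };

	attempt_plug_merge(&p, 1); /* tail hit: merge attempt on fast path */
	attempt_plug_merge(&p, 2); /* other queue, single-queue plug: false */
	return 0;
}
```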
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 46449da856f8..682a8ddb1173 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -76,7 +76,7 @@ void blk_stat_add(struct request *rq, u64 now)
static void blk_stat_timer_fn(struct timer_list *t)
{
- struct blk_stat_callback *cb = from_timer(cb, t, timer);
+ struct blk_stat_callback *cb = timer_container_of(cb, t, timer);
unsigned int bucket;
int cpu;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b2b9b89d6967..c611444480b3 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -960,4 +960,5 @@ void blk_unregister_queue(struct gendisk *disk)
elevator_set_none(q);
blk_debugfs_remove(disk);
+ kobject_put(&disk->queue_kobj);
}
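The one-line addition pairs blk_unregister_queue() with the reference held since the queue kobject was registered; without the put, the kobject's release callback never runs. A toy refcount sketch of that rule (illustrative names, not the kobject API):

```c
#include <stdio.h>

struct kobj_sketch {
	int refcount;
	void (*release)(struct kobj_sketch *);
};

static void kobj_put(struct kobj_sketch *k)
{
	if (--k->refcount == 0)
		k->release(k);
}

static void queue_release(struct kobj_sketch *k)
{
	(void)k;
	printf("queue kobject released\n");
}

int main(void)
{
	struct kobj_sketch queue_kobj = {
		.refcount = 1,		/* taken at registration time */
		.release = queue_release,
	};

	/* The added kobject_put() in blk_unregister_queue() plays this
	 * role: drop the registration reference so release can fire. */
	kobj_put(&queue_kobj);
	return 0;
}
```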
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index bd15357f23bd..397b6a410f9e 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1125,7 +1125,8 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
*/
static void throtl_pending_timer_fn(struct timer_list *t)
{
- struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
+ struct throtl_service_queue *sq = timer_container_of(sq, t,
+ pending_timer);
struct throtl_grp *tg = sq_to_tg(sq);
struct throtl_data *td = sq_to_td(sq);
struct throtl_service_queue *parent_sq;
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 8f15d1aa6eb8..351d659280e1 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -1225,6 +1225,7 @@ void blk_zone_write_plug_bio_endio(struct bio *bio)
if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) {
bio->bi_opf &= ~REQ_OP_MASK;
bio->bi_opf |= REQ_OP_ZONE_APPEND;
+ bio_clear_flag(bio, BIO_EMULATES_ZONE_APPEND);
}
/*
@@ -1306,7 +1307,6 @@ again:
spin_unlock_irqrestore(&zwplug->lock, flags);
bdev = bio->bi_bdev;
- submit_bio_noacct_nocheck(bio);
/*
* blk-mq devices will reuse the extra reference on the request queue
@@ -1314,8 +1314,12 @@ again:
* path for BIO-based devices will not do that. So drop this extra
* reference here.
*/
- if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO))
+ if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO)) {
+ bdev->bd_disk->fops->submit_bio(bio);
blk_queue_exit(bdev->bd_disk->queue);
+ } else {
+ blk_mq_submit_bio(bio);
+ }
put_zwplug:
/* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */
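Two things happen in this file: a bio that emulated zone append gets its REQ_OP_ZONE_APPEND op restored and the emulation flag cleared on completion, and resubmission now calls the driver's ->submit_bio() or blk_mq_submit_bio() directly instead of submit_bio_noacct_nocheck(). The op restore is a mask-and-or on the opf word; a small self-checking sketch, with made-up bit values rather than the kernel's REQ_OP_* encoding:

```c
#include <assert.h>
#include <stdint.h>

/* The low bits of bi_opf hold the operation; the rest are flags.
 * Bit values here are illustrative only. */
#define OP_MASK        0xffu
#define OP_WRITE       0x01u
#define OP_ZONE_APPEND 0x0du
#define FLAG_SYNC      (1u << 10)

int main(void)
{
	uint32_t opf = OP_WRITE | FLAG_SYNC; /* op substituted for emulation */

	opf &= ~OP_MASK;	/* drop the substituted op */
	opf |= OP_ZONE_APPEND;	/* restore the original op */

	assert((opf & OP_MASK) == OP_ZONE_APPEND);
	assert(opf & FLAG_SYNC); /* unrelated flags are untouched */
	return 0;
}
```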
diff --git a/block/elevator.c b/block/elevator.c
index ab22542e6cf0..a960bdc869bc 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -719,7 +719,8 @@ void elevator_set_default(struct request_queue *q)
.name = "mq-deadline",
.no_uevent = true,
};
- int err = 0;
+ int err;
+ struct elevator_type *e;
/* now we allow to switch elevator */
blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);
@@ -732,12 +733,18 @@ void elevator_set_default(struct request_queue *q)
* have multiple queues or mq-deadline is not available, default
* to "none".
*/
- if (elevator_find_get(ctx.name) && (q->nr_hw_queues == 1 ||
- blk_mq_is_shared_tags(q->tag_set->flags)))
+ e = elevator_find_get(ctx.name);
+ if (!e)
+ return;
+
+ if ((q->nr_hw_queues == 1 ||
+ blk_mq_is_shared_tags(q->tag_set->flags))) {
err = elevator_change(q, &ctx);
- if (err < 0)
- pr_warn("\"%s\" elevator initialization, failed %d, "
- "falling back to \"none\"\n", ctx.name, err);
+ if (err < 0)
+ pr_warn("\"%s\" elevator initialization, failed %d, falling back to \"none\"\n",
+ ctx.name, err);
+ }
+ elevator_put(e);
}
void elevator_set_none(struct request_queue *q)
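Besides splitting the condition, the rewrite fixes a reference leak: elevator_find_get() takes a reference on the elevator type, and the old code never dropped it. A toy get/use/put sketch of the corrected flow (names and lookup are illustrative, not the elevator API):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct elv_type { const char *name; int refcount; };

static struct elv_type mq_deadline = { .name = "mq-deadline", .refcount = 1 };

static struct elv_type *elv_find_get(const char *name)
{
	if (strcmp(mq_deadline.name, name) == 0) {
		mq_deadline.refcount++;
		return &mq_deadline;
	}
	return NULL;
}

static void elv_put(struct elv_type *e)
{
	e->refcount--;
}

static void set_default(bool single_queue)
{
	struct elv_type *e = elv_find_get("mq-deadline");

	if (!e)
		return;
	if (single_queue)
		printf("switching to %s\n", e->name);
	elv_put(e);	/* the old flow returned without this put */
}

int main(void)
{
	set_default(true);
	printf("refcount back at %d\n", mq_deadline.refcount);
	return 0;
}
```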
diff --git a/block/genhd.c b/block/genhd.c
index 8171a6bc3210..c26733f6324b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -128,23 +128,27 @@ static void part_stat_read_all(struct block_device *part,
static void bdev_count_inflight_rw(struct block_device *part,
unsigned int inflight[2], bool mq_driver)
{
+ int write = 0;
+ int read = 0;
int cpu;
if (mq_driver) {
blk_mq_in_driver_rw(part, inflight);
- } else {
- for_each_possible_cpu(cpu) {
- inflight[READ] += part_stat_local_read_cpu(
- part, in_flight[READ], cpu);
- inflight[WRITE] += part_stat_local_read_cpu(
- part, in_flight[WRITE], cpu);
- }
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ read += part_stat_local_read_cpu(part, in_flight[READ], cpu);
+ write += part_stat_local_read_cpu(part, in_flight[WRITE], cpu);
}
- if (WARN_ON_ONCE((int)inflight[READ] < 0))
- inflight[READ] = 0;
- if (WARN_ON_ONCE((int)inflight[WRITE] < 0))
- inflight[WRITE] = 0;
+ /*
+ * While iterating all CPUs, some IOs may be issued from a CPU already
+ * traversed and complete on a CPU that has not yet been traversed,
+ * causing the inflight number to be negative.
+ */
+ inflight[READ] = read > 0 ? read : 0;
+ inflight[WRITE] = write > 0 ? write : 0;
}
/**
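The restructured summation accumulates into signed locals because per-CPU in-flight counters can dip negative transiently (an I/O issued on one CPU may complete on another mid-scan), then clamps once at the end instead of warning. A compact model of that behavior:

```c
#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	/* Skewed per-CPU counters: issue counted on CPU0, completion
	 * landing on CPU3 while the scan is mid-way, so individual
	 * (and even summed) values can be transiently negative. */
	int per_cpu_reads[NR_CPUS] = { 3, -1, 2, -5 };
	int read = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		read += per_cpu_reads[cpu];

	/* Clamp instead of WARNing: a negative sum is expected noise. */
	unsigned int inflight = read > 0 ? read : 0;

	printf("inflight reads: %u\n", inflight);
	return 0;
}
```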
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 0f0f8452609a..4dba8405bd01 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -276,7 +276,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd,
static void kyber_timer_fn(struct timer_list *t)
{
- struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
+ struct kyber_queue_data *kqd = timer_container_of(kqd, t, timer);
unsigned int sched_domain;
int cpu;
bool bad = false;