Diffstat (limited to 'block/blk-map.c')
| -rw-r--r-- | block/blk-map.c | 749 |
1 file changed, 548 insertions, 201 deletions
diff --git a/block/blk-map.c b/block/blk-map.c index 623e1cd4cffe..4533094d9458 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -1,177 +1,493 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Functions related to mapping data to requests */ #include <linux/kernel.h> +#include <linux/sched/task_stack.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> -#include <scsi/sg.h> /* for struct sg_iovec */ +#include <linux/uio.h> #include "blk.h" -int blk_rq_append_bio(struct request_queue *q, struct request *rq, - struct bio *bio) +struct bio_map_data { + bool is_our_pages : 1; + bool is_null_mapped : 1; + struct iov_iter iter; + struct iovec iov[]; +}; + +static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data, + gfp_t gfp_mask) { - if (!rq->bio) - blk_rq_bio_prep(q, rq, bio); - else if (!ll_back_merge_fn(q, rq, bio)) - return -EINVAL; - else { - rq->biotail->bi_next = bio; - rq->biotail = bio; + struct bio_map_data *bmd; + + if (data->nr_segs > UIO_MAXIOV) + return NULL; + + bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask); + if (!bmd) + return NULL; + bmd->iter = *data; + if (iter_is_iovec(data)) { + memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs); + bmd->iter.__iov = bmd->iov; + } + return bmd; +} + +static inline void blk_mq_map_bio_put(struct bio *bio) +{ + bio_put(bio); +} + +static struct bio *blk_rq_map_bio_alloc(struct request *rq, + unsigned int nr_vecs, gfp_t gfp_mask) +{ + struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL; + struct bio *bio; - rq->__data_len += bio->bi_size; + bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask, + &fs_bio_set); + if (!bio) + return NULL; + + return bio; +} + +/** + * bio_copy_from_iter - copy all pages from iov_iter to bio + * @bio: The &struct bio which describes the I/O as destination + * @iter: iov_iter as source + * + * Copy all pages from iov_iter to bio. + * Returns 0 on success, or error on failure. + */ +static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) +{ + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + bio_for_each_segment_all(bvec, bio, iter_all) { + ssize_t ret; + + ret = copy_page_from_iter(bvec->bv_page, + bvec->bv_offset, + bvec->bv_len, + iter); + + if (!iov_iter_count(iter)) + break; + + if (ret < bvec->bv_len) + return -EFAULT; } + return 0; } -static int __blk_rq_unmap_user(struct bio *bio) +/** + * bio_copy_to_iter - copy all pages from bio to iov_iter + * @bio: The &struct bio which describes the I/O as source + * @iter: iov_iter as destination + * + * Copy all pages from bio to iov_iter. + * Returns 0 on success, or error on failure. + */ +static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) { - int ret = 0; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; - if (bio) { - if (bio_flagged(bio, BIO_USER_MAPPED)) - bio_unmap_user(bio); - else - ret = bio_uncopy_user(bio); + bio_for_each_segment_all(bvec, bio, iter_all) { + ssize_t ret; + + ret = copy_page_to_iter(bvec->bv_page, + bvec->bv_offset, + bvec->bv_len, + &iter); + + if (!iov_iter_count(&iter)) + break; + + if (ret < bvec->bv_len) + return -EFAULT; } + return 0; +} + +/** + * bio_uncopy_user - finish previously mapped bio + * @bio: bio being terminated + * + * Free pages allocated from bio_copy_user_iov() and write back data + * to user space in case of a read. 
+ */ +static int bio_uncopy_user(struct bio *bio) +{ + struct bio_map_data *bmd = bio->bi_private; + int ret = 0; + + if (!bmd->is_null_mapped) { + /* + * if we're in a workqueue, the request is orphaned, so + * don't copy into a random user address space, just free + * and return -EINTR so user space doesn't expect any data. + */ + if (!current->mm) + ret = -EINTR; + else if (bio_data_dir(bio) == READ) + ret = bio_copy_to_iter(bio, bmd->iter); + if (bmd->is_our_pages) + bio_free_pages(bio); + } + kfree(bmd); return ret; } -static int __blk_rq_map_user(struct request_queue *q, struct request *rq, - struct rq_map_data *map_data, void __user *ubuf, - unsigned int len, gfp_t gfp_mask) +static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, + struct iov_iter *iter, gfp_t gfp_mask) { - unsigned long uaddr; - struct bio *bio, *orig_bio; - int reading, ret; + struct bio_map_data *bmd; + struct page *page; + struct bio *bio; + int i = 0, ret; + int nr_pages; + unsigned int len = iter->count; + unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0; - reading = rq_data_dir(rq) == READ; + bmd = bio_alloc_map_data(iter, gfp_mask); + if (!bmd) + return -ENOMEM; /* - * if alignment requirement is satisfied, map in user pages for - * direct dma. else, set up kernel bounce buffers + * We need to do a deep copy of the iov_iter including the iovecs. + * The caller provided iov might point to an on-stack or otherwise + * shortlived one. */ - uaddr = (unsigned long) ubuf; - if (blk_rq_aligned(q, uaddr, len) && !map_data) - bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask); - else - bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask); + bmd->is_our_pages = !map_data; + bmd->is_null_mapped = (map_data && map_data->null_mapped); - if (IS_ERR(bio)) - return PTR_ERR(bio); + nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE)); - if (map_data && map_data->null_mapped) - bio->bi_flags |= (1 << BIO_NULL_MAPPED); + ret = -ENOMEM; + bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask); + if (!bio) + goto out_bmd; - orig_bio = bio; - blk_queue_bounce(q, &bio); + if (map_data) { + nr_pages = 1U << map_data->page_order; + i = map_data->offset / PAGE_SIZE; + } + while (len) { + unsigned int bytes = PAGE_SIZE; + + bytes -= offset; + + if (bytes > len) + bytes = len; + + if (map_data) { + if (i == map_data->nr_entries * nr_pages) { + ret = -ENOMEM; + goto cleanup; + } + + page = map_data->pages[i / nr_pages]; + page += (i % nr_pages); + + i++; + } else { + page = alloc_page(GFP_NOIO | gfp_mask); + if (!page) { + ret = -ENOMEM; + goto cleanup; + } + } + + if (bio_add_page(bio, page, bytes, offset) < bytes) { + if (!map_data) + __free_page(page); + break; + } + + len -= bytes; + offset = 0; + } + + if (map_data) + map_data->offset += bio->bi_iter.bi_size; /* - * We link the bounce buffer in and could have to traverse it - * later so we have to get a ref to prevent it from being freed + * success */ - bio_get(bio); + if (iov_iter_rw(iter) == WRITE && + (!map_data || !map_data->null_mapped)) { + ret = bio_copy_from_iter(bio, iter); + if (ret) + goto cleanup; + } else if (map_data && map_data->from_user) { + struct iov_iter iter2 = *iter; + + /* This is the copy-in part of SG_DXFER_TO_FROM_DEV. 
*/ + iter2.data_source = ITER_SOURCE; + ret = bio_copy_from_iter(bio, &iter2); + if (ret) + goto cleanup; + } else { + if (bmd->is_our_pages) + zero_fill_bio(bio); + iov_iter_advance(iter, bio->bi_iter.bi_size); + } - ret = blk_rq_append_bio(q, rq, bio); - if (!ret) - return bio->bi_size; + bio->bi_private = bmd; - /* if it was boucned we must call the end io function */ - bio_endio(bio, 0); - __blk_rq_unmap_user(orig_bio); - bio_put(bio); + ret = blk_rq_append_bio(rq, bio); + if (ret) + goto cleanup; + return 0; +cleanup: + if (!map_data) + bio_free_pages(bio); + blk_mq_map_bio_put(bio); +out_bmd: + kfree(bmd); return ret; } +static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, + gfp_t gfp_mask) +{ + unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS); + struct bio *bio; + int ret; + + if (!iov_iter_count(iter)) + return -EINVAL; + + bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask); + if (!bio) + return -ENOMEM; + /* + * No alignment requirements on our part to support arbitrary + * passthrough commands. + */ + ret = bio_iov_iter_get_pages(bio, iter, 0); + if (ret) + goto out_put; + ret = blk_rq_append_bio(rq, bio); + if (ret) + goto out_release; + return 0; + +out_release: + bio_release_pages(bio, false); +out_put: + blk_mq_map_bio_put(bio); + return ret; +} + +static void bio_invalidate_vmalloc_pages(struct bio *bio) +{ +#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE + if (bio->bi_private && !op_is_write(bio_op(bio))) { + unsigned long i, len = 0; + + for (i = 0; i < bio->bi_vcnt; i++) + len += bio->bi_io_vec[i].bv_len; + invalidate_kernel_vmap_range(bio->bi_private, len); + } +#endif +} + +static void bio_map_kern_endio(struct bio *bio) +{ + bio_invalidate_vmalloc_pages(bio); + blk_mq_map_bio_put(bio); +} + +static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len, + gfp_t gfp_mask) +{ + unsigned int nr_vecs = bio_add_max_vecs(data, len); + struct bio *bio; + + bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask); + if (!bio) + return ERR_PTR(-ENOMEM); + + if (is_vmalloc_addr(data)) { + bio->bi_private = data; + if (!bio_add_vmalloc(bio, data, len)) { + blk_mq_map_bio_put(bio); + return ERR_PTR(-EINVAL); + } + } else { + bio_add_virt_nofail(bio, data, len); + } + bio->bi_end_io = bio_map_kern_endio; + return bio; +} + +static void bio_copy_kern_endio(struct bio *bio) +{ + bio_free_pages(bio); + blk_mq_map_bio_put(bio); +} + +static void bio_copy_kern_endio_read(struct bio *bio) +{ + char *p = bio->bi_private; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + bio_for_each_segment_all(bvec, bio, iter_all) { + memcpy_from_bvec(p, bvec); + p += bvec->bv_len; + } + + bio_copy_kern_endio(bio); +} + /** - * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage - * @q: request queue where request should be inserted - * @rq: request structure to fill - * @map_data: pointer to the rq_map_data holding pages (if necessary) - * @ubuf: the user buffer - * @len: length of user data - * @gfp_mask: memory allocation flags + * bio_copy_kern - copy kernel address into bio + * @rq: request to fill + * @data: pointer to buffer to copy + * @len: length in bytes + * @op: bio/request operation + * @gfp_mask: allocation flags for bio and page allocation * - * Description: - * Data will be mapped directly for zero copy I/O, if possible. Otherwise - * a kernel bounce buffer is used. - * - * A matching blk_rq_unmap_user() must be issued at the end of I/O, while - * still in process context. 
- * - * Note: The mapped bio may need to be bounced through blk_queue_bounce() - * before being submitted to the device, as pages mapped may be out of - * reach. It's the callers responsibility to make sure this happens. The - * original bio must be passed back in to blk_rq_unmap_user() for proper - * unmapping. + * copy the kernel address into a bio suitable for io to a block + * device. Returns an error pointer in case of error. */ -int blk_rq_map_user(struct request_queue *q, struct request *rq, - struct rq_map_data *map_data, void __user *ubuf, - unsigned long len, gfp_t gfp_mask) +static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len, + gfp_t gfp_mask) { - unsigned long bytes_read = 0; - struct bio *bio = NULL; - int ret; + enum req_op op = req_op(rq); + unsigned long kaddr = (unsigned long)data; + unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = kaddr >> PAGE_SHIFT; + struct bio *bio; + void *p = data; + int nr_pages = 0; - if (len > (queue_max_hw_sectors(q) << 9)) - return -EINVAL; - if (!len) - return -EINVAL; + /* + * Overflow, abort + */ + if (end < start) + return ERR_PTR(-EINVAL); - if (!ubuf && (!map_data || !map_data->null_mapped)) - return -EINVAL; + nr_pages = end - start; + bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask); + if (!bio) + return ERR_PTR(-ENOMEM); - while (bytes_read != len) { - unsigned long map_len, end, start; + while (len) { + struct page *page; + unsigned int bytes = PAGE_SIZE; - map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE); - end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1) - >> PAGE_SHIFT; - start = (unsigned long)ubuf >> PAGE_SHIFT; + if (bytes > len) + bytes = len; - /* - * A bad offset could cause us to require BIO_MAX_PAGES + 1 - * pages. If this happens we just lower the requested - * mapping len by a page so that we can fit - */ - if (end - start > BIO_MAX_PAGES) - map_len -= PAGE_SIZE; + page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask); + if (!page) + goto cleanup; - ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len, - gfp_mask); - if (ret < 0) - goto unmap_rq; - if (!bio) - bio = rq->bio; - bytes_read += ret; - ubuf += ret; + if (op_is_write(op)) + memcpy(page_address(page), p, bytes); + + if (bio_add_page(bio, page, bytes, 0) < bytes) + break; + + len -= bytes; + p += bytes; + } + + if (op_is_write(op)) { + bio->bi_end_io = bio_copy_kern_endio; + } else { + bio->bi_end_io = bio_copy_kern_endio_read; + bio->bi_private = data; + } + + return bio; + +cleanup: + bio_free_pages(bio); + blk_mq_map_bio_put(bio); + return ERR_PTR(-ENOMEM); +} - if (map_data) - map_data->offset += ret; +/* + * Append a bio to a passthrough request. Only works if the bio can be merged + * into the request based on the driver constraints. 
+ */ +int blk_rq_append_bio(struct request *rq, struct bio *bio) +{ + const struct queue_limits *lim = &rq->q->limits; + unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT; + unsigned int nr_segs = 0; + int ret; + + /* check that the data layout matches the hardware restrictions */ + ret = bio_split_io_at(bio, lim, &nr_segs, max_bytes, 0); + if (ret) { + /* if we would have to split the bio, copy instead */ + if (ret > 0) + ret = -EREMOTEIO; + return ret; } - if (!bio_flagged(bio, BIO_USER_MAPPED)) - rq->cmd_flags |= REQ_COPY_USER; + if (rq->bio) { + if (!ll_back_merge_fn(rq, bio, nr_segs)) + return -EINVAL; + rq->phys_gap_bit = bio_seg_gap(rq->q, rq->biotail, bio, + rq->phys_gap_bit); + rq->biotail->bi_next = bio; + rq->biotail = bio; + rq->__data_len += bio->bi_iter.bi_size; + bio_crypt_free_ctx(bio); + return 0; + } - rq->buffer = NULL; + rq->nr_phys_segments = nr_segs; + rq->bio = rq->biotail = bio; + rq->__data_len = bio->bi_iter.bi_size; + rq->phys_gap_bit = bio->bi_bvec_gap_bit; return 0; -unmap_rq: - blk_rq_unmap_user(bio); - rq->bio = NULL; +} +EXPORT_SYMBOL(blk_rq_append_bio); + +/* Prepare bio for passthrough IO given ITER_BVEC iter */ +static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter) +{ + unsigned int max_bytes = rq->q->limits.max_hw_sectors << SECTOR_SHIFT; + struct bio *bio; + int ret; + + if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes) + return -EINVAL; + + /* reuse the bvecs from the iterator instead of allocating new ones */ + bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL); + if (!bio) + return -ENOMEM; + bio_iov_bvec_set(bio, iter); + + ret = blk_rq_append_bio(rq, bio); + if (ret) + blk_mq_map_bio_put(bio); return ret; } -EXPORT_SYMBOL(blk_rq_map_user); /** - * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage + * blk_rq_map_user_iov - map user data to a request, for passthrough requests * @q: request queue where request should be inserted * @rq: request to map data to * @map_data: pointer to the rq_map_data holding pages (if necessary) - * @iov: pointer to the iovec - * @iov_count: number of elements in the iovec - * @len: I/O byte count + * @iter: iovec iterator * @gfp_mask: memory allocation flags * * Description: @@ -180,68 +496,112 @@ EXPORT_SYMBOL(blk_rq_map_user); * * A matching blk_rq_unmap_user() must be issued at the end of I/O, while * still in process context. - * - * Note: The mapped bio may need to be bounced through blk_queue_bounce() - * before being submitted to the device, as pages mapped may be out of - * reach. It's the callers responsibility to make sure this happens. The - * original bio must be passed back in to blk_rq_unmap_user() for proper - * unmapping. 
*/ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, - struct rq_map_data *map_data, struct sg_iovec *iov, - int iov_count, unsigned int len, gfp_t gfp_mask) + struct rq_map_data *map_data, + const struct iov_iter *iter, gfp_t gfp_mask) { - struct bio *bio; - int i, read = rq_data_dir(rq) == READ; - int unaligned = 0; + bool copy = false, map_bvec = false; + unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits); + struct bio *bio = NULL; + struct iov_iter i; + int ret = -EINVAL; + + if (map_data) + copy = true; + else if (iov_iter_alignment(iter) & align) + copy = true; + else if (iov_iter_is_bvec(iter)) + map_bvec = true; + else if (!user_backed_iter(iter)) + copy = true; + else if (queue_virt_boundary(q)) + copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter); + + if (map_bvec) { + ret = blk_rq_map_user_bvec(rq, iter); + if (!ret) + return 0; + if (ret != -EREMOTEIO) + goto fail; + /* fall back to copying the data on limits mismatches */ + copy = true; + } - if (!iov || iov_count <= 0) - return -EINVAL; + i = *iter; + do { + if (copy) + ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask); + else + ret = bio_map_user_iov(rq, &i, gfp_mask); + if (ret) { + if (ret == -EREMOTEIO) + ret = -EINVAL; + goto unmap_rq; + } + if (!bio) + bio = rq->bio; + } while (iov_iter_count(&i)); - for (i = 0; i < iov_count; i++) { - unsigned long uaddr = (unsigned long)iov[i].iov_base; + return 0; - if (!iov[i].iov_len) - return -EINVAL; +unmap_rq: + blk_rq_unmap_user(bio); +fail: + rq->bio = NULL; + return ret; +} +EXPORT_SYMBOL(blk_rq_map_user_iov); - /* - * Keep going so we check length of all segments - */ - if (uaddr & queue_dma_alignment(q)) - unaligned = 1; - } +int blk_rq_map_user(struct request_queue *q, struct request *rq, + struct rq_map_data *map_data, void __user *ubuf, + unsigned long len, gfp_t gfp_mask) +{ + struct iov_iter i; + int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i); - if (unaligned || (q->dma_pad_mask & len) || map_data) - bio = bio_copy_user_iov(q, map_data, iov, iov_count, read, - gfp_mask); - else - bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask); + if (unlikely(ret < 0)) + return ret; - if (IS_ERR(bio)) - return PTR_ERR(bio); + return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); +} +EXPORT_SYMBOL(blk_rq_map_user); - if (bio->bi_size != len) { - /* - * Grab an extra reference to this bio, as bio_unmap_user() - * expects to be able to drop it twice as it happens on the - * normal IO completion path - */ - bio_get(bio); - bio_endio(bio, 0); - __blk_rq_unmap_user(bio); - return -EINVAL; - } +int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data, + void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask, + bool vec, int iov_count, bool check_iter_count, int rw) +{ + int ret = 0; - if (!bio_flagged(bio, BIO_USER_MAPPED)) - rq->cmd_flags |= REQ_COPY_USER; + if (vec) { + struct iovec fast_iov[UIO_FASTIOV]; + struct iovec *iov = fast_iov; + struct iov_iter iter; - blk_queue_bounce(q, &bio); - bio_get(bio); - blk_rq_bio_prep(q, rq, bio); - rq->buffer = NULL; - return 0; + ret = import_iovec(rw, ubuf, iov_count ? 
iov_count : buf_len, + UIO_FASTIOV, &iov, &iter); + if (ret < 0) + return ret; + + if (iov_count) { + /* SG_IO howto says that the shorter of the two wins */ + iov_iter_truncate(&iter, buf_len); + if (check_iter_count && !iov_iter_count(&iter)) { + kfree(iov); + return -EINVAL; + } + } + + ret = blk_rq_map_user_iov(req->q, req, map_data, &iter, + gfp_mask); + kfree(iov); + } else if (buf_len) { + ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len, + gfp_mask); + } + return ret; } -EXPORT_SYMBOL(blk_rq_map_user_iov); +EXPORT_SYMBOL(blk_rq_map_user_io); /** * blk_rq_unmap_user - unmap a request with user data @@ -254,21 +614,24 @@ EXPORT_SYMBOL(blk_rq_map_user_iov); */ int blk_rq_unmap_user(struct bio *bio) { - struct bio *mapped_bio; + struct bio *next_bio; int ret = 0, ret2; while (bio) { - mapped_bio = bio; - if (unlikely(bio_flagged(bio, BIO_BOUNCED))) - mapped_bio = bio->bi_private; - - ret2 = __blk_rq_unmap_user(mapped_bio); - if (ret2 && !ret) - ret = ret2; - - mapped_bio = bio; + if (bio->bi_private) { + ret2 = bio_uncopy_user(bio); + if (ret2 && !ret) + ret = ret2; + } else { + bio_release_pages(bio, bio_data_dir(bio) == READ); + } + + if (bio_integrity(bio)) + bio_integrity_unmap_user(bio); + + next_bio = bio; bio = bio->bi_next; - bio_put(mapped_bio); + blk_mq_map_bio_put(next_bio); } return ret; @@ -276,8 +639,7 @@ int blk_rq_unmap_user(struct bio *bio) EXPORT_SYMBOL(blk_rq_unmap_user); /** - * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage - * @q: request queue where request should be inserted + * blk_rq_map_kern - map kernel data to a request, for passthrough requests * @rq: request to fill * @kbuf: the kernel buffer * @len: length of user data @@ -285,47 +647,32 @@ EXPORT_SYMBOL(blk_rq_unmap_user); * * Description: * Data will be mapped directly if possible. Otherwise a bounce - * buffer is used. Can be called multple times to append multple + * buffer is used. Can be called multiple times to append multiple * buffers. */ -int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, - unsigned int len, gfp_t gfp_mask) +int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len, + gfp_t gfp_mask) { - int reading = rq_data_dir(rq) == READ; unsigned long addr = (unsigned long) kbuf; - int do_copy = 0; struct bio *bio; int ret; - if (len > (queue_max_hw_sectors(q) << 9)) + if (len > (queue_max_hw_sectors(rq->q) << SECTOR_SHIFT)) return -EINVAL; if (!len || !kbuf) return -EINVAL; - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); - if (do_copy) - bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); + if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf)) + bio = bio_copy_kern(rq, kbuf, len, gfp_mask); else - bio = bio_map_kern(q, kbuf, len, gfp_mask); + bio = bio_map_kern(rq, kbuf, len, gfp_mask); if (IS_ERR(bio)) return PTR_ERR(bio); - if (!reading) - bio->bi_rw |= REQ_WRITE; - - if (do_copy) - rq->cmd_flags |= REQ_COPY_USER; - - ret = blk_rq_append_bio(q, rq, bio); - if (unlikely(ret)) { - /* request is too big */ - bio_put(bio); - return ret; - } - - blk_queue_bounce(q, &rq->bio); - rq->buffer = NULL; - return 0; + ret = blk_rq_append_bio(rq, bio); + if (unlikely(ret)) + blk_mq_map_bio_put(bio); + return ret; } EXPORT_SYMBOL(blk_rq_map_kern); |
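
The new bio_alloc_map_data() above deep-copies both the iov_iter and the iovec array it points at into one allocation, because the caller's iovec may live on the stack and be gone by the time the bio completes. A minimal sketch of that flexible-array plus struct_size() idiom, assuming a recent kernel; struct iov_snapshot and iov_snapshot_dup() are illustrative names, not part of this file:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/overflow.h>
#include <linux/uio.h>

struct iov_snapshot {
	struct iov_iter iter;	/* copied iterator state */
	struct iovec iov[];	/* private copy of the caller's iovecs */
};

static struct iov_snapshot *iov_snapshot_dup(const struct iov_iter *src,
					     gfp_t gfp)
{
	struct iov_snapshot *s;

	if (src->nr_segs > UIO_MAXIOV)
		return NULL;

	/* header plus nr_segs iovecs in a single allocation */
	s = kmalloc(struct_size(s, iov, src->nr_segs), gfp);
	if (!s)
		return NULL;

	s->iter = *src;
	if (iter_is_iovec(src)) {
		memcpy(s->iov, iter_iov(src),
		       src->nr_segs * sizeof(struct iovec));
		/* make the copied iterator walk our own array */
		s->iter.__iov = s->iov;
	}
	return s;
}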
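
bio_copy_user_iov() takes its bounce pages from rq_map_data when one is supplied instead of allocating them itself, and in that case leaves page ownership with the caller (is_our_pages stays false). A hedged sketch of how a caller might hand over preallocated pages this way; map_with_reserved_pages() is an illustrative name, and the caller is assumed to free the pages itself after blk_rq_unmap_user():

#include <linux/blk-mq.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int map_with_reserved_pages(struct request *rq, void __user *ubuf,
				   unsigned long len, struct page **pages,
				   unsigned short nr_pages)
{
	struct rq_map_data map_data = {
		.pages		= pages,	/* caller-owned, order-0 pages */
		.nr_entries	= nr_pages,
		.page_order	= 0,
		.offset		= 0,
	};

	/* bio_copy_user_iov() fails with -ENOMEM if it runs out of pages */
	if ((unsigned long)nr_pages * PAGE_SIZE < len)
		return -EINVAL;

	/* a non-NULL map_data forces the copy path in blk_rq_map_user_iov() */
	return blk_rq_map_user(rq->q, rq, &map_data, ubuf, len, GFP_KERNEL);
}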
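
bio_map_kern() now accepts vmalloc addresses and stashes the buffer in bi_private so bio_map_kern_endio() can invalidate the vmap alias after a read. The same rule applies to any code that lets a device DMA into a vmalloc()ed buffer; a short sketch mirroring the #ifdef guard used above (post_read_fixup() is an illustrative name):

#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * After a device has written into a vmalloc()ed buffer, the vmap alias
 * must be invalidated before the CPU reads it on architectures with
 * aliasing caches; on everything else this is a no-op.
 */
static void post_read_fixup(void *buf, unsigned int len)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (is_vmalloc_addr(buf))
		invalidate_kernel_vmap_range(buf, len);
#endif
}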
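
blk_rq_map_kern() keeps the old rule for choosing between zero-copy mapping and bouncing: buffers that are not DMA-aligned for the queue, or that live on the kernel stack, go through bio_copy_kern(). The same test pulled out as a standalone predicate for illustration (must_bounce() is not part of this file):

#include <linux/blkdev.h>
#include <linux/sched/task_stack.h>

static bool must_bounce(struct request_queue *q, void *kbuf, unsigned int len)
{
	return !blk_rq_aligned(q, (unsigned long)kbuf, len) ||
	       object_is_on_stack(kbuf);
}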
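
blk_rq_append_bio() now validates the bio against the queue limits itself and returns -EREMOTEIO when the data layout would require a split, which is not allowed for passthrough requests; blk_rq_map_user_iov() uses that to fall back from the bvec fast path to a bounce copy. A hedged sketch of how an external caller that builds its own bios might react to the same error (attach_or_request_copy() is illustrative):

#include <linux/bio.h>
#include <linux/blk-mq.h>
#include <linux/errno.h>

static int attach_or_request_copy(struct request *rq, struct bio *bio,
				  bool *need_copy)
{
	int ret = blk_rq_append_bio(rq, bio);

	*need_copy = false;
	if (ret == -EREMOTEIO) {
		/* layout violates the queue limits: retry via a bounce copy */
		*need_copy = true;
		bio_put(bio);
		return 0;
	}
	return ret;
}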
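
The rebuilt blk_rq_map_user_iov() decides up front whether it can map user pages zero-copy or has to bounce: an iterator that violates the queue's DMA alignment and padding mask, is not user-backed, or straddles the virt boundary gets copied (bvec iterators take their own fast path, and a supplied rq_map_data always forces the copy). The same decision as a standalone predicate, using only helpers that appear above; will_be_copied() is an illustrative name:

#include <linux/blkdev.h>
#include <linux/uio.h>

static bool will_be_copied(struct request_queue *q, const struct iov_iter *iter)
{
	unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);

	if (iov_iter_alignment(iter) & align)
		return true;
	if (!user_backed_iter(iter))
		return true;
	return queue_virt_boundary(q) &&
	       (queue_virt_boundary(q) & iov_iter_gap_alignment(iter));
}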
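
Putting the pieces together, the typical caller-side pattern for these interfaces is: allocate a passthrough request, map the user buffer, execute, unmap, free. A sketch under the assumption of a current blk-mq kernel; blk_mq_alloc_request(), blk_execute_rq() and the driver-specific command setup are outside this file, and real callers also set rq->timeout and the passthrough payload:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static int issue_user_passthrough(struct request_queue *q,
				  void __user *ubuf, unsigned long len,
				  bool to_device)
{
	struct request *rq;
	struct bio *bio;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, to_device ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* pins the user pages, or bounces them if alignment forbids it */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_free;
	bio = rq->bio;	/* rq->bio may be advanced by completion */

	/* driver-specific command setup would go here */

	status = blk_execute_rq(rq, false);

	/* copy back / unpin; must still run in the submitter's context */
	ret = blk_rq_unmap_user(bio);
	if (!ret && status != BLK_STS_OK)
		ret = -EIO;
out_free:
	blk_mq_free_request(rq);
	return ret;
}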
