Diffstat (limited to 'drivers/mmc/core/queue.c')
| -rw-r--r-- | drivers/mmc/core/queue.c | 227 |
1 file changed, 133 insertions, 94 deletions
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 35cc138b096d..284856c8f655 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -1,19 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2003 Russell King, All Rights Reserved.
  * Copyright 2006-2007 Pierre Ossman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/freezer.h>
-#include <linux/kthread.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -22,8 +18,11 @@
 #include "block.h"
 #include "core.h"
 #include "card.h"
+#include "crypto.h"
 #include "host.h"
 
+#define MMC_DMA_MAP_MERGE_SEGMENTS	512
+
 static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
 {
 	/* Allow only 1 DCMD at a time */
@@ -34,8 +33,6 @@ void mmc_cqe_check_busy(struct mmc_queue *mq)
 {
 	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
 		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
-
-	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
 }
 
 static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
@@ -51,6 +48,7 @@ static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
 	case REQ_OP_DRV_OUT:
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
+	case REQ_OP_WRITE_ZEROES:
 		return MMC_ISSUE_SYNC;
 	case REQ_OP_FLUSH:
 		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
@@ -63,7 +61,7 @@ enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_host *host = mq->card->host;
 
-	if (mq->use_cqe)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		return mmc_cqe_issue_type(host, req);
 
 	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
@@ -108,11 +106,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
 	case MMC_ISSUE_DCMD:
 		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
 			if (recovery_needed)
-				__mmc_cqe_recovery_notifier(mq);
+				mmc_cqe_recovery_notifier(mrq);
 			return BLK_EH_RESET_TIMER;
 		}
-		/* No timeout (XXX: huh? comment doesn't make much sense) */
-		blk_mq_complete_request(req);
+		/* The request has gone already */
 		return BLK_EH_DONE;
 	default:
 		/* Timeout is handled by mmc core */
@@ -120,24 +117,20 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
 	}
 }
 
-static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
-						 bool reserved)
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
 {
 	struct request_queue *q = req->q;
 	struct mmc_queue *mq = q->queuedata;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
 	unsigned long flags;
-	int ret;
+	bool ignore_tout;
 
 	spin_lock_irqsave(&mq->lock, flags);
-
-	if (mq->recovery_needed || !mq->use_cqe)
-		ret = BLK_EH_RESET_TIMER;
-	else
-		ret = mmc_cqe_timed_out(req);
-
+	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
 	spin_unlock_irqrestore(&mq->lock, flags);
 
-	return ret;
+	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
 }
 
 static void mmc_mq_recovery_handler(struct work_struct *work)
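
The timeout rework above folds the old if/else into a single lock-protected test: if recovery is already pending, if CQE is disabled, or if the host software queue (hsq) owns dispatch, the handler simply re-arms the block-layer timer, and only the pure CQE path consults cqe_timeout(). For reference, a minimal sketch of the blk-mq .timeout contract with the updated single-argument signature (the mydrv names below are hypothetical, not kernel APIs):

	#include <linux/blk-mq.h>

	/* Hypothetical in-flight check; a real driver would consult its
	 * own completion state here.
	 */
	static bool mydrv_request_pending(struct request *req)
	{
		return true;	/* placeholder for this sketch */
	}

	static enum blk_eh_timer_return mydrv_timed_out(struct request *req)
	{
		if (mydrv_request_pending(req))
			return BLK_EH_RESET_TIMER;	/* re-arm the request timer */

		return BLK_EH_DONE;	/* completion already under way; do nothing */
	}
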
@@ -145,12 +138,13 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
 	struct mmc_queue *mq = container_of(work, struct mmc_queue,
 					    recovery_work);
 	struct request_queue *q = mq->queue;
+	struct mmc_host *host = mq->card->host;
 
 	mmc_get_card(mq->card, &mq->ctx);
 
 	mq->in_recovery = true;
 
-	if (mq->use_cqe)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		mmc_blk_cqe_recovery(mq);
 	else
 		mmc_blk_mq_recovery(mq);
@@ -161,12 +155,15 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
 	mq->recovery_needed = false;
 	spin_unlock_irq(&mq->lock);
 
+	if (host->hsq_enabled)
+		host->cqe_ops->cqe_recovery_finish(host);
+
 	mmc_put_card(mq->card, &mq->ctx);
 
 	blk_mq_run_hw_queues(q, true);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
+static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
 {
 	struct scatterlist *sg;
 
@@ -177,8 +174,8 @@ static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
 	return sg;
 }
 
-static void mmc_queue_setup_discard(struct request_queue *q,
-				    struct mmc_card *card)
+static void mmc_queue_setup_discard(struct mmc_card *card,
+				    struct queue_limits *lim)
 {
 	unsigned max_discard;
 
@@ -186,37 +183,42 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 	if (!max_discard)
 		return;
 
-	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-	blk_queue_max_discard_sectors(q, max_discard);
-	q->limits.discard_granularity = card->pref_erase << 9;
+	lim->max_hw_discard_sectors = max_discard;
+	if (mmc_card_can_secure_erase_trim(card))
+		lim->max_secure_erase_sectors = max_discard;
+	if (mmc_card_can_trim(card) && card->erased_byte == 0)
+		lim->max_write_zeroes_sectors = max_discard;
+
 	/* granularity must not be greater than max. discard */
 	if (card->pref_erase > max_discard)
-		q->limits.discard_granularity = 0;
-	if (mmc_can_secure_erase_trim(card))
-		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
+		lim->discard_granularity = SECTOR_SIZE;
+	else
+		lim->discard_granularity = card->pref_erase << 9;
 }
 
-/**
- * mmc_init_request() - initialize the MMC-specific per-request data
- * @q: the request queue
- * @req: the request
- * @gfp: memory allocation policy
- */
-static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
-			      gfp_t gfp)
+static unsigned short mmc_get_max_segments(struct mmc_host *host)
+{
+	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
+					 host->max_segs;
+}
+
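
In mmc_queue_setup_discard() above, card->pref_erase counts 512-byte sectors, so shifting it left by 9 yields the discard granularity in bytes; note also that an oversized pref_erase now degrades to SECTOR_SIZE rather than the old 0, which the block layer treats as an unknown granularity. A worked example of the arithmetic, using an assumed card geometry rather than real card data:

	/* Sketch only: pref_erase is assumed, not read from hardware. */
	static unsigned int example_discard_granularity(void)
	{
		unsigned int pref_erase = 1024;		/* assumed: 1024 x 512-byte sectors */

		return pref_erase << 9;			/* 524288 bytes = 512 KiB */
	}
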
+static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
+			       unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+	struct mmc_queue *mq = set->driver_data;
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 
-	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
 	if (!mq_rq->sg)
 		return -ENOMEM;
 
 	return 0;
 }
 
-static void mmc_exit_request(struct request_queue *q, struct request *req)
+static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
+				unsigned int hctx_idx)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
@@ -224,20 +226,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 	mq_rq->sg = NULL;
 }
 
-static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
-			       unsigned int hctx_idx, unsigned int numa_node)
-{
-	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
-}
-
-static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
-				unsigned int hctx_idx)
-{
-	struct mmc_queue *mq = set->driver_data;
-
-	mmc_exit_request(mq->queue, req);
-}
-
 static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 				    const struct blk_mq_queue_data *bd)
 {
@@ -249,7 +237,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	enum mmc_issue_type issue_type;
 	enum mmc_issued issued;
 	bool get_card, cqe_retune_ok;
-	int ret;
+	blk_status_t ret;
 
 	if (mmc_card_removed(mq->card)) {
 		req->rq_flags |= RQF_QUIET;
@@ -274,6 +262,10 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 		break;
 	case MMC_ISSUE_ASYNC:
+		if (host->hsq_enabled && mq->in_flight[issue_type] > host->hsq_depth) {
+			spin_unlock_irq(&mq->lock);
+			return BLK_STS_RESOURCE;
+		}
 		break;
 	default:
 		/*
@@ -304,7 +296,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (get_card)
 		mmc_get_card(card, &mq->ctx);
 
-	if (mq->use_cqe) {
+	if (host->cqe_enabled) {
 		host->retune_now = host->need_retune && cqe_retune_ok &&
 				   !host->hold_retune;
 	}
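
The new MMC_ISSUE_ASYNC branch throttles hsq dispatch: once in_flight exceeds host->hsq_depth, returning BLK_STS_RESOURCE tells blk-mq to hold the request and redispatch it later, which is why the spinlock must be dropped before returning. A generic sketch of this backpressure pattern in a .queue_rq handler (the mydrv names and depth are assumptions for illustration):

	#include <linux/blk-mq.h>
	#include <linux/atomic.h>

	struct mydrv {			/* hypothetical driver state */
		atomic_t in_flight;
	};

	#define MYDRV_MAX_IN_FLIGHT	8	/* assumed depth for this sketch */

	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
	{
		struct mydrv *drv = hctx->queue->queuedata;

		/* Too busy: ask blk-mq to hold the request and retry later. */
		if (atomic_inc_return(&drv->in_flight) > MYDRV_MAX_IN_FLIGHT) {
			atomic_dec(&drv->in_flight);
			return BLK_STS_RESOURCE;
		}

		blk_mq_start_request(bd->rq);
		/* ... hand bd->rq to the hardware here ... */
		return BLK_STS_OK;
	}
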
@@ -351,24 +343,53 @@ static const struct blk_mq_ops mmc_mq_ops = {
 	.timeout	= mmc_mq_timed_out,
 };
 
-static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
+		struct mmc_card *card, unsigned int features)
 {
 	struct mmc_host *host = card->host;
-	u64 limit = BLK_BOUNCE_HIGH;
+	struct queue_limits lim = {
+		.features	= features,
+	};
+	struct gendisk *disk;
+
+	if (mmc_card_can_erase(card))
+		mmc_queue_setup_discard(card, &lim);
+
+	lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);
+
+	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
+		lim.logical_block_size = card->ext_csd.data_sector_size;
+	else
+		lim.logical_block_size = 512;
+
+	WARN_ON_ONCE(lim.logical_block_size != 512 &&
+		     lim.logical_block_size != 4096);
 
-	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+	/*
+	 * Setting a virt_boundary implicitly sets a max_segment_size, so try
+	 * to set the hardware one here.
+	 */
+	if (host->can_dma_map_merge) {
+		lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host));
+		lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS;
+	} else {
+		lim.max_segment_size =
+			round_down(host->max_seg_size, lim.logical_block_size);
+		lim.max_segments = host->max_segs;
+	}
 
-	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
-	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
-	if (mmc_can_erase(card))
-		mmc_queue_setup_discard(mq->queue, card);
+	if (mmc_host_is_spi(host) && host->use_spi_crc)
+		lim.features |= BLK_FEAT_STABLE_WRITES;
 
-	blk_queue_bounce_limit(mq->queue, limit);
-	blk_queue_max_hw_sectors(mq->queue,
-		min(host->max_blk_count, host->max_req_size / 512));
-	blk_queue_max_segments(mq->queue, host->max_segs);
-	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+	disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
+	if (IS_ERR(disk))
+		return disk;
+	mq->queue = disk->queue;
+
+	blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+	if (mmc_dev(host)->dma_parms)
+		dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
 
 	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
 	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
@@ -376,6 +397,14 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	mutex_init(&mq->complete_lock);
 
 	init_waitqueue_head(&mq->wait);
+
+	mmc_crypto_setup_queue(mq->queue, host);
+	return disk;
+}
+
+static inline bool mmc_merge_capable(struct mmc_host *host)
+{
+	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
 }
 
 /* Set queue depth to get a reasonable value for q->nr_requests */
@@ -385,16 +414,18 @@
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
  * @card: mmc card to attach this queue
+ * @features: block layer features (BLK_FEAT_*)
  *
  * Initialise a MMC card request queue.
  */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
+struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+		unsigned int features)
 {
 	struct mmc_host *host = card->host;
+	struct gendisk *disk;
 	int ret;
 
 	mq->card = card;
-	mq->use_cqe = host->cqe_enabled;
 
 	spin_lock_init(&mq->lock);
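
mmc_alloc_disk() above illustrates the current allocation pattern: every limit is gathered into a stack-allocated struct queue_limits and passed to blk_mq_alloc_disk(), which applies them all at gendisk creation, replacing the old sequence of blk_queue_* setters on a live queue. A reduced sketch of the same pattern for a hypothetical driver (the limit values are placeholders, and tag_set is assumed to be fully configured):

	#include <linux/blk-mq.h>

	static struct gendisk *mydrv_alloc_disk(struct blk_mq_tag_set *tag_set,
						void *queuedata)
	{
		struct queue_limits lim = {
			.logical_block_size	= 512,
			.max_hw_sectors		= 1024,	/* 512 KiB per request */
			.max_segments		= 64,
		};

		/* Limits are applied atomically when the gendisk is created;
		 * the caller checks the result with IS_ERR().
		 */
		return blk_mq_alloc_disk(tag_set, &lim, queuedata);
	}
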
@@ -404,37 +435,38 @@
 	 * The queue depth for CQE must match the hardware because the request
 	 * tag is used to index the hardware queue.
	 */
-	if (mq->use_cqe)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		mq->tag_set.queue_depth =
 			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
 	else
 		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
 	mq->tag_set.numa_node = NUMA_NO_NODE;
-	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
-			    BLK_MQ_F_BLOCKING;
+	mq->tag_set.flags = BLK_MQ_F_BLOCKING;
 	mq->tag_set.nr_hw_queues = 1;
 	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
 	mq->tag_set.driver_data = mq;
 
+	/*
+	 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
+	 * host->can_dma_map_merge must be set beforehand so that
+	 * mmc_get_max_segments() returns the right max_segs.
+	 */
+	if (mmc_merge_capable(host) &&
+	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
+	    dma_get_merge_boundary(mmc_dev(host)))
+		host->can_dma_map_merge = 1;
+	else
+		host->can_dma_map_merge = 0;
+
 	ret = blk_mq_alloc_tag_set(&mq->tag_set);
 	if (ret)
-		return ret;
-
-	mq->queue = blk_mq_init_queue(&mq->tag_set);
-	if (IS_ERR(mq->queue)) {
-		ret = PTR_ERR(mq->queue);
-		goto free_tag_set;
-	}
-
-	mq->queue->queuedata = mq;
-	blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
-	mmc_setup_queue(mq, card);
-	return 0;
+		return ERR_PTR(ret);
+
-free_tag_set:
-	blk_mq_free_tag_set(&mq->tag_set);
-	return ret;
+	disk = mmc_alloc_disk(mq, card, features);
+	if (IS_ERR(disk))
+		blk_mq_free_tag_set(&mq->tag_set);
+	return disk;
 }
 
 void mmc_queue_suspend(struct mmc_queue *mq)
@@ -465,7 +497,14 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	if (blk_queue_quiesced(q))
 		blk_mq_unquiesce_queue(q);
 
-	blk_cleanup_queue(q);
+	/*
+	 * If the recovery completes the last (and only remaining) request in
+	 * the queue, and the card has been removed, we could end up here with
+	 * the recovery not quite finished yet, so cancel it.
+	 */
+	cancel_work_sync(&mq->recovery_work);
+
+	blk_mq_free_tag_set(&mq->tag_set);
 
 	/*
 	 * A request can be completed before the next request, potentially
@@ -484,5 +523,5 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
 	struct request *req = mmc_queue_req_to_req(mqrq);
 
-	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
+	return blk_rq_map_sg(req, mqrq->sg);
 }
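
The final hunk tracks a block-layer API change: blk_rq_map_sg() no longer takes a request_queue argument, since the request already carries its queue in req->q. A small sketch of the call as it is now used, assuming a scatterlist sized for the queue's max_segments as the .init_request() callback above arranges (mydrv_map_request is illustrative):

	#include <linux/blk-mq.h>
	#include <linux/scatterlist.h>

	static unsigned int mydrv_map_request(struct request *req,
					      struct scatterlist *sg)
	{
		/* Returns the number of scatterlist entries actually used. */
		return blk_rq_map_sg(req, sg);
	}
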
