author     Matias Bjørling <m@bjorling.me>    2016-05-06 20:02:58 +0200
committer  Jens Axboe <axboe@fb.com>          2016-05-06 12:51:10 -0600
commit     22e8c9766a669d49cf3749d397082a5cd93374a9 (patch)
tree       c24facba8321c5359bf6f111f2ca892cf894b62a /drivers/nvme
parent     4891d120b9cd419f4350b11e1231083745dcdc8b (diff)
lightnvm: move block fold outside of get_bb_tbl()
The get block table command returns a list of blocks and planes with their associated state. Users such as gennvm and sysblk manage all planes as a single virtual block, so it was natural to fold the bad block list before returning it. However, to also allow users that manage blocks on a per-plane level to use the interface, get_bb_tbl() is changed to no longer fold by default and to instead let the caller fold if necessary.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
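For users that still want one entry per virtual block, the fold now happens on their side of the interface. Below is a minimal sketch of such a caller-side fold, mirroring the nvme_nvm_bb_tbl_fold() helper removed in the diff that follows; the fold_bb_tbl name, the plain C types, and the local BLK_T_* values are illustrative stand-ins for the kernel's NVM_BLK_T_* flags and u8 arrays.

/*
 * Sketch: fold the raw, per-plane bad block list into one entry per
 * virtual block. A bad block on any plane marks the whole virtual
 * block bad, matching the helper removed from the NVMe driver.
 */
#define BLK_T_BAD       0x1     /* stand-in for NVM_BLK_T_BAD */
#define BLK_T_GRWN_BAD  0x2     /* stand-in for NVM_BLK_T_GRWN_BAD */

static void fold_bb_tbl(int plane_mode, int nr_dst_blks,
                        unsigned char *dst_blks,
                        const unsigned char *src_blks)
{
        int blk, offset, pl, blktype;

        for (blk = 0; blk < nr_dst_blks; blk++) {
                offset = blk * plane_mode;
                blktype = src_blks[offset];

                /* Bad blocks on any plane take precedence over other types */
                for (pl = 0; pl < plane_mode; pl++) {
                        if (src_blks[offset + pl] &
                                        (BLK_T_BAD | BLK_T_GRWN_BAD)) {
                                blktype = src_blks[offset + pl];
                                break;
                        }
                }

                dst_blks[blk] = blktype;
        }
}

Applied over the blks_per_lun * plane_mode entries now handed to the callback, this reproduces the folded blks_per_lun entries such users consumed before this patch.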
Diffstat (limited to 'drivers/nvme')
-rw-r--r--    drivers/nvme/host/lightnvm.c    47
1 file changed, 6 insertions, 41 deletions
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 9461dd639acd..d289980d2bc8 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -387,41 +387,16 @@ out:
 	return ret;
 }
 
-static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
-				int nr_dst_blks, u8 *dst_blks,
-				int nr_src_blks, u8 *src_blks)
-{
-	int blk, offset, pl, blktype;
-
-	for (blk = 0; blk < nr_dst_blks; blk++) {
-		offset = blk * nvmdev->plane_mode;
-		blktype = src_blks[offset];
-
-		/* Bad blocks on any planes take precedence over other types */
-		for (pl = 0; pl < nvmdev->plane_mode; pl++) {
-			if (src_blks[offset + pl] &
-					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
-				blktype = src_blks[offset + pl];
-				break;
-			}
-		}
-
-		dst_blks[blk] = blktype;
-	}
-}
-
 static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
-				int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
-				void *priv)
+				nvm_bb_update_fn *update_bbtbl, void *priv)
 {
 	struct request_queue *q = nvmdev->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	struct nvme_nvm_command c = {};
 	struct nvme_nvm_bb_tbl *bb_tbl;
-	u8 *dst_blks = NULL;
-	int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
-	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
+	int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
 	int ret = 0;
 
 	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -432,12 +407,6 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 	if (!bb_tbl)
 		return -ENOMEM;
 
-	dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
-	if (!dst_blks) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
 						bb_tbl, tblsz);
 	if (ret) {
@@ -459,21 +428,17 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 		goto out;
 	}
 
-	if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
+	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
 		ret = -EINVAL;
 		dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
-				le32_to_cpu(bb_tbl->tblks), nr_src_blks);
+				le32_to_cpu(bb_tbl->tblks), nr_blks);
 		goto out;
 	}
 
-	nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
-				nr_src_blks, bb_tbl->blk);
-
 	ppa = dev_to_generic_addr(nvmdev, ppa);
-	ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
+	ret = update_bbtbl(nvmdev, ppa, bb_tbl->blk, nr_blks, priv);
 
 out:
-	kfree(dst_blks);
 	kfree(bb_tbl);
 	return ret;
 }
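After the change, a consumer's nvm_bb_update_fn callback receives the raw table, matching the update_bbtbl(nvmdev, ppa, bb_tbl->blk, nr_blks, priv) call above. The following is a hedged sketch of what a per-plane consumer might now do with it; the count_bad_blks name and the counter passed through priv are hypothetical, while the types and NVM_BLK_T_* flags come from <linux/lightnvm.h>.

#include <linux/lightnvm.h>

/*
 * Sketch of a per-plane consumer: with the fold gone from the NVMe
 * driver, the callback sees blks_per_lun * plane_mode entries and can
 * inspect each plane's state directly.
 */
static int count_bad_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                          u8 *blks, int nr_blks, void *priv)
{
        int *nr_bad = priv;     /* illustrative caller-provided counter */
        int i;

        for (i = 0; i < nr_blks; i++)
                if (blks[i] & (NVM_BLK_T_BAD | NVM_BLK_T_GRWN_BAD))
                        (*nr_bad)++;

        return 0;
}

Because no fold is applied, each plane's state is visible individually, which is exactly what per-plane users need and what the removed driver-side fold used to hide.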