path: root/drivers/lightnvm/pblk-core.c
author     Javier González <javier@javigon.com>    2018-10-09 13:12:03 +0200
committer  Jens Axboe <axboe@kernel.dk>            2018-10-09 08:25:07 -0600
commit     af3fac1664b978f70a838571f3f35298ce1786da (patch)
tree       cdcd70a45e2e2b5bb71d2bdf0d2f7e01d8cc926f /drivers/lightnvm/pblk-core.c
parent     45dcf29b98377bbdc40aa4a23a79ade60295dbaf (diff)
lightnvm: pblk: refactor metadata paths
pblk maintains two different metadata paths for smeta and emeta, which store metadata at the start of the line and at the end of the line, respectively. Until now, these paths have been common for writing and retrieving metadata; however, as the paths diverge, the common code becomes less clear and unnecessarily complicated.

In preparation for further changes to the metadata write path, this patch separates the write and read paths for smeta and emeta and removes the synchronous emeta path, as it is not used anymore (emeta is scheduled asynchronously to prevent jittering due to internal I/Os).

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
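In terms of the resulting interface, the hunks below split the old dir-based helpers (pblk_line_submit_smeta_io()/pblk_line_submit_emeta_io()) into dedicated read and write entry points. A minimal sketch of the prototypes as they appear in this diff (prototypes only; the summary comments paraphrase behaviour as read from the hunks, not from the headers):

/*
 * Separated metadata paths introduced by this patch; definitions are in
 * drivers/lightnvm/pblk-core.c.
 */

/* First usable sector offset for smeta, skipping bad blocks at line start. */
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);

/* Synchronous smeta read into line->smeta. */
int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);

/* Synchronous smeta write at paddr when a line is initialized (static helper). */
static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
				 u64 paddr);

/* Synchronous, chunked emeta read from the end of the line into emeta_buf.
 * No synchronous emeta write counterpart remains; emeta writes are
 * scheduled asynchronously from the write path. */
int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf);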
Diffstat (limited to 'drivers/lightnvm/pblk-core.c')
-rw-r--r--  drivers/lightnvm/pblk-core.c  307
1 file changed, 141 insertions(+), 166 deletions(-)
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 8ae40855d4c9..49cef93e328e 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -685,180 +685,80 @@ u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
return paddr;
}
-/*
- * Submit emeta to one LUN in the raid line at the time to avoid a deadlock when
- * taking the per LUN semaphore.
- */
-static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
- void *emeta_buf, u64 paddr, int dir)
+u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
- void *ppa_list, *meta_list;
- struct bio *bio;
- struct nvm_rq rqd;
- dma_addr_t dma_ppa_list, dma_meta_list;
- int min = pblk->min_write_pgs;
- int left_ppas = lm->emeta_sec[0];
- int id = line->id;
- int rq_ppas, rq_len;
- int cmd_op, bio_op;
- int i, j;
- int ret;
+ int bit;
- if (dir == PBLK_WRITE) {
- bio_op = REQ_OP_WRITE;
- cmd_op = NVM_OP_PWRITE;
- } else if (dir == PBLK_READ) {
- bio_op = REQ_OP_READ;
- cmd_op = NVM_OP_PREAD;
- } else
- return -EINVAL;
+ /* This usually only happens on bad lines */
+ bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
+ if (bit >= lm->blk_per_line)
+ return -1;
- meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &dma_meta_list);
- if (!meta_list)
- return -ENOMEM;
+ return bit * geo->ws_opt;
+}
- ppa_list = meta_list + pblk_dma_meta_size;
- dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct pblk_line_meta *lm = &pblk->lm;
+ struct bio *bio;
+ struct nvm_rq rqd;
+ u64 paddr = pblk_line_smeta_start(pblk, line);
+ int i, ret;
-next_rq:
memset(&rqd, 0, sizeof(struct nvm_rq));
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
- rq_len = rq_ppas * geo->csecs;
+ ret = pblk_alloc_rqd_meta(pblk, &rqd);
+ if (ret)
+ return ret;
- bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
- l_mg->emeta_alloc_type, GFP_KERNEL);
+ bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto free_rqd_dma;
+ goto clear_rqd;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, bio_op, 0);
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
rqd.bio = bio;
- rqd.meta_list = meta_list;
- rqd.ppa_list = ppa_list;
- rqd.dma_meta_list = dma_meta_list;
- rqd.dma_ppa_list = dma_ppa_list;
- rqd.opcode = cmd_op;
- rqd.nr_ppas = rq_ppas;
-
- if (dir == PBLK_WRITE) {
- struct pblk_sec_meta *meta_list = rqd.meta_list;
-
- rqd.is_seq = 1;
- for (i = 0; i < rqd.nr_ppas; ) {
- spin_lock(&line->lock);
- paddr = __pblk_alloc_page(pblk, line, min);
- spin_unlock(&line->lock);
- for (j = 0; j < min; j++, i++, paddr++) {
- meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
- rqd.ppa_list[i] =
- addr_to_gen_ppa(pblk, paddr, id);
- }
- }
- } else {
- for (i = 0; i < rqd.nr_ppas; ) {
- struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
- int pos = pblk_ppa_to_pos(geo, ppa);
-
- if (pblk_io_aligned(pblk, rq_ppas))
- rqd.is_seq = 1;
-
- while (test_bit(pos, line->blk_bitmap)) {
- paddr += min;
- if (pblk_boundary_paddr_checks(pblk, paddr)) {
- pblk_err(pblk, "corrupt emeta line:%d\n",
- line->id);
- bio_put(bio);
- ret = -EINTR;
- goto free_rqd_dma;
- }
-
- ppa = addr_to_gen_ppa(pblk, paddr, id);
- pos = pblk_ppa_to_pos(geo, ppa);
- }
-
- if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
- pblk_err(pblk, "corrupt emeta line:%d\n",
- line->id);
- bio_put(bio);
- ret = -EINTR;
- goto free_rqd_dma;
- }
+ rqd.opcode = NVM_OP_PREAD;
+ rqd.nr_ppas = lm->smeta_sec;
+ rqd.is_seq = 1;
- for (j = 0; j < min; j++, i++, paddr++)
- rqd.ppa_list[i] =
- addr_to_gen_ppa(pblk, paddr, line->id);
- }
- }
+ for (i = 0; i < lm->smeta_sec; i++, paddr++)
+ rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
- pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
bio_put(bio);
- goto free_rqd_dma;
+ goto clear_rqd;
}
atomic_dec(&pblk->inflight_io);
- if (rqd.error) {
- if (dir == PBLK_WRITE)
- pblk_log_write_err(pblk, &rqd);
- else
- pblk_log_read_err(pblk, &rqd);
- }
+ if (rqd.error)
+ pblk_log_read_err(pblk, &rqd);
- emeta_buf += rq_len;
- left_ppas -= rq_ppas;
- if (left_ppas)
- goto next_rq;
-free_rqd_dma:
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+clear_rqd:
+ pblk_free_rqd_meta(pblk, &rqd);
return ret;
}
-u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct pblk_line_meta *lm = &pblk->lm;
- int bit;
-
- /* This usually only happens on bad lines */
- bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
- if (bit >= lm->blk_per_line)
- return -1;
-
- return bit * geo->ws_opt;
-}
-
-static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
- u64 paddr, int dir)
+static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
+ u64 paddr)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
struct nvm_rq rqd;
- __le64 *lba_list = NULL;
+ __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
+ __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
int i, ret;
- int cmd_op, bio_op;
-
- if (dir == PBLK_WRITE) {
- bio_op = REQ_OP_WRITE;
- cmd_op = NVM_OP_PWRITE;
- lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
- bio_op = REQ_OP_READ;
- cmd_op = NVM_OP_PREAD;
- } else
- return -EINVAL;
memset(&rqd, 0, sizeof(struct nvm_rq));
@@ -873,30 +773,20 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
}
bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, bio_op, 0);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
rqd.bio = bio;
- rqd.opcode = cmd_op;
- rqd.is_seq = 1;
+ rqd.opcode = NVM_OP_PWRITE;
rqd.nr_ppas = lm->smeta_sec;
+ rqd.is_seq = 1;
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
struct pblk_sec_meta *meta_list = rqd.meta_list;
rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-
- if (dir == PBLK_WRITE) {
- __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
- meta_list[i].lba = lba_list[paddr] = addr_empty;
- }
+ meta_list[i].lba = lba_list[paddr] = addr_empty;
}
- /*
- * This I/O is sent by the write thread when a line is replace. Since
- * the write thread is the only one sending write and erase commands,
- * there is no need to take the LUN semaphore.
- */
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
@@ -907,11 +797,8 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
- if (dir == PBLK_WRITE) {
- pblk_log_write_err(pblk, &rqd);
- ret = 1;
- } else if (dir == PBLK_READ)
- pblk_log_read_err(pblk, &rqd);
+ pblk_log_write_err(pblk, &rqd);
+ ret = -EIO;
}
clear_rqd:
@@ -919,18 +806,106 @@ clear_rqd:
return ret;
}
-int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
+int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
+ void *emeta_buf)
{
- u64 bpaddr = pblk_line_smeta_start(pblk, line);
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
+ void *ppa_list, *meta_list;
+ struct bio *bio;
+ struct nvm_rq rqd;
+ u64 paddr = line->emeta_ssec;
+ dma_addr_t dma_ppa_list, dma_meta_list;
+ int min = pblk->min_write_pgs;
+ int left_ppas = lm->emeta_sec[0];
+ int line_id = line->id;
+ int rq_ppas, rq_len;
+ int i, j;
+ int ret;
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
-}
+ meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+ &dma_meta_list);
+ if (!meta_list)
+ return -ENOMEM;
-int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
- void *emeta_buf)
-{
- return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
- line->emeta_ssec, PBLK_READ);
+ ppa_list = meta_list + pblk_dma_meta_size;
+ dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+
+next_rq:
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+ rq_len = rq_ppas * geo->csecs;
+
+ bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
+ l_mg->emeta_alloc_type, GFP_KERNEL);
+ if (IS_ERR(bio)) {
+ ret = PTR_ERR(bio);
+ goto free_rqd_dma;
+ }
+
+ bio->bi_iter.bi_sector = 0; /* internal bio */
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+
+ rqd.bio = bio;
+ rqd.meta_list = meta_list;
+ rqd.ppa_list = ppa_list;
+ rqd.dma_meta_list = dma_meta_list;
+ rqd.dma_ppa_list = dma_ppa_list;
+ rqd.opcode = NVM_OP_PREAD;
+ rqd.nr_ppas = rq_ppas;
+
+ for (i = 0; i < rqd.nr_ppas; ) {
+ struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
+ int pos = pblk_ppa_to_pos(geo, ppa);
+
+ if (pblk_io_aligned(pblk, rq_ppas))
+ rqd.is_seq = 1;
+
+ while (test_bit(pos, line->blk_bitmap)) {
+ paddr += min;
+ if (pblk_boundary_paddr_checks(pblk, paddr)) {
+ bio_put(bio);
+ ret = -EINTR;
+ goto free_rqd_dma;
+ }
+
+ ppa = addr_to_gen_ppa(pblk, paddr, line_id);
+ pos = pblk_ppa_to_pos(geo, ppa);
+ }
+
+ if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
+ bio_put(bio);
+ ret = -EINTR;
+ goto free_rqd_dma;
+ }
+
+ for (j = 0; j < min; j++, i++, paddr++)
+ rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
+ }
+
+ ret = pblk_submit_io_sync(pblk, &rqd);
+ if (ret) {
+ pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
+ bio_put(bio);
+ goto free_rqd_dma;
+ }
+
+ atomic_dec(&pblk->inflight_io);
+
+ if (rqd.error)
+ pblk_log_read_err(pblk, &rqd);
+
+ emeta_buf += rq_len;
+ left_ppas -= rq_ppas;
+ if (left_ppas)
+ goto next_rq;
+
+free_rqd_dma:
+ nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+ return ret;
}
static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -1169,7 +1144,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
line->smeta_ssec = off;
line->cur_sec = off + lm->smeta_sec;
- if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
+ if (init && pblk_line_smeta_write(pblk, line, off)) {
pblk_debug(pblk, "line smeta I/O failed. Retry\n");
return 0;
}