author		Igor Konopko <igor.j.konopko@intel.com>		2019-05-04 20:38:11 +0200
committer	Jens Axboe <axboe@kernel.dk>			2019-05-06 10:19:19 -0600
commit		45c5fcbb73416f367100a4bccd116fd2752e5940 (patch)
tree		a639b328a9d5ccf9d2c447ca303af8daac017702 /drivers/lightnvm
parent		a96de64a24e5035018c5a912f2b877da8797277e (diff)
lightnvm: pblk: use nvm_rq_to_ppa_list()
This patch replaces a few remaining usages of rqd->ppa_list[] with the existing nvm_rq_to_ppa_list() helper. This is needed for theoretical devices with ws_min/ws_opt equal to 1.

Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
Reviewed-by: Javier González <javier@javigon.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
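For reference, the open-coded expression removed throughout the patch shows what the helper resolves to. A minimal sketch of such a helper, reconstructed from the removed lines below (the exact definition in the lightnvm headers may differ in detail):

/*
 * Sketch reconstructed from the open-coded form removed by this patch:
 * a request carrying more than one PPA keeps its addresses in the
 * DMA-mapped ppa_list, while a single-PPA request stores its one
 * address inline in ppa_addr. The helper hides that distinction.
 */
static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
{
	return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}

Callers can then index ppa_list[] uniformly regardless of rqd->nr_ppas, which is what the hunks below rely on.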
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r--	drivers/lightnvm/pblk-core.c	| 26
-rw-r--r--	drivers/lightnvm/pblk-recovery.c	| 13
2 files changed, 22 insertions(+), 17 deletions(-)
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 07270ba1e95f..773537804319 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -562,11 +562,9 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int ret;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io_sync(pblk, rqd);
pblk_up_chunk(pblk, ppa_list[0]);
@@ -725,6 +723,7 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = pblk_line_smeta_start(pblk, line);
int i, ret;
@@ -748,9 +747,10 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < lm->smeta_sec; i++, paddr++)
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
@@ -777,6 +777,7 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
@@ -801,12 +802,13 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
rqd.opcode = NVM_OP_PWRITE;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
struct pblk_sec_meta *meta = pblk_get_meta(pblk,
rqd.meta_list, i);
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
meta->lba = lba_list[paddr] = addr_empty;
}
@@ -836,8 +838,9 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
struct nvm_geo *geo = &dev->geo;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
- void *ppa_list, *meta_list;
+ void *ppa_list_buf, *meta_list;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = line->emeta_ssec;
dma_addr_t dma_ppa_list, dma_meta_list;
@@ -853,7 +856,7 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
if (!meta_list)
return -ENOMEM;
- ppa_list = meta_list + pblk_dma_meta_size(pblk);
+ ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
next_rq:
@@ -874,11 +877,12 @@ next_rq:
rqd.bio = bio;
rqd.meta_list = meta_list;
- rqd.ppa_list = ppa_list;
+ rqd.ppa_list = ppa_list_buf;
rqd.dma_meta_list = dma_meta_list;
rqd.dma_ppa_list = dma_ppa_list;
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = rq_ppas;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
@@ -906,7 +910,7 @@ next_rq:
}
for (j = 0; j < min; j++, i++, paddr++)
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
}
ret = pblk_submit_io_sync(pblk, &rqd);
@@ -1525,11 +1529,9 @@ void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int i;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
for (i = 0; i < rqd->nr_ppas; i++)
pblk_ppa_to_line_put(pblk, ppa_list[i]);
}
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index a9085b0e6611..e6dda04de144 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -179,6 +179,7 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
struct pblk_pad_rq *pad_rq;
struct nvm_rq *rqd;
struct bio *bio;
+ struct ppa_addr *ppa_list;
void *data;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
u64 w_ptr = line->cur_sec;
@@ -239,6 +240,7 @@ next_pad_rq:
rqd->end_io = pblk_end_io_recov;
rqd->private = pad_rq;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
meta_list = rqd->meta_list;
for (i = 0; i < rqd->nr_ppas; ) {
@@ -266,17 +268,17 @@ next_pad_rq:
lba_list[w_ptr] = addr_empty;
meta = pblk_get_meta(pblk, meta_list, i);
meta->lba = addr_empty;
- rqd->ppa_list[i] = dev_ppa;
+ ppa_list[i] = dev_ppa;
}
}
kref_get(&pad_rq->ref);
- pblk_down_chunk(pblk, rqd->ppa_list[0]);
+ pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io(pblk, rqd);
if (ret) {
pblk_err(pblk, "I/O submission failed: %d\n", ret);
- pblk_up_chunk(pblk, rqd->ppa_list[0]);
+ pblk_up_chunk(pblk, ppa_list[0]);
kref_put(&pad_rq->ref, pblk_recov_complete);
pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
bio_put(bio);
@@ -420,6 +422,7 @@ retry_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
if (pblk_io_aligned(pblk, rq_ppas))
rqd->is_seq = 1;
@@ -438,7 +441,7 @@ retry_rq:
}
for (j = 0; j < pblk->min_write_pgs; j++, i++)
- rqd->ppa_list[i] =
+ ppa_list[i] =
addr_to_gen_ppa(pblk, paddr + j, line->id);
}
@@ -486,7 +489,7 @@ retry_rq:
continue;
line->nr_valid_lbas++;
- pblk_update_map(pblk, lba, rqd->ppa_list[i]);
+ pblk_update_map(pblk, lba, ppa_list[i]);
}
left_ppas -= rq_ppas;