/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
#include <linux/time.h>
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_dev_ppa_to_pos(geo, *ppa);

        pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
}
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_line *line;

        line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                struct ppa_addr *ppa;

                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
                if (!ppa)
                        return;

                *ppa = rqd->ppa_addr;
                pblk_mark_bb(pblk, line, ppa);
        }
}
/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, pblk->r_rq_pool);
}
static void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                                  u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        if (line->state == PBLK_LINESTATE_GC ||
                                line->state == PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                return;
        }

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        line->vsc--;

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC ||
                                line->state == PBLK_LINESTATE_FREE) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;
        int line_id;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line_id = pblk_tgt_ppa_to_line(ppa);
        line = &pblk->lines[line_id];
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}
void pblk_map_pad_invalidate(struct pblk *pblk, struct pblk_line *line,
                             u64 paddr)
{
        __pblk_map_invalidate(pblk, line, paddr);

        pblk_rb_sync_init(&pblk->rwb, NULL);
        line->left_ssecs--;
        if (!line->left_ssecs)
                pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);
        pblk_rb_sync_end(&pblk->rwb, NULL);
}
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        if (rw == WRITE) {
                pool = pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
        } else {
                pool = pblk->r_rq_pool;
                rq_size = pblk_r_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
        mempool_t *pool;

        if (rw == WRITE)
                pool = pblk->w_rq_pool;
        else
                pool = pblk->r_rq_pool;

        mempool_free(rqd, pool);
}
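
/*
 * Usage sketch (illustrative only): a request must be freed with the same
 * rw direction it was allocated with, so it returns to the pool it came
 * from:
 *
 *	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, WRITE);
 *	...
 *	pblk_free_rqd(pblk, rqd, WRITE);
 */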
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, pblk->page_pool);
        }
}
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(pblk->page_pool, flags);
                if (!page)
                        goto err;

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        mempool_free(page, pblk->page_pool);
                        goto err;
                }
        }

        return 0;
err:
        /* free the i pages already added to the bio (indices 0..i-1) */
        pblk_bio_free_pages(pblk, bio, 0, i);
        return -1;
}
static void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
        struct pblk *pblk = (struct pblk *)data;

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}
void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs)
                pblk_write_kick(pblk);
}
void pblk_end_bio_sync(struct bio *bio)
{
        struct completion *waiting = bio->bi_private;

        complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}
void pblk_flush_writer(struct pblk *pblk)
{
        struct bio *bio;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return;

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH);
        bio->bi_private = &wait;
        bio->bi_end_io = pblk_end_bio_sync;

        ret = pblk_write_to_cache(pblk, bio, 0);
        if (ret == NVM_IO_OK) {
                if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                        pr_err("pblk: flush cache timed out\n");
                }
        } else if (ret != NVM_IO_DONE) {
                pr_err("pblk: tear down bio failed\n");
        }

        if (bio->bi_status)
                pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);

        bio_put(bio);
}
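
/*
 * pblk_flush_writer() issues an empty REQ_PREFLUSH bio through the write
 * cache path, forcing all buffered data out to the media before the
 * completion fires. The wait is bounded by PBLK_COMMAND_TIMEOUT_MS, so a
 * stuck device surfaces as a logged timeout rather than a hung task.
 */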
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        if (!line->vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (line->vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (line->vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (line->vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (line->vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, line->vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}
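
/*
 * Summary of the bucket selection above, keyed on the line's valid sector
 * count (vsc):
 *
 *	vsc == 0           -> gc_full_list  (no valid data left; best target)
 *	vsc <  mid_thrs    -> gc_high_list  (high GC benefit)
 *	vsc <  high_thrs   -> gc_mid_list
 *	vsc <  sec_in_line -> gc_low_list
 *	vsc == sec_in_line -> gc_empty_list (nothing invalidated yet)
 *	anything else      -> corrupt_list
 */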
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}
struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
        struct ppa_addr ppa;

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        return ppa;
}
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pr_err("pblk: unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
        struct ppa_addr *ppa_list;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                struct ppa_addr ppa;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        ppa = ppa_list[i];
                        line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pr_err("pblk: bad ppa: line:%d,state:%d\n",
                                                line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }
#endif
        return nvm_submit_io(dev, rqd);
}
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pr_err("pblk: could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }
out:
        return bio;
}
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}
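
/*
 * Worked example for pblk_calc_secs(), assuming min_write_pgs = 8 and
 * sec_per_write = 64 (illustrative values only):
 *
 *	secs_avail = 100, secs_to_flush = 0  ->  64 (capped at max)
 *	secs_avail = 35,  secs_to_flush = 0  ->  32 (largest multiple of min)
 *	secs_avail = 3,   secs_to_flush = 1  ->   8 (round up to min on flush)
 *	secs_avail = 3,   secs_to_flush = 0  ->   0 (wait for more data)
 */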
static u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line,
                             int nr_secs)
{
        u64 addr;
        int i;

        /* logic error: ppa out-of-bounds. Prevent generating bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}
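
/*
 * left_msecs counts the user sectors still to be mapped in this line; once
 * it reaches zero, pblk_line_is_full() returns true and the write path
 * switches to a new line via pblk_line_replace_data().
 */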
/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        struct ppa_addr *ppa_list;
        dma_addr_t dma_ppa_list;
        void *emeta = line->emeta;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec;
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
        int flags;
        int i, j;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk);
        } else
                return -EINVAL;

        ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_ppa_list);
        if (!ppa_list)
                return -ENOMEM;

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        rq_len = rq_ppas * geo->sec_size;

        bio = pblk_bio_map_addr(pblk, emeta, rq_ppas, rq_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = rq_ppas;
        rqd.ppa_list = ppa_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        if (dir == WRITE) {
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
                        spin_unlock(&line->lock);
                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, id);
                }
        } else {
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_dev_ppa_to_pos(geo, ppa);

                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
                                if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                        pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                        bio_put(bio);
                                        ret = -EINTR;
                                        goto free_rqd_dma;
                                }
                                ppa = addr_to_gen_ppa(pblk, paddr, id);
                                pos = pblk_dev_ppa_to_pos(geo, ppa);
                        }

                        if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                                pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, line->id);
                }
        }

        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: emeta I/O timed out\n");
        }
        reinit_completion(&wait);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

        emeta += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;
free_rqd_dma:
        nvm_dev_dma_free(dev->parent, ppa_list, dma_ppa_list);
        return ret;
}
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->sec_per_pl;
}
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = NULL;
        int i, ret;
        int cmd_op, bio_op;
        int flags;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
                lba_list = pblk_line_emeta_to_lbas(line->emeta);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk);
        } else
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_ppa_list);
        if (!rqd.ppa_list)
                return -ENOMEM;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_ppa_list;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
                if (dir == WRITE)
                        lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
        }

        /*
         * This I/O is sent by the write thread when a line is replaced. Since
         * the write thread is the only one sending write and erase commands,
         * there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_ppa_list;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: smeta I/O timed out\n");
        }

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

free_ppa_list:
        nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);

        return ret;
}
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
        u64 bpaddr = pblk_line_smeta_start(pblk, line);

        return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line)
{
        return pblk_line_submit_emeta_io(pblk, line, line->emeta_ssec, READ);
}
static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->flags = pblk_set_progr_mode(pblk, ERASE);
        rqd->bio = NULL;
}
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        pblk_setup_e_rq(pblk, &rqd, ppa);

        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not sync erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));
                rqd.error = ret;
                goto out;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: sync erase timed out\n");
        }

out:
        __pblk_end_io_erase(pblk, &rqd);

        return ret;
}
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.g.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                if (pblk_blk_erase_sync(pblk, ppa)) {
                        pr_err("pblk: failed to erase line %d\n", line->id);
                        return -ENOMEM;
                }
        } while (1);

        return 0;
}
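
/*
 * pblk_line_erase() runs at line-preparation time and erases the good
 * blocks synchronously, one at a time. The regular write path instead
 * schedules erases through pblk_blk_erase_async() (below), so that user
 * writes are not stalled behind erase latency.
 */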
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_set_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct line_smeta *smeta = line->smeta;
        struct line_emeta *emeta = line->emeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pr_debug("pblk: line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta->header.identifier = cpu_to_le32(PBLK_MAGIC);
        memcpy(smeta->header.uuid, pblk->instance_uuid, 16);
        smeta->header.id = cpu_to_le32(line->id);
        smeta->header.type = cpu_to_le16(line->type);
        smeta->header.version = cpu_to_le16(1);

        /* Start metadata */
        smeta->seq_nr = cpu_to_le64(line->seq_nr);
        smeta->window_wr_lun = cpu_to_le32(geo->nr_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta->prev_id = cpu_to_le32(cur->id);
                cur->emeta->next_id = cpu_to_le32(line->id);
        } else {
                smeta->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta->header.crc = cpu_to_le32(pblk_calc_meta_header_crc(pblk, smeta));
        smeta->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta));

        /* End metadata */
        memcpy(&emeta->header, &smeta->header, sizeof(struct line_header));
        emeta->seq_nr = cpu_to_le64(line->seq_nr);
        emeta->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta->nr_valid_lbas = cpu_to_le64(0);
        emeta->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta->crc = cpu_to_le32(0);
        emeta->prev_id = smeta->prev_id;

        return 1;
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int nr_bb = 0;
        u64 off;
        int bit = -1;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->sec_per_pl;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->sec_per_blk;
                if (bit >= lm->emeta_bb)
                        nr_bb++;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->sec_per_pl;
retry_smeta:
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
                pr_debug("pblk: line smeta I/O failed. Retry\n");
                off += geo->sec_per_pl;
                goto retry_smeta;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        bit = lm->sec_per_line;
        off = lm->sec_per_line - lm->emeta_sec;
        bitmap_set(line->invalid_bitmap, off, lm->emeta_sec);
        while (nr_bb) {
                off -= geo->sec_per_pl;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
                        nr_bb--;
                }
        }

        line->sec_in_line -= lm->emeta_sec;
        line->emeta_ssec = off;
        line->vsc = line->left_ssecs = line->left_msecs = line->sec_in_line;

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pr_err("pblk: unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}
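
/*
 * Resulting line layout (illustrative):
 *
 *	+-------+-----------------------------------+-------+
 *	| smeta |            user data              | emeta |
 *	+-------+-----------------------------------+-------+
 *
 * smeta starts at the first sector of the first good block; emeta occupies
 * the tail of the line, shifted towards the front by one plane-section for
 * every bad block past lm->emeta_bb so that it still fits.
 */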
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);

        line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->map_bitmap)
                return -ENOMEM;
        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* invalid_bitmap is special since it is used when line is closed. No
         * need to zero it; it will be initialized using bb info from
         * map_bitmap
         */
        line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                return -ENOMEM;
        }

        spin_lock(&line->lock);
        if (line->state != PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                WARN(1, "pblk: corrupted line state\n");
                return -EINTR;
        }
        line->state = PBLK_LINESTATE_OPEN;

        atomic_set(&line->left_eblks, blk_in_line);
        atomic_set(&line->left_seblks, blk_in_line);
        spin_unlock(&line->lock);

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        kref_init(&line->ref);

        return 0;
}
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);

        if (!pblk_line_init_bb(pblk, line, 0)) {
                list_add(&line->list, &l_mg->free_list);
                return -EINTR;
        }

        return 0;
}
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line = NULL;
        int bit;

        lockdep_assert_held(&l_mg->free_lock);

retry_get:
        if (list_empty(&l_mg->free_list)) {
                pr_err("pblk: no free lines\n");
                goto out;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pr_debug("pblk: line %d is bad\n", line->id);
                goto retry_get;
        }

        if (pblk_line_prepare(pblk, line)) {
                pr_err("pblk: failed to prepare line %d\n", line->id);
                list_add(&line->list, &l_mg->free_list);
                return NULL;
        }

out:
        return line;
}
static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_free(pblk, line);
        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_erase(pblk, retry_line)) {
                spin_lock(&l_mg->free_lock);
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        pblk_rl_free_lines_dec(&pblk->rl, retry_line);

        return retry_line;
}
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int meta_line;
        int is_next = 0;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        line->smeta = l_mg->sline_meta[meta_line].meta;
        line->emeta = l_mg->eline_meta[meta_line].meta;
        line->meta_line = meta_line;

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (l_mg->data_next) {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);
        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

        if (pblk_line_erase(pblk, line))
                return NULL;

retry_setup:
        if (!pblk_line_set_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        return line;
}
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *cur, *new;
        unsigned int left_seblks;
        int meta_line;
        int is_next = 0;

        cur = l_mg->data_line;
        new = l_mg->data_next;
        if (!new)
                return NULL;
        l_mg->data_line = new;

retry_erase:
        left_seblks = atomic_read(&new->left_seblks);
        if (left_seblks) {
                /* If line is not fully erased, erase it */
                if (atomic_read(&new->left_eblks)) {
                        if (pblk_line_erase(pblk, new))
                                return NULL;
                } else {
                        io_schedule();
                }
                goto retry_erase;
        }

        spin_lock(&l_mg->free_lock);
        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (l_mg->data_next) {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        new->smeta = l_mg->sline_meta[meta_line].meta;
        new->emeta = l_mg->eline_meta[meta_line].meta;
        new->meta_line = meta_line;

        memset(new->smeta, 0, lm->smeta_len);
        memset(new->emeta, 0, lm->emeta_len);
        spin_unlock(&l_mg->free_lock);

        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
        if (!pblk_line_set_metadata(pblk, new, cur)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, new, 1)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return NULL;

                goto retry_setup;
        }

        return new;
}
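
/*
 * Line rotation: pblk_line_get_first_data() opens the first data line and
 * pre-allocates data_next; once the current line fills up,
 * pblk_line_replace_data() promotes data_next to data_line, waits for any
 * outstanding erases on it, and allocates a fresh data_next.
 */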
void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
        if (line->map_bitmap)
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
        if (line->invalid_bitmap)
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}
void pblk_line_put(struct kref *ref)
{
        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_FREE;
        line->gc_group = PBLK_LINEGC_NONE;
        pblk_line_free(pblk, line);
        spin_unlock(&line->lock);

        spin_lock(&l_mg->free_lock);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_inc(&pblk->rl, line);
}
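
/*
 * pblk_line_put() is the kref release callback: the last reference dropped
 * on a line under GC frees its bitmaps and returns it to the free list,
 * making it eligible for allocation again through pblk_line_get().
 */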
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq *rqd;
        int err;

        rqd = mempool_alloc(pblk->r_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_r_rq_size);

        pblk_setup_e_rq(pblk, rqd, ppa);

        rqd->end_io = pblk_end_io_erase;
        rqd->private = pblk;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not async erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));
        }

        return err;
}
struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
        return pblk->l_mg.data_line;
}

struct pblk_line *pblk_line_get_data_next(struct pblk *pblk)
{
        return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
        return (line->left_msecs == 0);
}
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        line->emeta->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, line->emeta));

        if (pblk_line_submit_emeta_io(pblk, line, line->cur_sec, WRITE))
                pr_err("pblk: line %d close I/O failed\n", line->id);

        WARN(!bitmap_full(line->map_bitmap, line->sec_in_line),
                                "pblk: corrupt closed line %d\n", line->id);

        spin_lock(&l_mg->free_lock);
        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
        spin_unlock(&l_mg->free_lock);

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);

        list_add_tail(&line->list, move_list);

        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;

        spin_unlock(&line->lock);
        spin_unlock(&l_mg->gc_lock);
}
void pblk_line_close_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;

        pblk_line_close(pblk, line);
        mempool_free(line_ws, pblk->line_ws_pool);
}
void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
                pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

                pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
                                line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, pblk->line_ws_pool);
}
void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                      void (*work)(struct work_struct *))
{
        struct pblk_line_ws *line_ws;

        line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
        if (!line_ws)
                return;

        line_ws->pblk = pblk;
        line_ws->line = line;
        line_ws->priv = priv;

        INIT_WORK(&line_ws->ws, work);
        queue_work(pblk->kw_wq, &line_ws->ws);
}
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                  unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int lun_id = ppa_list[0].g.ch * geo->luns_per_chnl + ppa_list[0].g.lun;
        int ret;

        /*
         * Only send one inflight I/O per LUN. Since we map at a page
         * granularity, all ppas in the I/O will map to the same LUN
         */
#ifdef CONFIG_NVM_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
                                ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif
        /* If the LUN has been locked for this same request, do not attempt to
         * lock it again
         */
        if (test_and_set_bit(lun_id, lun_bitmap))
                return;

        rlun = &pblk->luns[lun_id];
        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
        if (ret) {
                switch (ret) {
                case -ETIME:
                        pr_err("pblk: lun semaphore timed out\n");
                        break;
                case -EINTR:
                        pr_err("pblk: lun semaphore timed out\n");
                        break;
                }
        }
}
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int nr_luns = geo->nr_luns;
        int bit = -1;

        while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
                rlun = &pblk->luns[bit];
                up(&rlun->wr_sem);
        }

        kfree(lun_bitmap);
}
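
/*
 * lun_bitmap records which LUN semaphores pblk_down_rq() has taken on
 * behalf of a request, so that pblk_up_rq() releases each LUN exactly once
 * before freeing the bitmap.
 */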
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
        struct ppa_addr l2p_ppa;

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
                pblk_map_invalidate(pblk, l2p_ppa);

        pblk_trans_map_set(pblk, lba, ppa);
        spin_unlock(&pblk->trans_lock);
}
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        pblk_update_map(pblk, lba, ppa);
}
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                       struct pblk_line *gc_line)
{
        struct ppa_addr l2p_ppa;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return 0;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        /* Prevent updated entries from being overwritten by GC */
        if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
                        pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
                ret = 0;
                goto out;
        }

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
        return ret;
}
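
/*
 * The gc_line check above is what makes GC writes safe to race with user
 * writes: if the L2P entry no longer points to the line being collected
 * (it was rewritten or moved to cache in the meantime), the stale GC copy
 * is simply dropped and the function returns 0.
 */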
void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                         struct ppa_addr entry_line)
{
        struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
#endif
        /* Invalidate and discard padded entries */
        if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->padded_wb);
#endif
                pblk_map_invalidate(pblk, ppa);
                return;
        }

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_line = pblk_trans_map_get(pblk, lba);

        /* Do not update L2P if the cacheline has been updated. In this case,
         * the mapped ppa must be invalidated
         */
        if (l2p_line.ppa != entry_line.ppa) {
                if (!pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);
                goto out;
        }

#ifdef CONFIG_NVM_DEBUG
        WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
}
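
/*
 * Here entry_line is the cacheline address the data was written from;
 * comparing it against the current L2P entry detects whether the entry was
 * updated while the write was in flight, in which case the newly written
 * ppa is invalidated instead of installed.
 */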
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs)
{
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++)
                ppas[i] = pblk_trans_map_get(pblk, blba + i);
        spin_unlock(&pblk->trans_lock);
}
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs)
{
        u64 lba;
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++) {
                lba = lba_list[i];
                if (lba == ADDR_EMPTY) {
                        ppas[i].ppa = ADDR_EMPTY;
                } else {
                        /* logic error: lba out-of-bounds. Ignore update */
                        if (!(lba < pblk->rl.nr_secs)) {
                                WARN(1, "pblk: corrupted L2P map request\n");
                                continue;
                        }
                        ppas[i] = pblk_trans_map_get(pblk, lba);
                }
        }
        spin_unlock(&pblk->trans_lock);
}