/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_dev_ppa_to_pos(geo, *ppa);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
}
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_line *line;

	line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		struct ppa_addr *ppa;

		ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
		if (!ppa)
			return;

		*ppa = rqd->ppa_addr;
		pblk_mark_bb(pblk, line, ppa);
	}
}
/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, pblk->g_rq_pool);
}
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
		spin_unlock(&line->lock);
		return;
	}

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}
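
/*
 * Why the state is re-checked under gc_lock (illustrative trace, not part
 * of the original source):
 *
 *	invalidating path		GC thread
 *	-----------------		---------
 *	spin_unlock(&line->lock)
 *					line->state = PBLK_LINESTATE_GC
 *	spin_lock(&l_mg->gc_lock)
 *	spin_lock(&line->lock)
 *	state == GC -> bail out
 *
 * Without the second check, list_move_tail() could move a line that GC has
 * already claimed. Note the ordering: gc_lock is taken before line->lock.
 */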
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_tgt_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	if (rw == WRITE) {
		pool = pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
	} else {
		pool = pblk->g_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
	mempool_t *pool;

	if (rw == WRITE)
		pool = pblk->w_rq_pool;
	else
		pool = pblk->g_rq_pool;

	mempool_free(rqd, pool);
}
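
/*
 * Usage sketch (illustrative, not part of the original driver): the I/O
 * paths pair these helpers around a request, e.g.
 *
 *	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, READ);
 *
 *	... fill rqd, submit, wait for completion ...
 *	pblk_free_rqd(pblk, rqd, READ);
 *
 * mempool_alloc() with GFP_KERNEL sleeps until an element is available, so
 * pblk_alloc_rqd() never returns NULL.
 */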
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, pblk->page_pool);
	}
}
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(pblk->page_pool, flags);
		if (!page)
			goto err;

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, pblk->page_pool);
			goto err;
		}
	}

	return 0;
err:
	pblk_bio_free_pages(pblk, bio, 0, i - 1);
	return -1;
}
static void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}
void pblk_write_timer_fn(unsigned long data)
{
	struct pblk *pblk = (struct pblk *)data;

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}
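
/*
 * Setup sketch (illustrative; the actual wiring lives in pblk's init path,
 * which is not part of this file): with the pre-4.15 timer API used here,
 * the timer would be armed roughly as
 *
 *	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
 *	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
 *
 * so that pblk_write_timer_fn() periodically re-kicks the writer thread.
 */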
void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}
void pblk_end_bio_sync(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}
void pblk_flush_writer(struct pblk *pblk)
{
	struct bio *bio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return;

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_OP_FLUSH);
	bio->bi_private = &wait;
	bio->bi_end_io = pblk_end_bio_sync;

	ret = pblk_write_to_cache(pblk, bio, 0);
	if (ret == NVM_IO_OK) {
		if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
			pr_err("pblk: flush cache timed out\n");
		}
	} else if (ret != NVM_IO_DONE) {
		pr_err("pblk: tear down bio failed\n");
	}

	if (bio->bi_status)
		pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);
}
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}
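
/*
 * Worked example (illustrative numbers, not from the source): assume
 * sec_in_line = 1024, mid_thrs = 256 and high_thrs = 512. A line with
 * vsc = 0 is "full" (nothing valid left, cheapest to reclaim), vsc = 100
 * goes to the high-benefit GC list, vsc = 400 to mid, vsc = 900 to low,
 * and vsc = 1024 (every sector still valid) to the empty-benefit list.
 * Any vsc above sec_in_line is impossible and marks the line corrupt.
 * Note the naming inversion: the fewer valid sectors, the higher the GC
 * benefit.
 */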
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}
struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
	struct ppa_addr ppa;

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	return ppa;
}
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	struct ppa_addr *ppa_list;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
	if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (rqd->opcode == NVM_OP_PWRITE) {
		struct pblk_line *line;
		struct ppa_addr ppa;
		int i;

		for (i = 0; i < rqd->nr_ppas; i++) {
			ppa = ppa_list[i];
			line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

			spin_lock(&line->lock);
			if (line->state != PBLK_LINESTATE_OPEN) {
				pr_err("pblk: bad ppa: line:%d,state:%d\n",
							line->id, line->state);
				WARN_ON(1);
				spin_unlock(&line->lock);
				return -EINVAL;
			}
			spin_unlock(&line->lock);
		}
	}
#endif
	return nvm_submit_io(dev, rqd);
}
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}
out:
	return bio;
}
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}
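
/*
 * Worked example (illustrative values): with min_write_pgs = 8 and
 * sec_per_write = 64, secs_avail = 70 syncs 64 sectors; secs_avail = 21
 * syncs 16 (two full minimal writes, 8 * (21 / 8)); secs_avail = 5 syncs
 * nothing unless secs_to_flush is set, in which case the minimum of 8 is
 * forced so the flush can make progress and the write path pads the
 * difference.
 */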
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
}
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}
/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->sec_size;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	if (dir == WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_dev_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_dev_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
							line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: emeta I/O timed out\n");
	}
	reinit_completion(&wait);

	if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->sec_per_pl;
}
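
/*
 * Example (illustrative geometry): with sec_per_pl = 16 and block 0 of the
 * line marked bad in blk_bitmap, the first zero bit is 1, so smeta starts
 * at sector 16 of the line rather than sector 0.
 */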
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: smeta I/O timed out\n");
	}

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, READ);
}
static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, ERASE);
	rqd->bio = NULL;
}
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: sync erase timed out\n");
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.g.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		if (pblk_blk_erase_sync(pblk, ppa)) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return -ENOMEM;
		}
	} while (1);

	return 0;
}
static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version = cpu_to_le16(1);

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));
	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int nr_bb = 0;
	u64 off;
	int bit = -1;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->sec_per_pl;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->sec_per_blk;
		if (bit >= lm->emeta_bb)
			nr_bb++;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->sec_per_pl;
retry_smeta:
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		off += geo->sec_per_pl;
		goto retry_smeta;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	bit = lm->sec_per_line;
	off = lm->sec_per_line - lm->emeta_sec[0];
	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
	while (nr_bb) {
		off -= geo->sec_per_pl;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
			nr_bb--;
		}
	}

	line->sec_in_line -= lm->emeta_sec[0];
	line->emeta_ssec = off;
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);

	line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->map_bitmap)
		return -ENOMEM;
	memset(line->map_bitmap, 0, lm->sec_bitmap_len);

	/* invalid_bitmap is special since it is used when the line is closed.
	 * No need to zero it; it will be initialized using bb info from
	 * map_bitmap
	 */
	line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->invalid_bitmap) {
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
		return -ENOMEM;
	}

	spin_lock(&line->lock);
	if (line->state != PBLK_LINESTATE_FREE) {
		spin_unlock(&line->lock);
		WARN(1, "pblk: corrupted line state\n");
		return -EINTR;
	}
	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_in_line);
	atomic_set(&line->left_seblks, blk_in_line);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	kref_init(&line->ref);

	return 0;
}
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line);

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	return 0;
}
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line = NULL;
	int bit;

	lockdep_assert_held(&l_mg->free_lock);

retry_get:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		goto out;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry_get;
	}

	if (pblk_line_prepare(pblk, line)) {
		pr_err("pblk: failed to prepare line %d\n", line->id);
		list_add(&line->list, &l_mg->free_list);
		return NULL;
	}

out:
	return line;
}
static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_free(pblk, line);
	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_erase(pblk, retry_line)) {
		spin_lock(&l_mg->free_lock);
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	pblk_rl_free_lines_dec(&pblk->rl, retry_line);

	return retry_line;
}
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int is_next = 0;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (l_mg->data_next) {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line);
	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

	if (pblk_line_erase(pblk, line))
		return NULL;

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	return line;
}
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new;
	unsigned int left_seblks;
	int is_next = 0;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		return NULL;
	l_mg->data_line = new;

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				return NULL;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

	spin_lock(&l_mg->free_lock);
	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (l_mg->data_next) {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return NULL;

		goto retry_setup;
	}

	return new;
}
void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
	if (line->map_bitmap)
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
	if (line->invalid_bitmap)
		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}
void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(pblk, line);
	spin_unlock(&line->lock);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
	memset(rqd, 0, pblk_g_rq_size);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));
	}

	return err;
}
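
/*
 * Usage sketch (illustrative, not from this file): the mapping path erases
 * the next block of a line asynchronously while user writes proceed, e.g.
 *
 *	struct ppa_addr erase_ppa;
 *
 *	... pick the block to erase, then:
 *	pblk_blk_erase_async(pblk, erase_ppa);
 *
 * Completion runs pblk_end_io_erase(), which marks grown bad blocks via
 * pblk_mark_bb() and returns the request to g_rq_pool.
 */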
struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct list_head *move_list;

	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;

	/* No need for an exact vsc value; avoid a big line lock and take an
	 * approximation instead
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);
	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);
}
void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;

	pblk_line_close(pblk, line);
	mempool_free(line_ws, pblk->line_ws_pool);
}
void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
		pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, pblk->line_ws_pool);
}
void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *))
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
	if (!line_ws)
		return;

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(pblk->kw_wq, &line_ws->ws);
}
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif
	/* If the LUN has been locked for this same request, do not attempt to
	 * take it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	rlun = &pblk->luns[pos];
	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
	if (ret) {
		switch (ret) {
		case -ETIME:
			pr_err("pblk: lun semaphore timed out\n");
			break;
		case -EINTR:
			pr_err("pblk: lun semaphore interrupted\n");
			break;
		}
	}
}
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int nr_luns = geo->nr_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}
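
/*
 * Usage sketch (illustrative): a vectored write takes the semaphore for the
 * single LUN its ppas map to before submission and releases it on
 * completion:
 *
 *	pblk_down_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
 *	... submit the write request; on its completion:
 *	pblk_up_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
 *
 * The shared lun_bitmap lets several requests belonging to one user command
 * take each LUN semaphore only once.
 */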
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr l2p_ppa;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
		pblk_map_invalidate(pblk, l2p_ppa);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line)
{
	struct ppa_addr l2p_ppa;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	/* Prevent updated entries from being overwritten by GC */
	if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
				pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}
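
/*
 * Race illustrated (not part of the original source): while GC is moving
 * lba X out of gc_line, the user may rewrite X, placing a fresh entry in
 * the write cache. The check above detects that the L2P entry no longer
 * points to gc_line (or already points to cache) and returns 0 so the
 * stale GC copy is dropped instead of overwriting newer data.
 */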
void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
			 struct ppa_addr entry_line)
{
	struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		pblk_map_invalidate(pblk, ppa);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_line = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (l2p_line.ppa != entry_line.ppa) {
		if (!pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);
}
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++)
		ppas[i] = pblk_trans_map_get(pblk, blba + i);
	spin_unlock(&pblk->trans_lock);
}
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba == ADDR_EMPTY) {
			ppas[i].ppa = ADDR_EMPTY;
		} else {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}
1738 spin_unlock(&pblk->trans_lock);