/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 */

#include "pblk.h"
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
	u32 crc;

	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
	if (le32_to_cpu(emeta_buf->crc) != crc)
		return 1;

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
		return 1;

	return 0;
}
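
/*
 * Rebuild the L2P mapping for a line from the lba list stored in its
 * emeta: every mapped sector is replayed into the L2P table, bad blocks
 * are skipped, and sectors marked ADDR_EMPTY are accounted as invalid.
 */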
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	u64 data_start, data_end;
	u64 nr_valid_lbas, nr_lbas = 0;
	u64 i;

	lba_list = emeta_to_lbas(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	data_end = line->emeta_ssec;
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < data_end; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);
			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
				line->id, nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}
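
/*
 * Number of sectors usable for data in a line: the line total minus
 * smeta, emeta and the sectors lost to bad blocks.
 */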
static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);

	return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
				nr_bb * geo->clba;
}
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list;
	dma_addr_t dma_meta_list;
};
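
/*
 * Read back the sectors between r_ptr and the line's current write
 * pointer and recover their lba mappings from the per-sector OOB
 * metadata. Runs after padding, so every page in the range is expected
 * to be readable.
 */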
static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, u64 r_ptr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 r_ptr_int;
	int left_ppas;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	left_ppas = line->cur_sec - r_ptr;
	if (!left_ppas)
		return 0;

	r_ptr_int = r_ptr;

next_read_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->csecs;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			r_ptr_int += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, r_ptr_int, line->id);
	}

	/* If read fails, more padding is needed */
	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* At this point, the read should not fail. If it does, it is a problem
	 * we cannot recover from here. Need FTL log.
	 */
	if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
		pblk_err(pblk, "L2P recovery failed (%d)\n", rqd->error);
		return -EINTR;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_read_rq;

	return 0;
}
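
/*
 * Padding writes complete asynchronously: a kref on the shared
 * pblk_pad_rq counts outstanding requests, and the completion below
 * fires when the last reference is dropped.
 */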
static void pblk_recov_complete(struct kref *ref)
{
	struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

	complete(&pad_rq->wait);
}
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
	struct pblk_pad_rq *pad_rq = rqd->private;
	struct pblk *pblk = pad_rq->pblk;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
	kref_put(&pad_rq->ref, pblk_recov_complete);
}
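
/*
 * Pad left_ppas sectors of the line with ADDR_EMPTY writes so that
 * partially written flash pages become readable again.
 */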
static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
			      int left_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_pad_rq *pad_rq;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	u64 w_ptr = line->cur_sec;
	int left_line_ppas, rq_ppas, rq_len;
	int i, j;
	int ret = 0;

	spin_lock(&line->lock);
	left_line_ppas = line->left_msecs;
	spin_unlock(&line->lock);

	pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
	if (!pad_rq)
		return -ENOMEM;

	data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
	if (!data) {
		ret = -ENOMEM;
		goto free_rq;
	}

	pad_rq->pblk = pblk;
	init_completion(&pad_rq->wait);
	kref_init(&pad_rq->ref);

next_pad_rq:
	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (rq_ppas < pblk->min_write_pgs) {
		pblk_err(pblk, "corrupted pad line %d\n", line->id);
		goto fail_free_pad;
	}
	rq_len = rq_ppas * geo->csecs;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list) {
		ret = -ENOMEM;
		goto fail_free_pad;
	}
	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_meta;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PWRITE;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_recov;
	rqd->private = pad_rq;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
			struct ppa_addr dev_ppa;
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

			pblk_map_invalidate(pblk, dev_ppa);
			lba_list[w_ptr] = meta_list[i].lba = addr_empty;
			rqd->ppa_list[i] = dev_ppa;
		}
	}

	kref_get(&pad_rq->ref);
	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
		goto fail_free_bio;
	}

	left_line_ppas -= rq_ppas;
	left_ppas -= rq_ppas;
	if (left_ppas && left_line_ppas)
		goto next_pad_rq;

	kref_put(&pad_rq->ref, pblk_recov_complete);

	if (!wait_for_completion_io_timeout(&pad_rq->wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pblk_err(pblk, "pad write timed out\n");
		ret = -ETIME;
	}

	if (!pblk_line_is_full(line))
		pblk_err(pblk, "corrupted padded line: %d\n", line->id);

	vfree(data);
free_rq:
	kfree(pad_rq);
	return ret;

fail_free_bio:
	bio_put(bio);
fail_free_meta:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
fail_free_pad:
	kfree(pad_rq);
	vfree(data);
	return ret;
}
/* When this function is called, it means that not all upper pages have been
 * written in a page that contains valid data. In order to recover this data,
 * we first find the write pointer on the device, then we pad all necessary
 * sectors, and finally attempt to read the valid data.
 */
static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 w_ptr = 0, r_ptr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int rec_round;
	int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	/* we could recover up until the line write pointer */
	r_ptr = line->cur_sec;
	rec_round = 0;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->csecs;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, w_ptr, line->id);
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* This should not happen since the read failed during normal recovery,
	 * but the media works funny sometimes...
	 */
	if (!rec_round++ && !rqd->error) {
		rec_round = 0;
		for (i = 0; i < rqd->nr_ppas; i++, r_ptr++) {
			u64 lba = le64_to_cpu(meta_list[i].lba);

			if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
				continue;

			pblk_update_map(pblk, lba, rqd->ppa_list[i]);
		}
	}

	/* Reached the end of the written line */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		int pad_secs, nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		pad_secs = pblk_pad_distance(pblk);
		if (pad_secs > line->left_msecs)
			pad_secs = line->left_msecs;

		ret = pblk_recov_pad_oob(pblk, line, pad_secs);
		if (ret)
			pblk_err(pblk, "OOB padding failed (err:%d)\n", ret);

		ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
		if (ret)
			pblk_err(pblk, "OOB read failed (err:%d)\n", ret);

		left_ppas = 0;
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}
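
/*
 * First recovery pass: read the line sequentially from the start,
 * replaying lba mappings from OOB metadata. On a read error the failed
 * sectors are rolled back; *done is cleared if the error is anything
 * other than an empty page, signalling that the padding-based second
 * pass (pblk_recov_scan_all_oob) is still needed.
 */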
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, int *done)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 paddr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int left_ppas = pblk_calc_sec_in_line(pblk, line);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	*done = 1;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->csecs;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr, line->id);
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		bio_put(bio);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* Reached the end of the written line */
	if (rqd->error) {
		int nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		left_ppas = 0;
		rqd->nr_ppas = bit;

		if (rqd->error != NVM_RSP_ERR_EMPTYPAGE)
			*done = 0;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}
/* Scan a line for lbas in the out-of-band (OOB) metadata area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_recov_alloc p;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int done, ret = 0;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto free_meta_list;
	}

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	ret = pblk_recov_scan_oob(pblk, line, p, &done);
	if (ret) {
		pblk_err(pblk, "could not recover L2P from OOB\n");
		goto out;
	}

	if (!done) {
		ret = pblk_recov_scan_all_oob(pblk, line, p);
		if (ret) {
			pblk_err(pblk, "could not recover L2P from OOB\n");
			goto out;
		}
	}

	if (pblk_line_is_full(line))
		pblk_line_recov_close(pblk, line);

out:
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

	return ret;
}
/* Insert lines ordered by sequence number (seq_nr) on the list */
static void pblk_recov_line_add_ordered(struct list_head *head,
					struct pblk_line *line)
{
	struct pblk_line *t = NULL;

	list_for_each_entry(t, head, list)
		if (t->seq_nr > line->seq_nr)
			break;

	__list_add(&line->list, t->list.prev, &t->list);
}
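
/*
 * Find the first emeta sector by walking backwards from the end of the
 * line, skipping blocks marked bad, until all emeta sectors are placed.
 */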
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int emeta_secs;
	u64 emeta_start;
	struct ppa_addr ppa;
	int pos;

	emeta_secs = lm->emeta_sec[0];
	emeta_start = lm->sec_per_line;

	while (emeta_secs) {
		emeta_start--;
		ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);
		if (!test_bit(pos, line->blk_bitmap))
			emeta_secs--;
	}

	return emeta_start;
}
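
/*
 * Lines with a different emeta major version cannot be recovered; a
 * newer minor version is tolerated and only reported on debug builds.
 */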
static int pblk_recov_check_line_version(struct pblk *pblk,
					 struct line_emeta *emeta)
{
	struct line_header *header = &emeta->header;

	if (header->version_major != EMETA_VERSION_MAJOR) {
		pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
			 header->version_major, EMETA_VERSION_MAJOR);
		return 1;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (header->version_minor > EMETA_VERSION_MINOR)
		pblk_info(pblk, "newer line minor version found: %d\n",
			  header->version_minor);
#endif

	return 0;
}
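
/*
 * Restore the write amplification counters saved in emeta so that WA
 * accounting survives a restart; the *_rst_wa copies seed the baseline
 * that later counter resets are computed against.
 */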
static void pblk_recov_wa_counters(struct pblk *pblk,
				   struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct line_header *header = &emeta->header;
	struct wa_counters *wa = emeta_to_wa(lm, emeta);

	/* WA counters were introduced in emeta version 0.2 */
	if (header->version_major > 0 || header->version_minor >= 2) {
		u64 user = le64_to_cpu(wa->user);
		u64 pad = le64_to_cpu(wa->pad);
		u64 gc = le64_to_cpu(wa->gc);

		atomic64_set(&pblk->user_wa, user);
		atomic64_set(&pblk->pad_wa, pad);
		atomic64_set(&pblk->gc_wa, gc);

		pblk->user_rst_wa = user;
		pblk->pad_rst_wa = pad;
		pblk->gc_rst_wa = gc;
	}
}
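
/*
 * A line counts as written if the chunk backing its first non-bad
 * block (the one holding smeta) has left the free state.
 */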
static int pblk_line_was_written(struct pblk_line *line,
				 struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct ppa_addr bppa;
	int smeta_blk;

	if (line->state == PBLK_LINESTATE_BAD)
		return 0;

	smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (smeta_blk >= lm->blk_per_line)
		return 0;

	bppa = pblk->luns[smeta_blk].bppa;
	chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

	if (chunk->state & NVM_CHK_ST_FREE)
		return 0;

	return 1;
}
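
/*
 * Scan-based L2P recovery, called from the target's init path while no
 * FTL snapshot exists: read smeta on all lines to find written ones,
 * order them by sequence number, then rebuild the L2P table line by
 * line from emeta, falling back to OOB metadata when emeta is missing
 * or corrupted. Returns the open data line to resume writing on, NULL
 * if every line was closed, or an ERR_PTR on fatal errors.
 */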
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = (struct line_smeta *)smeta;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		if (!pblk_line_was_written(line, pblk))
			continue;

		/* Lines that cannot be read are assumed not written here */
		if (pblk_line_read_smeta(pblk, line))
			continue;

		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
			pblk_err(pblk, "found incompatible line version %u\n",
				 smeta_buf->header.version_major);
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
			valid_uuid = 1;
		}

		if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
			pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
						line->id, smeta_buf->seq_nr);
	}

	if (!found_lines) {
		pblk_setup_uuid(pblk);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of the L2P table */
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		recovered_lines++;

		line->emeta_ssec = pblk_line_emeta_start(pblk, line);
		line->emeta = emeta;
		memset(line->emeta->buf, 0, lm->emeta_len[0]);

		if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_line_version(pblk, line->emeta->buf))
			return ERR_PTR(-EINVAL);

		pblk_recov_wa_counters(pblk, line->emeta->buf);

		if (pblk_recov_l2p_from_emeta(pblk, line))
			pblk_recov_l2p_from_oob(pblk, line);

next:
		if (pblk_line_is_full(line)) {
			struct list_head *move_list;

			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_CLOSED;
			move_list = pblk_line_gc_list(pblk, line);
			spin_unlock(&line->lock);

			spin_lock(&l_mg->gc_lock);
			list_move_tail(&line->list, move_list);
			spin_unlock(&l_mg->gc_lock);

			kfree(line->map_bitmap);
			line->map_bitmap = NULL;
			line->smeta = NULL;
			line->emeta = NULL;
		} else {
			if (open_lines > 1) {
				pblk_err(pblk, "failed to recover L2P\n");
				goto out;
			}
			line->meta_line = meta_line;
			data_line = line;
			open_lines++;
		}
	}

	spin_lock(&l_mg->free_lock);
	if (!open_lines) {
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		pblk_line_replace_data(pblk);
	} else {
		/* Allocate next line for preparation */
		l_mg->data_next = pblk_line_get(pblk);
		if (l_mg->data_next) {
			l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
			l_mg->data_next->type = PBLK_LINETYPE_DATA;
			is_next = 1;
		}
	}
	spin_unlock(&l_mg->free_lock);

	if (is_next)
		pblk_line_erase(pblk, l_mg->data_next);

out:
	if (found_lines != recovered_lines)
		pblk_err(pblk, "failed to recover all found lines %d/%d\n",
				found_lines, recovered_lines);

	return data_line;
}
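
/*
 * Pad the rest of the current data line during graceful tear down so
 * that it can be closed with valid emeta.
 */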
int pblk_recov_pad(struct pblk *pblk)
{
	struct pblk_line *line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int left_msecs;
	int ret = 0;

	spin_lock(&l_mg->free_lock);
	line = l_mg->data_line;
	left_msecs = line->left_msecs;
	spin_unlock(&l_mg->free_lock);

	ret = pblk_recov_pad_oob(pblk, line, left_msecs);
	if (ret) {
		pblk_err(pblk, "tear down padding failed (%d)\n", ret);
		return ret;
	}

	pblk_line_close_meta(pblk, line);
	return ret;
}