/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
static void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = pblk_ppa_to_line(pblk, *ppa);
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}
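/*
 * Note: the function below runs in the erase-completion path, so only
 * atomic allocations are allowed here; the (sleeping) admin command that
 * marks the block grown-bad is deferred to pblk_line_mark_bb() above,
 * scheduled on pblk->bb_wq.
 */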
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr ppa_addr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa;
	int pos = pblk_ppa_to_pos(geo, ppa_addr);

	pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	/* Not necessary to mark bad blocks on 2.0 spec. */
	if (geo->version == NVM_OCSSD_SPEC_20)
		return;

	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
	if (!ppa)
		return;

	*ppa = ppa_addr;
	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
						GFP_ATOMIC, pblk->bb_wq);
}
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct pblk_line *line;
	int pos;

	line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
	chunk = &line->chks[pos];

	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		chunk->state = NVM_CHK_ST_OFFLINE;
		pblk_mark_bb(pblk, line, rqd->ppa_addr);
	} else {
		chunk->state = NVM_CHK_ST_FREE;
	}

	atomic_dec(&pblk->inflight_io);
}
/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, &pblk->e_rq_pool);
}
/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing the returned structure
 */
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *meta;
	struct ppa_addr ppa;
	unsigned long len;
	int ret;

	ppa.ppa = 0;

	len = geo->all_chunks * sizeof(*meta);
	meta = kzalloc(len, GFP_KERNEL);
	if (!meta)
		return ERR_PTR(-ENOMEM);

	ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
	if (ret) {
		kfree(meta);
		return ERR_PTR(-EIO);
	}

	return meta;
}
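/*
 * Index into the flat chunk-metadata table returned above. The layout is
 * group-major, then parallel unit (LUN), then chunk. E.g., with the
 * illustrative values geo->num_lun = 8 and geo->num_chk = 1024, the chunk
 * at (grp 1, pu 2, chk 5) is meta[1 * 1024 * 8 + 2 * 1024 + 5], i.e.
 * meta[10245].
 */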
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
					struct nvm_chk_meta *meta,
					struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
	int lun_off = ppa.m.pu * geo->num_chk;
	int chk_off = ppa.m.chk;

	return meta + ch_off + lun_off + chk_off;
}
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line = pblk_ppa_to_line(pblk, ppa);
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}
/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		rq_size = pblk_g_rq_size;
		break;
	default:
		pool = &pblk->e_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}
/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	mempool_t *pool;

	switch (type) {
	case PBLK_WRITE:
		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
		/* fall through */
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		break;
	case PBLK_ERASE:
		pool = &pblk->e_rq_pool;
		break;
	default:
		pblk_err(pblk, "trying to free unknown rqd type\n");
		return;
	}

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
				rqd->dma_meta_list);
	mempool_free(rqd, pool);
}
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, &pblk->page_bio_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(&pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pblk_err(pblk, "could not add page to bio\n");
			mempool_free(page, &pblk->page_bio_pool);
			goto err;
		}
	}

	return 0;
err:
	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
	return -1;
}
void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, wtimer);

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}
static void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}
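/*
 * Pick the GC list a line belongs to from its valid sector count (vsc):
 * lines with write errors go first, then fully invalid lines (vsc == 0),
 * then the high/mid/low invalidity buckets, and finally fully valid lines
 * (vsc == sec_in_line), which offer nothing to GC. A vsc outside
 * [0, sec_in_line] can only come from accounting corruption.
 */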
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	lockdep_assert_held(&line->lock);

	if (line->w_err_gc->has_write_err) {
		if (line->gc_group != PBLK_LINEGC_WERR) {
			line->gc_group = PBLK_LINEGC_WERR;
			move_list = &l_mg->gc_werr_list;
			pblk_rl_werr_line_in(&pblk->rl);
		}
	} else if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io(dev, rqd);
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io_sync(dev, rqd);
}

static void pblk_bio_map_addr_endio(struct bio *bio)
{
	bio_put(bio);
}
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pblk_err(pblk, "could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pblk_err(pblk, "could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}

	bio->bi_end_io = pblk_bio_map_addr_endio;
out:
	return bio;
}
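/*
 * Round the sectors to sync down to a multiple of the minimal write size.
 * E.g., with the illustrative values min_write_pgs = 8 and
 * sec_per_write = 64: 70 available sectors sync 64, 20 sectors sync 16
 * (two minimal writes), and 3 sectors sync nothing unless a flush forces
 * one padded 8-sector minimal write.
 */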
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);
}
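/*
 * Sector allocation below works on line-local addresses: cur_sec is a
 * cursor into map_bitmap and every allocated sector has its bit set;
 * the deallocation above simply walks the cursor back, clearing bits.
 */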
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}
/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->csecs;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;

	if (dir == PBLK_WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pblk_err(pblk, "corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pblk_err(pblk, "corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}
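/*
 * smeta lives at the beginning of the first good block of the line, so
 * its start address is that block's position times the optimal write
 * size (ws_opt), expressed in line-local sectors.
 */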
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->ws_opt;
}
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == PBLK_WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE) {
			pblk_log_write_err(pblk, &rqd);
			ret = 1;
		} else if (dir == PBLK_READ)
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, PBLK_READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd = {NULL};
	int ret;

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.a.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pblk_err(pblk, "failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}
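/*
 * There are PBLK_DATA_LINES preallocated smeta/emeta buffers; a line in
 * preparation claims a free slot in meta_bitmap, busy-waiting (with the
 * free_lock temporarily dropped) until an in-flight metadata write
 * completes and releases its slot.
 */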
static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pblk_debug(pblk, "line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));

	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
	emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}
static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
	if (!line->map_bitmap)
		return -ENOMEM;

	memset(line->map_bitmap, 0, lm->sec_bitmap_len);

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
	if (!line->invalid_bitmap) {
		mempool_free(line->map_bitmap, l_mg->bitmap_pool);
		line->map_bitmap = NULL;
		return -ENOMEM;
	}

	return 0;
}
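/*
 * In the initialization below, map_bitmap marks sectors that must not
 * take user data (bad blocks and smeta), while invalid_bitmap starts as
 * a copy of it and additionally reserves the emeta region, carved
 * backwards from the end of the line in ws_opt-sized chunks while
 * skipping bad blocks.
 */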
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	u64 off;
	int bit = -1;
	int emeta_secs;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->ws_opt;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->clba;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->ws_opt;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
		pblk_debug(pblk, "line smeta I/O failed. Retry\n");
		return 0;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	emeta_secs = lm->emeta_sec[0];
	off = lm->sec_per_line;
	while (emeta_secs) {
		off -= geo->ws_opt;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
			emeta_secs -= geo->ws_opt;
		}
	}

	line->emeta_ssec = off;
	line->sec_in_line -= lm->emeta_sec[0];
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pblk_err(pblk, "unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}
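/*
 * On the first use of a line, chunks already reported in the free state
 * do not need an erase: they are marked pre-erased in erase_bitmap below
 * and subtracted from the number of blocks left to erase.
 */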
static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int blk_to_erase = atomic_read(&line->blk_in_line);
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		/* Free chunks should not be erased */
		if (state & NVM_CHK_ST_FREE) {
			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
							line->erase_bitmap);
			blk_to_erase--;
		}
	}

	return blk_to_erase;
}
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);
	int blk_to_erase;

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	spin_lock(&line->lock);

	/* If we have not written to this line, we need to mark up free chunks
	 * as already erased
	 */
	if (line->state == PBLK_LINESTATE_NEW) {
		blk_to_erase = pblk_prepare_new_line(pblk, line);
		line->state = PBLK_LINESTATE_FREE;
	} else {
		blk_to_erase = blk_in_line;
	}

	if (blk_in_line < lm->min_blk_line) {
		spin_unlock(&line->lock);
		return -EAGAIN;
	}

	if (line->state != PBLK_LINESTATE_FREE) {
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		spin_unlock(&line->lock);
		return -EINTR;
	}

	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_to_erase);
	atomic_set(&line->left_seblks, blk_to_erase);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	kref_init(&line->ref);

	return 0;
}
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	ret = pblk_line_alloc_bitmaps(pblk, line);
	if (ret)
		return ret;

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);
	return 0;
}
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

static void pblk_line_reinit(struct pblk_line *line)
{
	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

void pblk_line_free(struct pblk_line *line)
{
	struct pblk *pblk = line->pblk;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);

	pblk_line_reinit(line);
}
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pblk_err(pblk, "no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pblk_debug(pblk, "line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		switch (ret) {
		case -EAGAIN:
			list_add(&line->list, &l_mg->bad_list);
			goto retry;
		case -EINTR:
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		default:
			pblk_err(pblk, "failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}
static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->map_bitmap = line->map_bitmap;
	retry_line->invalid_bitmap = line->invalid_bitmap;
	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_reinit(line);

	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}
static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_alloc_bitmaps(pblk, line))
		return NULL;

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return line;
}
void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;

	line = pblk_ppa_to_line(pblk, ppa);
	kref_put(&line->ref, pblk_line_put_wq);
}

void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list;
	int i;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	for (i = 0; i < rqd->nr_ppas; i++)
		pblk_ppa_to_line_put(pblk, ppa_list[i]);
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
}
static void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pblk_err(pblk, "sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}
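/*
 * Teardown ordering below: mark the instance as recovering (so no new
 * lines are handed out), drain the write buffer, wait for in-flight I/O,
 * pad the currently open line, and finally persist all outstanding emeta.
 */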
void __pblk_pipeline_flush(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);
}
void __pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}

void pblk_pipeline_stop(struct pblk *pblk)
{
	__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
}
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		goto out;
	l_mg->data_line = new;

	spin_lock(&l_mg->free_lock);
	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

	if (pblk_line_alloc_bitmaps(pblk, new))
		return NULL;

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

out:
	return new;
}
static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(line);

	if (line->w_err_gc->has_write_err) {
		pblk_rl_werr_line_out(&pblk->rl);
		line->w_err_gc->has_write_err = 0;
	}

	spin_unlock(&line->lock);
	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}
static void pblk_line_put_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, &pblk->gen_ws_pool);
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);
}

void pblk_line_put_wq(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
	if (!line_put_ws)
		return;

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);
}
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line_id(ppa),
					pblk_ppa_to_pos(geo, ppa));
	}

	return err;
}
struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}
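/*
 * Closing a line parks it on the GC list matching its valid sector count
 * and releases resources only needed while the line is open: the map
 * bitmap and the shared smeta/emeta buffer slot.
 */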
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;
	int i;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		if (!(state & NVM_CHK_ST_OFFLINE))
			line->chks[pos].state = NVM_CHK_ST_CLOSED;
	}

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

	/* No need for exact vsc value; avoid a big line lock and take an
	 * approximation instead.
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
		emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
		memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
		emeta_buf->header.id = cpu_to_le32(line->id);
		emeta_buf->header.type = cpu_to_le16(line->type);
		emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
		emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
		emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
	}

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);

	/* Update the in-memory start address for emeta, in case it has
	 * shifted due to write errors
	 */
	if (line->emeta_ssec != line->cur_sec)
		line->emeta_ssec = line->cur_sec;

	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}
static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	unsigned int lba_list_size = lm->emeta_len[2];
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
	struct pblk_emeta *emeta = line->emeta;

	w_err_gc->lba_list = pblk_malloc(lba_list_size,
				l_mg->emeta_alloc_type, GFP_KERNEL);
	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
				lba_list_size);
}
void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	/* Write errors make the emeta start address stored in smeta invalid,
	 * so keep a copy of the lba list until we've gc'd the line
	 */
	if (w_err_gc->has_write_err)
		pblk_save_lba_list(pblk, line);

	pblk_line_close(pblk, line);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}
static void __pblk_down_chunk(struct pblk *pblk, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
				-ret);
}

void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa);

	__pblk_down_chunk(pblk, pos);
}
void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa);

	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_chunk(pblk, pos);
}
void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa);

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}

void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int num_lun = geo->all_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}
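/*
 * GC updates the L2P only if it still points to the sector being
 * relocated (ppa_gc); if a user write raced in and remapped the lba,
 * the GC copy is stale and must be dropped (return 0).
 */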
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
		atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}
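/*
 * Lookups for reads take a reference on each line addressed while
 * trans_lock is held, guaranteeing the line cannot be freed or recycled
 * while the read is in flight; the reference is dropped on completion,
 * e.g. via pblk_ppa_to_line_put().
 */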
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);

			kref_get(&line->ref);
		}
	}
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}