/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"

static void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = pblk_ppa_to_line(pblk, *ppa);
                pos = pblk_ppa_to_pos(&dev->geo, *ppa);

                pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
                                line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, &pblk->gen_ws_pool);
}

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr ppa_addr)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa;
        int pos = pblk_ppa_to_pos(geo, ppa_addr);

        pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        /* Not necessary to mark bad blocks on 2.0 spec. */
        if (geo->version == NVM_OCSSD_SPEC_20)
                return;

        ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
        if (!ppa)
                return;

        *ppa = ppa_addr;
        pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
                                                GFP_ATOMIC, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct pblk_line *line;
        int pos;

        line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
        pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
        chunk = &line->chks[pos];

        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                chunk->state = NVM_CHK_ST_OFFLINE;
                pblk_mark_bb(pblk, line, rqd->ppa_addr);
        } else {
                chunk->state = NVM_CHK_ST_FREE;
        }

        atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, &pblk->e_rq_pool);
}

/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing the returned structure
 */
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *meta;
        struct ppa_addr ppa;
        unsigned long len;
        int ret;

        ppa.ppa = 0;

        len = geo->all_chunks * sizeof(*meta);
        meta = kzalloc(len, GFP_KERNEL);
        if (!meta)
                return ERR_PTR(-ENOMEM);

        ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
        if (ret) {
                kfree(meta);
                return ERR_PTR(-EIO);
        }

        return meta;
}

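/*
 * Index into the flat chunk metadata array returned by
 * pblk_get_chunk_meta(). Entries are laid out group-major:
 * group -> parallel unit (LUN) -> chunk, so the offset is
 * grp * num_chk * num_lun + pu * num_chk + chk.
 */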
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
                                              struct nvm_chk_meta *meta,
                                              struct ppa_addr ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
        int lun_off = ppa.m.pu * geo->num_chk;
        int chk_off = ppa.m.chk;

        return meta + ch_off + lun_off + chk_off;
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        WARN_ON(line->state == PBLK_LINESTATE_FREE);

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        le32_add_cpu(line->vsc, -1);

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;

#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line = pblk_ppa_to_line(pblk, ppa);
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        switch (type) {
        case PBLK_WRITE:
        case PBLK_WRITE_INT:
                pool = &pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
                break;
        case PBLK_READ:
                pool = &pblk->r_rq_pool;
                rq_size = pblk_g_rq_size;
                break;
        default:
                pool = &pblk->e_rq_pool;
                rq_size = pblk_g_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        mempool_t *pool;

        switch (type) {
        case PBLK_WRITE:
                kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
                /* fall through */
        case PBLK_WRITE_INT:
                pool = &pblk->w_rq_pool;
                break;
        case PBLK_READ:
                pool = &pblk->r_rq_pool;
                break;
        case PBLK_ERASE:
                pool = &pblk->e_rq_pool;
                break;
        default:
                pblk_err(pblk, "trying to free unknown rqd type\n");
                return;
        }

        if (rqd->meta_list)
                nvm_dev_dma_free(dev->parent, rqd->meta_list,
                                rqd->dma_meta_list);
        mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, &pblk->page_bio_pool);
        }
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(&pblk->page_bio_pool, flags);

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pblk_err(pblk, "could not add page to bio\n");
                        mempool_free(page, &pblk->page_bio_pool);
                        goto err;
                }
        }

        return 0;
err:
        pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
        return -1;
}

void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, wtimer);

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs)
                pblk_write_kick(pblk);
}

static void pblk_wait_for_meta(struct pblk *pblk)
{
        do {
                if (!atomic_read(&pblk->inflight_io))
                        break;

                schedule();
        } while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
        pblk_rb_flush(&pblk->rwb);
        do {
                if (!pblk_rb_sync_count(&pblk->rwb))
                        break;

                pblk_write_kick(pblk);
                schedule();
        } while (1);
}

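/*
 * Pick the GC list a line belongs to based on its valid sector count
 * (vsc): lines with write errors go to the werr list; fully invalid
 * lines (vsc == 0) to the full list; then high/mid/low depending on
 * the high_thrs/mid_thrs watermarks; untouched lines
 * (vsc == sec_in_line) to the empty list. A vsc above sec_in_line is
 * impossible and marks the line as corrupt.
 */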
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;
        int vsc = le32_to_cpu(*line->vsc);

        lockdep_assert_held(&line->lock);

        if (line->w_err_gc->has_write_err) {
                if (line->gc_group != PBLK_LINEGC_WERR) {
                        line->gc_group = PBLK_LINEGC_WERR;
                        move_list = &l_mg->gc_werr_list;
                        pblk_rl_werr_line_in(&pblk->rl);
                }
        } else if (!vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pblk_err(pblk, "unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
#endif

        return nvm_submit_io(dev, rqd);
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
#endif

        return nvm_submit_io_sync(dev, rqd);
}

static void pblk_bio_map_addr_endio(struct bio *bio)
{
        bio_put(bio);
}

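/*
 * Map a metadata buffer into a bio. Buffers allocated with kmalloc
 * (PBLK_KMALLOC_META) are physically contiguous and can be mapped
 * directly; vmalloc'ed buffers must be mapped page by page through
 * vmalloc_to_page().
 */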
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pblk_err(pblk, "could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pblk_err(pblk, "could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }

        bio->bi_end_io = pblk_bio_map_addr_endio;
out:
        return bio;
}

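/*
 * Calculate how many sectors to submit in one write. For example, with
 * min_write_pgs = 8 and sec_per_write = 64: 70 available sectors yield
 * 64, 20 yield 16 (the largest multiple of 8), and 3 yield 0 unless a
 * flush is pending, in which case the minimum of 8 is used (padded by
 * the caller).
 */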
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}

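/*
 * Roll back the last nr_secs sector allocations on a line: rewind
 * line->cur_sec and clear the corresponding bits in the map bitmap.
 */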
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        spin_lock(&line->lock);
        addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        line->cur_sec = addr - nr_secs;

        for (i = 0; i < nr_secs; i++, line->cur_sec--)
                WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
        spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        lockdep_assert_held(&line->lock);

        /* logic error: ppa out-of-bounds. Prevent generating bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
        u64 paddr;

        spin_lock(&line->lock);
        paddr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        spin_unlock(&line->lock);

        return paddr;
}

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
                                     void *emeta_buf, u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        void *ppa_list, *meta_list;
        struct bio *bio;
        struct nvm_rq rqd;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec[0];
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
        int i, j;
        int ret;

        if (dir == PBLK_WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
        } else if (dir == PBLK_READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
        } else
                return -EINVAL;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = meta_list + pblk_dma_meta_size;
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        rq_len = rq_ppas * geo->csecs;

        bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.meta_list = meta_list;
        rqd.ppa_list = ppa_list;
        rqd.dma_meta_list = dma_meta_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.opcode = cmd_op;
        rqd.nr_ppas = rq_ppas;

        if (dir == PBLK_WRITE) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.is_seq = 1;
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
                        spin_unlock(&line->lock);
                        for (j = 0; j < min; j++, i++, paddr++) {
                                meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, id);
                        }
                }
        } else {
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_ppa_to_pos(geo, ppa);

                        if (pblk_io_aligned(pblk, rq_ppas))
                                rqd.is_seq = 1;

                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
                                if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                        pblk_err(pblk, "corrupt emeta line:%d\n",
                                                                line->id);
                                        bio_put(bio);
                                        ret = -EINTR;
                                        goto free_rqd_dma;
                                }

                                ppa = addr_to_gen_ppa(pblk, paddr, id);
                                pos = pblk_ppa_to_pos(geo, ppa);
                        }

                        if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                                pblk_err(pblk, "corrupt emeta line:%d\n",
                                                                line->id);
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, line->id);
                }
        }

        ret = pblk_submit_io_sync(pblk, &rqd);
        if (ret) {
                pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                if (dir == PBLK_WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

        emeta_buf += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;
free_rqd_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->ws_opt;
}

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = NULL;
        int i, ret;
        int cmd_op, bio_op;

        if (dir == PBLK_WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
        } else
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
        rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_ppa_list;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.is_seq = 1;
        rqd.nr_ppas = lm->smeta_sec;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

                if (dir == PBLK_WRITE) {
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        meta_list[i].lba = lba_list[paddr] = addr_empty;
                }
        }

        /*
         * This I/O is sent by the write thread when a line is replaced. Since
         * the write thread is the only one sending write and erase commands,
         * there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io_sync(pblk, &rqd);
        if (ret) {
                pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_ppa_list;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                if (dir == PBLK_WRITE) {
                        pblk_log_write_err(pblk, &rqd);
                        ret = 1;
                } else if (dir == PBLK_READ)
                        pblk_log_read_err(pblk, &rqd);
        }

free_ppa_list:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

        return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
        u64 bpaddr = pblk_line_smeta_start(pblk, line);

        return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf)
{
        return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
                                                line->emeta_ssec, PBLK_READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->is_seq = 1;
        rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd = {NULL};
        int ret;

        pblk_setup_e_rq(pblk, &rqd, ppa);

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io_sync(pblk, &rqd);
        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int ret, bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.a.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                ret = pblk_blk_erase_sync(pblk, ppa);
                if (ret) {
                        pblk_err(pblk, "failed to erase line %d\n", line->id);
                        return ret;
                }
        } while (1);

        return 0;
}

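/*
 * Claim one of the PBLK_DATA_LINES preallocated smeta/emeta buffer
 * slots for the line. If all slots are busy, drop the free_lock,
 * schedule and retry until a slot is released.
 */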
static void pblk_line_setup_metadata(struct pblk_line *line,
                                     struct pblk_line_mgmt *l_mg,
                                     struct pblk_line_meta *lm)
{
        int meta_line;

        lockdep_assert_held(&l_mg->free_lock);

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        line->meta_line = meta_line;

        line->smeta = l_mg->sline_meta[meta_line];
        line->emeta = l_mg->eline_meta[meta_line];

        memset(line->smeta, 0, lm->smeta_len);
        memset(line->emeta->buf, 0, lm->emeta_len[0]);

        line->emeta->mem = 0;
        atomic_set(&line->emeta->sync, 0);
}

/* For now, lines are always assumed to be full lines. Thus, the smeta
 * former and current lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pblk_debug(pblk, "line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
        memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
        smeta_buf->header.id = cpu_to_le32(line->id);
        smeta_buf->header.type = cpu_to_le16(line->type);
        smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
        smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

        /* Start metadata */
        smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta_buf->prev_id = cpu_to_le32(cur->id);
                cur->emeta->buf->next_id = cpu_to_le32(line->id);
        } else {
                smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
        smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

        /* End metadata */
        memcpy(&emeta_buf->header, &smeta_buf->header,
                                                sizeof(struct line_header));

        emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
        emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
        emeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

        emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta_buf->nr_valid_lbas = cpu_to_le64(0);
        emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta_buf->crc = cpu_to_le32(0);
        emeta_buf->prev_id = smeta_buf->prev_id;

        return 1;
}

static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
        if (!line->map_bitmap)
                return -ENOMEM;

        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* will be initialized using bb info from map_bitmap */
        line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                line->map_bitmap = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* Derive the line's sector mapping from bad block information and
 * reserve the smeta and emeta regions.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        u64 off;
        int bit = -1;
        int emeta_secs;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->ws_opt;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->clba;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->ws_opt;
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
                pblk_debug(pblk, "line smeta I/O failed. Retry\n");
                return 0;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        emeta_secs = lm->emeta_sec[0];
        off = lm->sec_per_line;
        while (emeta_secs) {
                off -= geo->ws_opt;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
                        emeta_secs -= geo->ws_opt;
                }
        }

        line->emeta_ssec = off;
        line->sec_in_line -= lm->emeta_sec[0];
        line->nr_valid_lbas = 0;
        line->left_msecs = line->sec_in_line;
        *line->vsc = cpu_to_le32(line->sec_in_line);

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pblk_err(pblk, "unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}

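/*
 * On a line that has never been written, chunks already reported free
 * by the device do not need an erase: mark them in the erase bitmap
 * and return the number of chunks that still require erasing.
 */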
static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int blk_to_erase = atomic_read(&line->blk_in_line);
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
                int state = line->chks[pos].state;

                /* Free chunks should not be erased */
                if (state & NVM_CHK_ST_FREE) {
                        set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
                                                        line->erase_bitmap);
                        blk_to_erase--;
                }
        }

        return blk_to_erase;
}

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);
        int blk_to_erase;

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        spin_lock(&line->lock);

        /* If we have not written to this line, we need to mark free chunks
         * as already erased
         */
        if (line->state == PBLK_LINESTATE_NEW) {
                blk_to_erase = pblk_prepare_new_line(pblk, line);
                line->state = PBLK_LINESTATE_FREE;
        } else {
                blk_to_erase = blk_in_line;
        }

        if (blk_in_line < lm->min_blk_line) {
                spin_unlock(&line->lock);
                return -EAGAIN;
        }

        if (line->state != PBLK_LINESTATE_FREE) {
                WARN(1, "pblk: corrupted line %d, state %d\n",
                                                        line->id, line->state);
                spin_unlock(&line->lock);
                return -EINTR;
        }

        line->state = PBLK_LINESTATE_OPEN;

        atomic_set(&line->left_eblks, blk_to_erase);
        atomic_set(&line->left_seblks, blk_to_erase);

        line->meta_distance = lm->meta_distance;
        spin_unlock(&line->lock);

        kref_init(&line->ref);

        return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        ret = pblk_line_alloc_bitmaps(pblk, line);
        if (ret)
                return ret;

        if (!pblk_line_init_bb(pblk, line, 0)) {
                list_add(&line->list, &l_mg->free_list);
                return -EINTR;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line, true);
        return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

static void pblk_line_reinit(struct pblk_line *line)
{
        *line->vsc = cpu_to_le32(EMPTY_ENTRY);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

void pblk_line_free(struct pblk_line *line)
{
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
        mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);

        pblk_line_reinit(line);
}

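/*
 * Take the first line off the free list and prepare it for writing.
 * Lines that turn out to be bad or corrupt are re-queued on the
 * corresponding list and the next free line is tried. Called with
 * l_mg->free_lock held.
 */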
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        int ret, bit;

        lockdep_assert_held(&l_mg->free_lock);

retry:
        if (list_empty(&l_mg->free_list)) {
                pblk_err(pblk, "no free lines\n");
                return NULL;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pblk_debug(pblk, "line %d is bad\n", line->id);
                goto retry;
        }

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                switch (ret) {
                case -EAGAIN:
                        list_add(&line->list, &l_mg->bad_list);
                        goto retry;
                case -EINTR:
                        list_add(&line->list, &l_mg->corrupt_list);
                        goto retry;
                default:
                        pblk_err(pblk, "failed to prepare line %d\n", line->id);
                        list_add(&line->list, &l_mg->free_list);
                        l_mg->nr_free_lines++;
                        return NULL;
                }
        }

        return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

retry:
        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->map_bitmap = line->map_bitmap;
        retry_line->invalid_bitmap = line->invalid_bitmap;
        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_reinit(line);

        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line, false);

        if (pblk_line_erase(pblk, retry_line))
                goto retry;

        return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
        struct pblk_rl *rl = &pblk->rl;

        atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        pblk_line_setup_metadata(line, l_mg, &pblk->lm);

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_set_space_limit(pblk);

                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
        }
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_alloc_bitmaps(pblk, line))
                return NULL;

        if (pblk_line_erase(pblk, line)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;
        }

retry_setup:
        if (!pblk_line_init_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line, true);

        return line;
}

void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;

        line = pblk_ppa_to_line(pblk, ppa);
        kref_put(&line->ref, pblk_line_put_wq);
}

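/*
 * Drop one line reference per PPA in the request. A single-sector
 * request keeps its address inline in rqd->ppa_addr instead of in
 * the ppa_list.
 */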
1443 void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1444 {
1445         struct ppa_addr *ppa_list;
1446         int i;
1447
1448         ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
1449
1450         for (i = 0; i < rqd->nr_ppas; i++)
1451                 pblk_ppa_to_line_put(pblk, ppa_list[i]);
1452 }
1453
1454 static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1455 {
1456         lockdep_assert_held(&pblk->l_mg.free_lock);
1457
1458         pblk_set_space_limit(pblk);
1459         pblk->state = PBLK_STATE_STOPPING;
1460 }
1461
1462 static void pblk_line_close_meta_sync(struct pblk *pblk)
1463 {
1464         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1465         struct pblk_line_meta *lm = &pblk->lm;
1466         struct pblk_line *line, *tline;
1467         LIST_HEAD(list);
1468
1469         spin_lock(&l_mg->close_lock);
1470         if (list_empty(&l_mg->emeta_list)) {
1471                 spin_unlock(&l_mg->close_lock);
1472                 return;
1473         }
1474
1475         list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1476         spin_unlock(&l_mg->close_lock);
1477
1478         list_for_each_entry_safe(line, tline, &list, list) {
1479                 struct pblk_emeta *emeta = line->emeta;
1480
1481                 while (emeta->mem < lm->emeta_len[0]) {
1482                         int ret;
1483
1484                         ret = pblk_submit_meta_io(pblk, line);
1485                         if (ret) {
1486                                 pblk_err(pblk, "sync meta line %d failed (%d)\n",
1487                                                         line->id, ret);
1488                                 return;
1489                         }
1490                 }
1491         }
1492
1493         pblk_wait_for_meta(pblk);
1494         flush_workqueue(pblk->close_wq);
1495 }
1496
1497 void __pblk_pipeline_flush(struct pblk *pblk)
1498 {
1499         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1500         int ret;
1501
1502         spin_lock(&l_mg->free_lock);
1503         if (pblk->state == PBLK_STATE_RECOVERING ||
1504                                         pblk->state == PBLK_STATE_STOPPED) {
1505                 spin_unlock(&l_mg->free_lock);
1506                 return;
1507         }
1508         pblk->state = PBLK_STATE_RECOVERING;
1509         spin_unlock(&l_mg->free_lock);
1510
1511         pblk_flush_writer(pblk);
1512         pblk_wait_for_meta(pblk);
1513
1514         ret = pblk_recov_pad(pblk);
1515         if (ret) {
1516                 pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
1517                 return;
1518         }
1519
1520         flush_workqueue(pblk->bb_wq);
1521         pblk_line_close_meta_sync(pblk);
1522 }
1523
1524 void __pblk_pipeline_stop(struct pblk *pblk)
1525 {
1526         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1527
1528         spin_lock(&l_mg->free_lock);
1529         pblk->state = PBLK_STATE_STOPPED;
1530         l_mg->data_line = NULL;
1531         l_mg->data_next = NULL;
1532         spin_unlock(&l_mg->free_lock);
1533 }
1534
1535 void pblk_pipeline_stop(struct pblk *pblk)
1536 {
1537         __pblk_pipeline_flush(pblk);
1538         __pblk_pipeline_stop(pblk);
1539 }
1540
1541 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1542 {
1543         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1544         struct pblk_line *cur, *new = NULL;
1545         unsigned int left_seblks;
1546
1547         cur = l_mg->data_line;
1548         new = l_mg->data_next;
1549         if (!new)
1550                 goto out;
1551         l_mg->data_line = new;
1552
1553         spin_lock(&l_mg->free_lock);
1554         pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1555         spin_unlock(&l_mg->free_lock);
1556
1557 retry_erase:
1558         left_seblks = atomic_read(&new->left_seblks);
1559         if (left_seblks) {
1560                 /* If line is not fully erased, erase it */
1561                 if (atomic_read(&new->left_eblks)) {
1562                         if (pblk_line_erase(pblk, new))
1563                                 goto out;
1564                 } else {
1565                         io_schedule();
1566                 }
1567                 goto retry_erase;
1568         }
1569
1570         if (pblk_line_alloc_bitmaps(pblk, new))
1571                 return NULL;
1572
1573 retry_setup:
1574         if (!pblk_line_init_metadata(pblk, new, cur)) {
1575                 new = pblk_line_retry(pblk, new);
1576                 if (!new)
1577                         goto out;
1578
1579                 goto retry_setup;
1580         }
1581
1582         if (!pblk_line_init_bb(pblk, new, 1)) {
1583                 new = pblk_line_retry(pblk, new);
1584                 if (!new)
1585                         goto out;
1586
1587                 goto retry_setup;
1588         }
1589
1590         pblk_rl_free_lines_dec(&pblk->rl, new, true);
1591
1592         /* Allocate next line for preparation */
1593         spin_lock(&l_mg->free_lock);
1594         l_mg->data_next = pblk_line_get(pblk);
1595         if (!l_mg->data_next) {
1596                 /* If we cannot get a new line, we need to stop the pipeline.
1597                  * Only allow as many writes in as we can store safely and then
1598                  * fail gracefully
1599                  */
1600                 pblk_stop_writes(pblk, new);
1601                 l_mg->data_next = NULL;
1602         } else {
1603                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1604                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1605         }
1606         spin_unlock(&l_mg->free_lock);
1607
1608 out:
1609         return new;
1610 }
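/*
 * The retry_erase loop above distinguishes two counters: left_eblks is
 * the number of blocks not yet submitted for erase, left_seblks the
 * number whose erase has not yet completed. Reduced to its core (error
 * handling omitted), the loop is:
 *
 *      while (atomic_read(&new->left_seblks)) {
 *              if (atomic_read(&new->left_eblks))
 *                      pblk_line_erase(pblk, new);     // submit erases
 *              else
 *                      io_schedule();                  // wait for end_io
 *      }
 */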
1611
1612 static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1613 {
1614         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1615         struct pblk_gc *gc = &pblk->gc;
1616
1617         spin_lock(&line->lock);
1618         WARN_ON(line->state != PBLK_LINESTATE_GC);
1619         line->state = PBLK_LINESTATE_FREE;
1620         line->gc_group = PBLK_LINEGC_NONE;
1621         pblk_line_free(line);
1622
1623         if (line->w_err_gc->has_write_err) {
1624                 pblk_rl_werr_line_out(&pblk->rl);
1625                 line->w_err_gc->has_write_err = 0;
1626         }
1627
1628         spin_unlock(&line->lock);
1629         atomic_dec(&gc->pipeline_gc);
1630
1631         spin_lock(&l_mg->free_lock);
1632         list_add_tail(&line->list, &l_mg->free_list);
1633         l_mg->nr_free_lines++;
1634         spin_unlock(&l_mg->free_lock);
1635
1636         pblk_rl_free_lines_inc(&pblk->rl, line);
1637 }
1638
1639 static void pblk_line_put_ws(struct work_struct *work)
1640 {
1641         struct pblk_line_ws *line_put_ws = container_of(work,
1642                                                 struct pblk_line_ws, ws);
1643         struct pblk *pblk = line_put_ws->pblk;
1644         struct pblk_line *line = line_put_ws->line;
1645
1646         __pblk_line_put(pblk, line);
1647         mempool_free(line_put_ws, &pblk->gen_ws_pool);
1648 }
1649
1650 void pblk_line_put(struct kref *ref)
1651 {
1652         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1653         struct pblk *pblk = line->pblk;
1654
1655         __pblk_line_put(pblk, line);
1656 }
1657
1658 void pblk_line_put_wq(struct kref *ref)
1659 {
1660         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1661         struct pblk *pblk = line->pblk;
1662         struct pblk_line_ws *line_put_ws;
1663
1664         line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1665         if (!line_put_ws)
1666                 return;
1667
1668         line_put_ws->pblk = pblk;
1669         line_put_ws->line = line;
1670         line_put_ws->priv = NULL;
1671
1672         INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1673         queue_work(pblk->r_end_wq, &line_put_ws->ws);
1674 }
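/*
 * Lines are reference counted: every in-flight user (reads, GC, emeta)
 * holds a kref that is dropped through one of the two helpers above.
 * pblk_line_put() releases inline; pblk_line_put_wq() defers the release
 * to r_end_wq for contexts where the inline path is not safe, such as
 * read completion. Caller-side sketch:
 *
 *      kref_get(&line->ref);                           // pin the line
 *      ...
 *      kref_put(&line->ref, pblk_line_put);            // process context
 *      kref_put(&line->ref, pblk_line_put_wq);         // completion context
 */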
1675
1676 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1677 {
1678         struct nvm_rq *rqd;
1679         int err;
1680
1681         rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1682
1683         pblk_setup_e_rq(pblk, rqd, ppa);
1684
1685         rqd->end_io = pblk_end_io_erase;
1686         rqd->private = pblk;
1687
1688         /* The write thread schedules erases so that it minimizes disturbances
1689          * with writes. Thus, there is no need to take the LUN semaphore.
1690          */
1691         err = pblk_submit_io(pblk, rqd);
1692         if (err) {
1693                 struct nvm_tgt_dev *dev = pblk->dev;
1694                 struct nvm_geo *geo = &dev->geo;
1695
1696                 pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1697                                         pblk_ppa_to_line_id(ppa),
1698                                         pblk_ppa_to_pos(geo, ppa));
1699         }
1700
1701         return err;
1702 }
1703
1704 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1705 {
1706         return pblk->l_mg.data_line;
1707 }
1708
1709 /* For now, always erase next line */
1710 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1711 {
1712         return pblk->l_mg.data_next;
1713 }
1714
1715 int pblk_line_is_full(struct pblk_line *line)
1716 {
1717         return (line->left_msecs == 0);
1718 }
1719
1720 static void pblk_line_should_sync_meta(struct pblk *pblk)
1721 {
1722         if (pblk_rl_is_limit(&pblk->rl))
1723                 pblk_line_close_meta_sync(pblk);
1724 }
1725
1726 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1727 {
1728         struct nvm_tgt_dev *dev = pblk->dev;
1729         struct nvm_geo *geo = &dev->geo;
1730         struct pblk_line_meta *lm = &pblk->lm;
1731         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1732         struct list_head *move_list;
1733         int i;
1734
1735 #ifdef CONFIG_NVM_PBLK_DEBUG
1736         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1737                                 "pblk: corrupt closed line %d\n", line->id);
1738 #endif
1739
1740         spin_lock(&l_mg->free_lock);
1741         WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1742         spin_unlock(&l_mg->free_lock);
1743
1744         spin_lock(&l_mg->gc_lock);
1745         spin_lock(&line->lock);
1746         WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1747         line->state = PBLK_LINESTATE_CLOSED;
1748         move_list = pblk_line_gc_list(pblk, line);
1749
1750         list_add_tail(&line->list, move_list);
1751
1752         mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1753         line->map_bitmap = NULL;
1754         line->smeta = NULL;
1755         line->emeta = NULL;
1756
1757         for (i = 0; i < lm->blk_per_line; i++) {
1758                 struct pblk_lun *rlun = &pblk->luns[i];
1759                 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1760                 int state = line->chks[pos].state;
1761
1762                 if (!(state & NVM_CHK_ST_OFFLINE))
1763                         line->chks[pos].state = NVM_CHK_ST_CLOSED;
1764         }
1765
1766         spin_unlock(&line->lock);
1767         spin_unlock(&l_mg->gc_lock);
1768 }
1769
1770 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1771 {
1772         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1773         struct pblk_line_meta *lm = &pblk->lm;
1774         struct pblk_emeta *emeta = line->emeta;
1775         struct line_emeta *emeta_buf = emeta->buf;
1776         struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1777
1778         /* No need for an exact vsc value; avoid a big line lock and approximate */
1779         memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1780         memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1781
1782         wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1783         wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1784         wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1785
1786         if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1787                 emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1788                 memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
1789                 emeta_buf->header.id = cpu_to_le32(line->id);
1790                 emeta_buf->header.type = cpu_to_le16(line->type);
1791                 emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1792                 emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1793                 emeta_buf->header.crc = cpu_to_le32(
1794                         pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1795         }
1796
1797         emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1798         emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1799
1800         spin_lock(&l_mg->close_lock);
1801         spin_lock(&line->lock);
1802
1803         /* Update the in-memory start address for emeta, in case it has
1804          * shifted due to write errors
1805          */
1806         if (line->emeta_ssec != line->cur_sec)
1807                 line->emeta_ssec = line->cur_sec;
1808
1809         list_add_tail(&line->list, &l_mg->emeta_list);
1810         spin_unlock(&line->lock);
1811         spin_unlock(&l_mg->close_lock);
1812
1813         pblk_line_should_sync_meta(pblk);
1814 }
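/*
 * The emeta buffer assembled above is self-validating: the header gets
 * its own CRC when first initialized, and emeta_buf->crc seals the whole
 * structure. Recovery can therefore verify a line's emeta with the same
 * helper (sketch of the check side):
 *
 *      if (le32_to_cpu(emeta_buf->crc) !=
 *                              pblk_calc_emeta_crc(pblk, emeta_buf))
 *              return -EINVAL;         // stale or corrupt emeta
 */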
1815
1816 static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1817 {
1818         struct pblk_line_meta *lm = &pblk->lm;
1819         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1820         unsigned int lba_list_size = lm->emeta_len[2];
1821         struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1822         struct pblk_emeta *emeta = line->emeta;
1823
1824         w_err_gc->lba_list = pblk_malloc(lba_list_size,
1825                                          l_mg->emeta_alloc_type, GFP_KERNEL);
1826         if (w_err_gc->lba_list)
1827                 memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf), lba_list_size);
1828 }
1829
1830 void pblk_line_close_ws(struct work_struct *work)
1831 {
1832         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1833                                                                         ws);
1834         struct pblk *pblk = line_ws->pblk;
1835         struct pblk_line *line = line_ws->line;
1836         struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1837
1838         /* Write errors make the emeta start address stored in smeta invalid,
1839          * so keep a copy of the lba list until we've gc'd the line
1840          */
1841         if (w_err_gc->has_write_err)
1842                 pblk_save_lba_list(pblk, line);
1843
1844         pblk_line_close(pblk, line);
1845         mempool_free(line_ws, &pblk->gen_ws_pool);
1846 }
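/*
 * GC consumes the saved copy: when has_write_err was set, the emeta on
 * media cannot be trusted (its start address shifted), so the GC path is
 * expected to prefer w_err_gc->lba_list when present, along the lines of:
 *
 *      lba_list = line->w_err_gc->lba_list ?
 *                      line->w_err_gc->lba_list :
 *                      emeta_to_lbas(pblk, emeta->buf);
 */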
1847
1848 void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1849                       void (*work)(struct work_struct *), gfp_t gfp_mask,
1850                       struct workqueue_struct *wq)
1851 {
1852         struct pblk_line_ws *line_ws;
1853
1854         line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
1855         if (!line_ws)
1856                 return;
1857         line_ws->pblk = pblk;
1858         line_ws->line = line;
1859         line_ws->priv = priv;
1860         INIT_WORK(&line_ws->ws, work);
1861         queue_work(wq, &line_ws->ws);
1862 }
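/*
 * pblk_gen_run_ws() is the generic deferral helper for this file: it
 * packs (pblk, line, priv) into a pblk_line_ws from gen_ws_pool and
 * queues the given work function, which owns the context and must
 * mempool_free() it, as pblk_line_mark_bb() and pblk_line_close_ws() do.
 * Typical call, as used on the write completion path:
 *
 *      pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
 *                                      GFP_ATOMIC, pblk->close_wq);
 */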
1863
1864 static void __pblk_down_chunk(struct pblk *pblk, int pos)
1865 {
1866         struct pblk_lun *rlun = &pblk->luns[pos];
1867         int ret;
1868
1869         /*
1870          * Only send one inflight I/O per LUN. Since we map at a page
1871          * granularity, all ppas in the I/O will map to the same LUN
1872          */
1873
1874         ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1875         if (ret == -ETIME || ret == -EINTR)
1876                 pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1877                                 -ret);
1878 }
1879
1880 void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
1881 {
1882         struct nvm_tgt_dev *dev = pblk->dev;
1883         struct nvm_geo *geo = &dev->geo;
1884         int pos = pblk_ppa_to_pos(geo, ppa);
1885
1886         __pblk_down_chunk(pblk, pos);
1887 }
1888
1889 void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
1890                   unsigned long *lun_bitmap)
1891 {
1892         struct nvm_tgt_dev *dev = pblk->dev;
1893         struct nvm_geo *geo = &dev->geo;
1894         int pos = pblk_ppa_to_pos(geo, ppa);
1895
1896         /* If the LUN has been locked for this same request, do not attempt to
1897          * lock it again
1898          */
1899         if (test_and_set_bit(pos, lun_bitmap))
1900                 return;
1901
1902         __pblk_down_chunk(pblk, pos);
1903 }
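/*
 * The per-request lun_bitmap makes LUN locking idempotent within one
 * write: test_and_set_bit() above is both the "already held?" test and
 * the record that pblk_up_rq() walks to release exactly the LUNs this
 * request took. Mapping-side sketch (hypothetical loop bounds):
 *
 *      for (i = 0; i < nr_secs; i += pblk->min_write_pgs)
 *              pblk_down_rq(pblk, ppa_list[i], lun_bitmap);
 */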
1904
1905 void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
1906 {
1907         struct nvm_tgt_dev *dev = pblk->dev;
1908         struct nvm_geo *geo = &dev->geo;
1909         struct pblk_lun *rlun;
1910         int pos = pblk_ppa_to_pos(geo, ppa);
1911
1912         rlun = &pblk->luns[pos];
1913         up(&rlun->wr_sem);
1914 }
1915
1916 void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
1917 {
1918         struct nvm_tgt_dev *dev = pblk->dev;
1919         struct nvm_geo *geo = &dev->geo;
1920         struct pblk_lun *rlun;
1921         int num_lun = geo->all_luns;
1922         int bit = -1;
1923
1924         while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
1925                 rlun = &pblk->luns[bit];
1926                 up(&rlun->wr_sem);
1927         }
1928 }
1929
1930 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1931 {
1932         struct ppa_addr ppa_l2p;
1933
1934         /* logic error: lba out-of-bounds. Ignore update */
1935         if (!(lba < pblk->rl.nr_secs)) {
1936                 WARN(1, "pblk: corrupted L2P map request\n");
1937                 return;
1938         }
1939
1940         spin_lock(&pblk->trans_lock);
1941         ppa_l2p = pblk_trans_map_get(pblk, lba);
1942
1943         if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
1944                 pblk_map_invalidate(pblk, ppa_l2p);
1945
1946         pblk_trans_map_set(pblk, lba, ppa);
1947         spin_unlock(&pblk->trans_lock);
1948 }
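/*
 * Invariant kept by all the map updates below: an lba points to exactly
 * one of an empty ppa, a write buffer cacheline, or a device ppa. When a
 * mapping moves away from a device ppa, the old sector must be
 * invalidated so its line can later be garbage collected, hence the
 * guarded call above:
 *
 *      if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
 *              pblk_map_invalidate(pblk, ppa_l2p);     // old device sector
 */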
1949
1950 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1951 {
1952
1953 #ifdef CONFIG_NVM_PBLK_DEBUG
1954         /* Callers must ensure that the ppa points to a cache address */
1955         BUG_ON(!pblk_addr_in_cache(ppa));
1956         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1957 #endif
1958
1959         pblk_update_map(pblk, lba, ppa);
1960 }
1961
1962 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
1963                        struct pblk_line *gc_line, u64 paddr_gc)
1964 {
1965         struct ppa_addr ppa_l2p, ppa_gc;
1966         int ret = 1;
1967
1968 #ifdef CONFIG_NVM_PBLK_DEBUG
1969         /* Callers must ensure that the ppa points to a cache address */
1970         BUG_ON(!pblk_addr_in_cache(ppa_new));
1971         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
1972 #endif
1973
1974         /* logic error: lba out-of-bounds. Ignore update */
1975         if (!(lba < pblk->rl.nr_secs)) {
1976                 WARN(1, "pblk: corrupted L2P map request\n");
1977                 return 0;
1978         }
1979
1980         spin_lock(&pblk->trans_lock);
1981         ppa_l2p = pblk_trans_map_get(pblk, lba);
1982         ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
1983
1984         if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
1985                 spin_lock(&gc_line->lock);
1986                 WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
1987                                                 "pblk: corrupted GC update\n");
1988                 spin_unlock(&gc_line->lock);
1989
1990                 ret = 0;
1991                 goto out;
1992         }
1993
1994         pblk_trans_map_set(pblk, lba, ppa_new);
1995 out:
1996         spin_unlock(&pblk->trans_lock);
1997         return ret;
1998 }
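/*
 * Returns 1 if the GC'd sector still backed the lba and the map was
 * updated, 0 if the host rewrote the lba while GC was moving it. In the
 * latter case the caller must drop its now-stale copy, e.g. (sketch):
 *
 *      if (!pblk_update_map_gc(pblk, lba, ppa_new, gc_line, paddr_gc))
 *              w_ctx->lba = ADDR_EMPTY;        // discard stale GC write
 */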
1999
2000 void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2001                          struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
2002 {
2003         struct ppa_addr ppa_l2p;
2004
2005 #ifdef CONFIG_NVM_PBLK_DEBUG
2006         /* Callers must ensure that the ppa points to a device address */
2007         BUG_ON(pblk_addr_in_cache(ppa_mapped));
2008 #endif
2009         /* Invalidate and discard padded entries */
2010         if (lba == ADDR_EMPTY) {
2011                 atomic64_inc(&pblk->pad_wa);
2012 #ifdef CONFIG_NVM_PBLK_DEBUG
2013                 atomic_long_inc(&pblk->padded_wb);
2014 #endif
2015                 if (!pblk_ppa_empty(ppa_mapped))
2016                         pblk_map_invalidate(pblk, ppa_mapped);
2017                 return;
2018         }
2019
2020         /* logic error: lba out-of-bounds. Ignore update */
2021         if (!(lba < pblk->rl.nr_secs)) {
2022                 WARN(1, "pblk: corrupted L2P map request\n");
2023                 return;
2024         }
2025
2026         spin_lock(&pblk->trans_lock);
2027         ppa_l2p = pblk_trans_map_get(pblk, lba);
2028
2029         /* Do not update L2P if the cacheline has been updated. In this case,
2030          * the mapped ppa must be invalidated
2031          */
2032         if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2033                 if (!pblk_ppa_empty(ppa_mapped))
2034                         pblk_map_invalidate(pblk, ppa_mapped);
2035                 goto out;
2036         }
2037
2038 #ifdef CONFIG_NVM_PBLK_DEBUG
2039         WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2040 #endif
2041
2042         pblk_trans_map_set(pblk, lba, ppa_mapped);
2043 out:
2044         spin_unlock(&pblk->trans_lock);
2045 }
2046
2047 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2048                          sector_t blba, int nr_secs)
2049 {
2050         int i;
2051
2052         spin_lock(&pblk->trans_lock);
2053         for (i = 0; i < nr_secs; i++) {
2054                 struct ppa_addr ppa;
2055
2056                 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2057
2058                 /* If the L2P entry maps to a line, the reference is valid */
2059                 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2060                         struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
2061
2062                         kref_get(&line->ref);
2063                 }
2064         }
2065         spin_unlock(&pblk->trans_lock);
2066 }
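/*
 * Each device-resident ppa returned above pins its line with a kref taken
 * under trans_lock, so the line cannot be freed between the lookup and
 * I/O completion. The read path drops the reference once the I/O ends
 * (sketch):
 *
 *      line = pblk_ppa_to_line(pblk, ppa);
 *      kref_put(&line->ref, pblk_line_put_wq);         // end_io context
 */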
2067
2068 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2069                           u64 *lba_list, int nr_secs)
2070 {
2071         u64 lba;
2072         int i;
2073
2074         spin_lock(&pblk->trans_lock);
2075         for (i = 0; i < nr_secs; i++) {
2076                 lba = lba_list[i];
2077                 if (lba != ADDR_EMPTY) {
2078                         /* logic error: lba out-of-bounds. Ignore update */
2079                         if (!(lba < pblk->rl.nr_secs)) {
2080                                 WARN(1, "pblk: corrupted L2P map request\n");
2081                                 continue;
2082                         }
2083                         ppas[i] = pblk_trans_map_get(pblk, lba);
2084                 }
2085         }
2086         spin_unlock(&pblk->trans_lock);
2087 }