/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_dev_ppa_to_pos(geo, *ppa);

        pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_line *line;

        line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                struct ppa_addr *ppa;

                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
                if (!ppa)
                        return;

                *ppa = rqd->ppa_addr;
                pblk_mark_bb(pblk, line, ppa);
        }

        atomic_dec(&pblk->inflight_io);
}
/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, pblk->g_rq_pool);
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;
        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                return;
        }

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        le32_add_cpu(line->vsc, -1);

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;
        int line_id;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line_id = pblk_tgt_ppa_to_line(ppa);
        line = &pblk->lines[line_id];
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}

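/* Request descriptors come from dedicated mempools so that allocation cannot
 * fail permanently under memory pressure on the I/O path. Write requests
 * (pblk_w_rq_size) carry more context than generic read/erase requests
 * (pblk_g_rq_size), hence the two pools. The descriptor is zeroed here and
 * filled in by the caller.
 */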
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        if (rw == WRITE) {
                pool = pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
        } else {
                pool = pblk->g_rq_pool;
                rq_size = pblk_g_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
        mempool_t *pool;

        if (rw == WRITE)
                pool = pblk->w_rq_pool;
        else
                pool = pblk->g_rq_pool;

        mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, pblk->page_pool);
        }
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(pblk->page_pool, flags);
                if (!page)
                        goto err;

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        mempool_free(page, pblk->page_pool);
                        goto err;
                }
        }

        return 0;
err:
        /* i pages have been added to the bio at this point; free them all */
        pblk_bio_free_pages(pblk, bio, 0, i);
        return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
        struct pblk *pblk = (struct pblk *)data;

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs)
                pblk_write_kick(pblk);
}

void pblk_end_bio_sync(struct bio *bio)
{
        struct completion *waiting = bio->bi_private;

        complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}

void pblk_wait_for_meta(struct pblk *pblk)
{
        do {
                if (!atomic_read(&pblk->inflight_io))
                        break;

                schedule();
        } while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
        pblk_rb_flush(&pblk->rwb);
        do {
                if (!pblk_rb_read_count(&pblk->rwb))
                        break;

                schedule();
        } while (1);
}

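/* Classify a line into a GC list based on its valid sector count (vsc):
 * fully invalidated lines go to gc_full_list, mostly invalidated ones to
 * gc_high_list, and so on down to gc_empty_list for lines where every sector
 * is still valid. A vsc above sec_in_line is impossible, so such a line is
 * marked corrupt. Lines with fewer valid sectors are cheaper to reclaim
 * since less data must be moved.
 */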
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;
        int vsc = le32_to_cpu(*line->vsc);

        lockdep_assert_held(&line->lock);

        if (!vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
        struct ppa_addr ppa;

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pr_err("pblk: unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
        struct ppa_addr *ppa_list;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                struct ppa_addr ppa;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        ppa = ppa_list[i];
                        line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pr_err("pblk: bad ppa: line:%d,state:%d\n",
                                                        line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }
#endif

        atomic_inc(&pblk->inflight_io);

        return nvm_submit_io(dev, rqd);
}

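/* Build a bio over a buffer that may be either kmalloc'ed (physically
 * contiguous, mapped in one shot via bio_map_kern()) or vmalloc'ed, such as
 * large GC data buffers, in which case each page must be looked up and added
 * individually with vmalloc_to_page().
 */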
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pr_err("pblk: could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }
out:
        return bio;
}

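/* Decide how many sectors to submit in one write: cap at sec_per_write,
 * round down to a multiple of min_write_pgs, and pad up to the minimum only
 * when a flush forces data out. For example, with min = 8 and max = 64
 * (illustrative values): 70 available sectors yield 64, 20 yield 16, and 5
 * yield 8 on a flush (0 otherwise).
 */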
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        line->cur_sec = addr - nr_secs;

        for (i = 0; i < nr_secs; i++, line->cur_sec--)
                WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
}

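/* Map nr_secs sectors starting at the line's current write position.
 * Callers must hold line->lock; out-of-bounds requests are clamped so that
 * no address past the end of the line is ever generated.
 */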
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        lockdep_assert_held(&line->lock);

        /* logic error: ppa out-of-bounds. Prevent generating bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
        u64 paddr;

        spin_lock(&line->lock);
        paddr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        spin_unlock(&line->lock);

        return paddr;
}

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per-LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
                                     void *emeta_buf, u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        void *ppa_list, *meta_list;
        struct bio *bio;
        struct nvm_rq rqd;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec[0];
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
        int i, j;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
        } else
                return -EINVAL;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list = meta_list + pblk_dma_meta_size;
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        rq_len = rq_ppas * geo->sec_size;

        bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.meta_list = meta_list;
        rqd.ppa_list = ppa_list;
        rqd.dma_meta_list = dma_meta_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.opcode = cmd_op;
        rqd.nr_ppas = rq_ppas;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        if (dir == WRITE) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.flags = pblk_set_progr_mode(pblk, WRITE);
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
                        spin_unlock(&line->lock);
                        for (j = 0; j < min; j++, i++, paddr++) {
                                meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, id);
                        }
                }
        } else {
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_dev_ppa_to_pos(geo, ppa);
                        int read_type = PBLK_READ_RANDOM;

                        if (pblk_io_aligned(pblk, rq_ppas))
                                read_type = PBLK_READ_SEQUENTIAL;
                        rqd.flags = pblk_set_read_mode(pblk, read_type);

                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
                                if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                        pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                        bio_put(bio);
                                        ret = -EINTR;
                                        goto free_rqd_dma;
                                }

                                ppa = addr_to_gen_ppa(pblk, paddr, id);
                                pos = pblk_dev_ppa_to_pos(geo, ppa);
                        }

                        if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                                pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, line->id);
                }
        }

        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: emeta I/O timed out\n");
        }
        atomic_dec(&pblk->inflight_io);
        reinit_completion(&wait);

        if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
                bio_put(bio);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

        emeta_buf += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;
free_rqd_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->sec_per_pl;
}

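/* Read or write a line's start metadata (smeta), located on the first good
 * block of the line. On writes, the corresponding entries in the emeta lba
 * list are set to ADDR_EMPTY, since smeta sectors hold no user data.
 */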
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = NULL;
        int i, ret;
        int cmd_op, bio_op;
        int flags;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
                lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        } else
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
        rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_ppa_list;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;

                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

                if (dir == WRITE) {
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        meta_list[i].lba = lba_list[paddr] = addr_empty;
                }
        }

        /*
         * This I/O is sent by the write thread when a line is replaced. Since
         * the write thread is the only one sending write and erase commands,
         * there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_ppa_list;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: smeta I/O timed out\n");
        }
        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

free_ppa_list:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

        return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
        u64 bpaddr = pblk_line_smeta_start(pblk, line);

        return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf)
{
        return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
                                                line->emeta_ssec, READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->flags = pblk_set_progr_mode(pblk, ERASE);
        rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd;
        int ret = 0;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        pblk_setup_e_rq(pblk, &rqd, ppa);

        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not sync erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));

                rqd.error = ret;
                goto out;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: sync erase timed out\n");
        }

out:
        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int ret, bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.g.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                ret = pblk_blk_erase_sync(pblk, ppa);
                if (ret) {
                        pr_err("pblk: failed to erase line %d\n", line->id);
                        return ret;
                }
        } while (1);

        return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
                                     struct pblk_line_mgmt *l_mg,
                                     struct pblk_line_meta *lm)
{
        int meta_line;

        lockdep_assert_held(&l_mg->free_lock);

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        line->meta_line = meta_line;

        line->smeta = l_mg->sline_meta[meta_line];
        line->emeta = l_mg->eline_meta[meta_line];

        memset(line->smeta, 0, lm->smeta_len);
        memset(line->emeta->buf, 0, lm->emeta_len[0]);

        line->emeta->mem = 0;
        atomic_set(&line->emeta->sync, 0);
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pr_debug("pblk: line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
        memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
        smeta_buf->header.id = cpu_to_le32(line->id);
        smeta_buf->header.type = cpu_to_le16(line->type);
        smeta_buf->header.version = cpu_to_le16(1);

        /* Start metadata */
        smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta_buf->prev_id = cpu_to_le32(cur->id);
                cur->emeta->buf->next_id = cpu_to_le32(line->id);
        } else {
                smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
        smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

        /* End metadata */
        memcpy(&emeta_buf->header, &smeta_buf->header,
                                                sizeof(struct line_header));
        emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta_buf->nr_valid_lbas = cpu_to_le64(0);
        emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta_buf->crc = cpu_to_le32(0);
        emeta_buf->prev_id = smeta_buf->prev_id;

        return 1;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int nr_bb = 0;
        u64 off;
        int bit = -1;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->sec_per_pl;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->sec_per_blk;
                if (bit >= lm->emeta_bb)
                        nr_bb++;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->sec_per_pl;
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
                pr_debug("pblk: line smeta I/O failed. Retry\n");
                return 1;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        bit = lm->sec_per_line;
        off = lm->sec_per_line - lm->emeta_sec[0];
        bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
        while (nr_bb) {
                off -= geo->sec_per_pl;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
                        nr_bb--;
                }
        }

        line->sec_in_line -= lm->emeta_sec[0];
        line->emeta_ssec = off;
        line->nr_valid_lbas = 0;
        line->left_msecs = line->sec_in_line;
        *line->vsc = cpu_to_le32(line->sec_in_line);

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pr_err("pblk: unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}

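/* Transition a free line to open: allocate its mapping bitmaps, reset the
 * erase counters to the number of good blocks, and take the initial
 * reference that keeps the line alive while it has in-flight work.
 */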
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);

        line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->map_bitmap)
                return -ENOMEM;
        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* invalid_bitmap is special since it is used when the line is closed.
         * No need to zeroize it; it will be initialized using bb info from
         * map_bitmap
         */
        line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                return -ENOMEM;
        }

        spin_lock(&line->lock);
        if (line->state != PBLK_LINESTATE_FREE) {
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                spin_unlock(&line->lock);
                WARN(1, "pblk: corrupted line %d, state %d\n",
                                                        line->id, line->state);
                return -EAGAIN;
        }

        line->state = PBLK_LINESTATE_OPEN;

        atomic_set(&line->left_eblks, blk_in_line);
        atomic_set(&line->left_seblks, blk_in_line);

        line->meta_distance = lm->meta_distance;
        spin_unlock(&line->lock);

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        kref_init(&line->ref);

        return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);

        if (!pblk_line_init_bb(pblk, line, 0)) {
                list_add(&line->list, &l_mg->free_list);
                return -EINTR;
        }

        return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

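/* Take the next line off the free list and prepare it for use. Must be
 * called with l_mg->free_lock held; lines that turn out to have no good
 * blocks are parked on the bad list and the next candidate is tried.
 */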
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        int ret, bit;

        lockdep_assert_held(&l_mg->free_lock);

retry:
        if (list_empty(&l_mg->free_list)) {
                pr_err("pblk: no free lines\n");
                return NULL;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pr_debug("pblk: line %d is bad\n", line->id);
                goto retry;
        }

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                if (ret == -EAGAIN) {
                        list_add(&line->list, &l_mg->corrupt_list);
                        goto retry;
                } else {
                        pr_err("pblk: failed to prepare line %d\n", line->id);
                        list_add(&line->list, &l_mg->free_list);
                        l_mg->nr_free_lines++;
                        return NULL;
                }
        }

        return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

retry:
        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_free(pblk, line);
        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, retry_line);

        if (pblk_line_erase(pblk, retry_line))
                goto retry;

        return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
        struct pblk_rl *rl = &pblk->rl;

        atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int is_next = 0;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        pblk_line_setup_metadata(line, l_mg, &pblk->lm);

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_set_space_limit(pblk);

                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_erase(pblk, line)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line);
        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
        if (!pblk_line_init_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        return line;
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
        lockdep_assert_held(&pblk->l_mg.free_lock);

        pblk_set_space_limit(pblk);
        pblk->state = PBLK_STATE_STOPPING;
}

void pblk_pipeline_stop(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        if (pblk->state == PBLK_STATE_RECOVERING ||
                                        pblk->state == PBLK_STATE_STOPPED) {
                spin_unlock(&l_mg->free_lock);
                return;
        }
        pblk->state = PBLK_STATE_RECOVERING;
        spin_unlock(&l_mg->free_lock);

        pblk_flush_writer(pblk);
        pblk_wait_for_meta(pblk);

        ret = pblk_recov_pad(pblk);
        if (ret) {
                pr_err("pblk: could not close data on teardown(%d)\n", ret);
                return;
        }

        pblk_line_close_meta_sync(pblk);

        spin_lock(&l_mg->free_lock);
        pblk->state = PBLK_STATE_STOPPED;
        l_mg->data_line = NULL;
        l_mg->data_next = NULL;
        spin_unlock(&l_mg->free_lock);
}

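/* Swap the open data line for the pre-allocated next line once the current
 * one is full. The new line must be fully erased before its metadata is set
 * up; a replacement for data_next is then allocated, and writes are stopped
 * gracefully if no free line is available.
 */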
void pblk_line_replace_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *cur, *new;
        unsigned int left_seblks;
        int is_next = 0;

        cur = l_mg->data_line;
        new = l_mg->data_next;
        if (!new)
                return;
        l_mg->data_line = new;

        spin_lock(&l_mg->free_lock);
        if (pblk->state != PBLK_STATE_RUNNING) {
                l_mg->data_line = NULL;
                l_mg->data_next = NULL;
                spin_unlock(&l_mg->free_lock);
                return;
        }

        pblk_line_setup_metadata(new, l_mg, &pblk->lm);
        spin_unlock(&l_mg->free_lock);

retry_erase:
        left_seblks = atomic_read(&new->left_seblks);
        if (left_seblks) {
                /* If line is not fully erased, erase it */
                if (atomic_read(&new->left_eblks)) {
                        if (pblk_line_erase(pblk, new))
                                return;
                } else {
                        io_schedule();
                }
                goto retry_erase;
        }

retry_setup:
        if (!pblk_line_init_metadata(pblk, new, cur)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, new, 1)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return;

                goto retry_setup;
        }

        /* Allocate next line for preparation */
        spin_lock(&l_mg->free_lock);
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_stop_writes(pblk, new);
                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
        if (line->map_bitmap)
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
        if (line->invalid_bitmap)
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

        *line->vsc = cpu_to_le32(EMPTY_ENTRY);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

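/* kref release callback: a fully GC'ed line drops its last reference here,
 * frees its bitmaps, returns to the free list and credits the rate limiter
 * with a new free line.
 */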
void pblk_line_put(struct kref *ref)
{
        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_FREE;
        line->gc_group = PBLK_LINEGC_NONE;
        pblk_line_free(pblk, line);
        spin_unlock(&line->lock);

        spin_lock(&l_mg->free_lock);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_inc(&pblk->rl, line);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq *rqd;
        int err;

        rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        pblk_setup_e_rq(pblk, rqd, ppa);

        rqd->end_io = pblk_end_io_erase;
        rqd->private = pblk;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not async erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));
        }

        return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
        return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
        return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
        return (line->left_msecs == 0);
}

void pblk_line_close_meta_sync(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line, *tline;
        LIST_HEAD(list);

        spin_lock(&l_mg->close_lock);
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return;
        }

        list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
        spin_unlock(&l_mg->close_lock);

        list_for_each_entry_safe(line, tline, &list, list) {
                struct pblk_emeta *emeta = line->emeta;

                while (emeta->mem < lm->emeta_len[0]) {
                        int ret;

                        ret = pblk_submit_meta_io(pblk, line);
                        if (ret) {
                                pr_err("pblk: sync meta line %d failed (%d)\n",
                                                        line->id, ret);
                                return;
                        }
                }
        }

        pblk_wait_for_meta(pblk);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
        if (pblk_rl_is_limit(&pblk->rl))
                pblk_line_close_meta_sync(pblk);
}

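/* Close a fully mapped line: release its meta line slot, move it to the GC
 * list matching its valid sector count, and free the map bitmap, which is no
 * longer needed once the line stops taking writes.
 */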
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct list_head *move_list;

        WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
                                "pblk: corrupt closed line %d\n", line->id);

        spin_lock(&l_mg->free_lock);
        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
        spin_unlock(&l_mg->free_lock);

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);

        list_add_tail(&line->list, move_list);

        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;

        spin_unlock(&line->lock);
        spin_unlock(&l_mg->gc_lock);

        pblk_gc_should_kick(pblk);
}

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;

1597         /* No need for an exact vsc value; avoid a big line lock and approximate. */
1598         memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1599         memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1600
1601         emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1602         emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1603
1604         spin_lock(&l_mg->close_lock);
1605         spin_lock(&line->lock);
1606         list_add_tail(&line->list, &l_mg->emeta_list);
1607         spin_unlock(&line->lock);
1608         spin_unlock(&l_mg->close_lock);
1609
1610         pblk_line_should_sync_meta(pblk);
1611 }
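
/*
 * Illustrative sketch: the counterpart check on recovery. The CRC stored
 * in emeta_buf->crc above would be recomputed and compared before the
 * buffer is trusted. This mirrors the pblk_calc_emeta_crc() call in
 * pblk_line_close_meta(); it is an illustration, not the in-tree check.
 */
static int pblk_sketch_emeta_crc_ok(struct pblk *pblk,
                                    struct line_emeta *emeta_buf)
{
        return le32_to_cpu(emeta_buf->crc) ==
                                pblk_calc_emeta_crc(pblk, emeta_buf);
}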
1612
1613 void pblk_line_close_ws(struct work_struct *work)
1614 {
1615         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1616                                                                         ws);
1617         struct pblk *pblk = line_ws->pblk;
1618         struct pblk_line *line = line_ws->line;
1619
1620         pblk_line_close(pblk, line);
1621         mempool_free(line_ws, pblk->line_ws_pool);
1622 }
1623
1624 void pblk_line_mark_bb(struct work_struct *work)
1625 {
1626         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1627                                                                         ws);
1628         struct pblk *pblk = line_ws->pblk;
1629         struct nvm_tgt_dev *dev = pblk->dev;
1630         struct ppa_addr *ppa = line_ws->priv;
1631         int ret;
1632
1633         ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
1634         if (ret) {
1635                 struct pblk_line *line;
1636                 int pos;
1637
1638                 line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
1639                 pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
1640
1641                 pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
1642                                 line->id, pos);
1643         }
1644
1645         kfree(ppa);
1646         mempool_free(line_ws, pblk->line_ws_pool);
1647 }
1648
1649 void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1650                       void (*work)(struct work_struct *),
1651                       struct workqueue_struct *wq)
1652 {
1653         struct pblk_line_ws *line_ws;
1654
1655         line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
1656         if (!line_ws)
1657                 return;
1658
1659         line_ws->pblk = pblk;
1660         line_ws->line = line;
1661         line_ws->priv = priv;
1662
1663         INIT_WORK(&line_ws->ws, work);
1664         queue_work(wq, &line_ws->ws);
1665 }
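
/*
 * Illustrative sketch: deferring a line close through the helper above.
 * The work item carries pblk/line/priv and runs pblk_line_close_ws() on
 * the given workqueue, which frees the item back to line_ws_pool when
 * done. pblk->close_wq is an assumed dedicated workqueue.
 */
static void pblk_sketch_defer_close(struct pblk *pblk, struct pblk_line *line)
{
        pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws, pblk->close_wq);
}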
1666
1667 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1668                   unsigned long *lun_bitmap)
1669 {
1670         struct nvm_tgt_dev *dev = pblk->dev;
1671         struct nvm_geo *geo = &dev->geo;
1672         struct pblk_lun *rlun;
1673         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1674         int ret;
1675
1676         /*
1677          * Only send one inflight I/O per LUN. Since we map at a page
1678          * granularity, all ppas in the I/O will map to the same LUN
1679          */
1680 #ifdef CONFIG_NVM_DEBUG
1681         int i;
1682
1683         for (i = 1; i < nr_ppas; i++)
1684                 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
1685                                 ppa_list[0].g.ch != ppa_list[i].g.ch);
1686 #endif
1687         /* If the LUN has been locked for this same request, do not attempt to
1688          * lock it again
1689          */
1690         if (test_and_set_bit(pos, lun_bitmap))
1691                 return;
1692
1693         rlun = &pblk->luns[pos];
1694         ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
1695         if (ret) {
1696                 switch (ret) {
1697                 case -ETIME:
1698                         pr_err("pblk: lun semaphore timed out\n");
1699                         break;
1700                 case -EINTR:
1701                         pr_err("pblk: lun semaphore timed out\n");
1702                         break;
1703                 }
1704         }
1705 }
1706
1707 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1708                 unsigned long *lun_bitmap)
1709 {
1710         struct nvm_tgt_dev *dev = pblk->dev;
1711         struct nvm_geo *geo = &dev->geo;
1712         struct pblk_lun *rlun;
1713         int nr_luns = geo->nr_luns;
1714         int bit = -1;
1715
1716         while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
1717                 rlun = &pblk->luns[bit];
1718                 up(&rlun->wr_sem);
1719         }
1720
1721         kfree(lun_bitmap);
1722 }
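
/*
 * Illustrative sketch: pblk_down_rq()/pblk_up_rq() are used as a pair
 * around a write submission. pblk_up_rq() kfrees the bitmap, so it must
 * come from the heap and must not be touched afterwards. In the real
 * write path the semaphores are released on I/O completion, not inline
 * as in this simplified sketch.
 */
static int pblk_sketch_guarded_submit(struct pblk *pblk, struct nvm_rq *rqd,
                                      struct ppa_addr *ppa_list, int nr_ppas)
{
        struct nvm_geo *geo = &pblk->dev->geo;
        unsigned long *lun_bitmap;
        int err;

        lun_bitmap = kcalloc(BITS_TO_LONGS(geo->nr_luns), sizeof(long),
                             GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;

        pblk_down_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
        err = pblk_submit_io(pblk, rqd);
        pblk_up_rq(pblk, ppa_list, nr_ppas, lun_bitmap); /* frees lun_bitmap */

        return err;
}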
1723
1724 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1725 {
1726         struct ppa_addr l2p_ppa;
1727
1728         /* logic error: lba out-of-bounds. Ignore update */
1729         if (lba >= pblk->rl.nr_secs) {
1730                 WARN(1, "pblk: corrupted L2P map request\n");
1731                 return;
1732         }
1733
1734         spin_lock(&pblk->trans_lock);
1735         l2p_ppa = pblk_trans_map_get(pblk, lba);
1736
1737         if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
1738                 pblk_map_invalidate(pblk, l2p_ppa);
1739
1740         pblk_trans_map_set(pblk, lba, ppa);
1741         spin_unlock(&pblk->trans_lock);
1742 }
1743
1744 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1745 {
1746 #ifdef CONFIG_NVM_DEBUG
1747         /* Callers must ensure that the ppa points to a cache address */
1748         BUG_ON(!pblk_addr_in_cache(ppa));
1749         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1750 #endif
1751
1752         pblk_update_map(pblk, lba, ppa);
1753 }
1754
1755 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1756                        struct pblk_line *gc_line)
1757 {
1758         struct ppa_addr l2p_ppa;
1759         int ret = 1;
1760
1761 #ifdef CONFIG_NVM_DEBUG
1762         /* Callers must ensure that the ppa points to a cache address */
1763         BUG_ON(!pblk_addr_in_cache(ppa));
1764         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1765 #endif
1766
1767         /* logic error: lba out-of-bounds. Ignore update */
1768         if (lba >= pblk->rl.nr_secs) {
1769                 WARN(1, "pblk: corrupted L2P map request\n");
1770                 return 0;
1771         }
1772
1773         spin_lock(&pblk->trans_lock);
1774         l2p_ppa = pblk_trans_map_get(pblk, lba);
1775
1776         /* Prevent updated entries from being overwritten by GC */
1777         if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
1778                                 pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
1779                 ret = 0;
1780                 goto out;
1781         }
1782
1783         pblk_trans_map_set(pblk, lba, ppa);
1784 out:
1785         spin_unlock(&pblk->trans_lock);
1786         return ret;
1787 }
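
/*
 * Illustrative sketch: how a GC writer would consume the return value
 * above. A return of 0 means a newer user write already remapped the
 * lba, so the copied sector must be dropped rather than counted as
 * valid data.
 */
static void pblk_sketch_gc_remap(struct pblk *pblk, sector_t lba,
                                 struct ppa_addr cache_ppa,
                                 struct pblk_line *gc_line)
{
        if (!pblk_update_map_gc(pblk, lba, cache_ppa, gc_line))
                pr_debug("pblk: dropped stale GC copy of lba %llu\n",
                                        (unsigned long long)lba);
}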
1788
1789 void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1790                          struct ppa_addr entry_line)
1791 {
1792         struct ppa_addr l2p_line;
1793
1794 #ifdef CONFIG_NVM_DEBUG
1795         /* Callers must ensure that the ppa points to a device address */
1796         BUG_ON(pblk_addr_in_cache(ppa));
1797 #endif
1798         /* Invalidate and discard padded entries */
1799         if (lba == ADDR_EMPTY) {
1800 #ifdef CONFIG_NVM_DEBUG
1801                 atomic_long_inc(&pblk->padded_wb);
1802 #endif
1803                 pblk_map_invalidate(pblk, ppa);
1804                 return;
1805         }
1806
1807         /* logic error: lba out-of-bounds. Ignore update */
1808         if (lba >= pblk->rl.nr_secs) {
1809                 WARN(1, "pblk: corrupted L2P map request\n");
1810                 return;
1811         }
1812
1813         spin_lock(&pblk->trans_lock);
1814         l2p_line = pblk_trans_map_get(pblk, lba);
1815
1816         /* Do not update the L2P if the cacheline has since been updated;
1817          * the device ppa being mapped is stale and must be invalidated
1818          */
1819         if (l2p_line.ppa != entry_line.ppa) {
1820                 if (!pblk_ppa_empty(ppa))
1821                         pblk_map_invalidate(pblk, ppa);
1822                 goto out;
1823         }
1824
1825 #ifdef CONFIG_NVM_DEBUG
1826         WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
1827 #endif
1828
1829         pblk_trans_map_set(pblk, lba, ppa);
1830 out:
1831         spin_unlock(&pblk->trans_lock);
1832 }
1833
1834 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
1835                          sector_t blba, int nr_secs)
1836 {
1837         int i;
1838
1839         spin_lock(&pblk->trans_lock);
1840         for (i = 0; i < nr_secs; i++)
1841                 ppas[i] = pblk_trans_map_get(pblk, blba + i);
1842         spin_unlock(&pblk->trans_lock);
1843 }
1844
1845 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
1846                           u64 *lba_list, int nr_secs)
1847 {
1848         sector_t lba;
1849         int i;
1850
1851         spin_lock(&pblk->trans_lock);
1852         for (i = 0; i < nr_secs; i++) {
1853                 lba = lba_list[i];
1854                 if (lba == ADDR_EMPTY) {
1855                         ppas[i].ppa = ADDR_EMPTY;
1856                 } else {
1857                         /* logic error: lba out-of-bounds. Skip lookup */
1858                         if (lba >= pblk->rl.nr_secs) {
1859                                 WARN(1, "pblk: corrupted L2P map request\n");
1860                                 continue;
1861                         }
1862                         ppas[i] = pblk_trans_map_get(pblk, lba);
1863                 }
1864         }
1865         spin_unlock(&pblk->trans_lock);
1866 }
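
/*
 * Illustrative sketch: a read path resolving a consecutive lba range in
 * one locked pass with pblk_lookup_l2p_seq() and then classifying each
 * ppa with the predicates used throughout this file.
 */
static void pblk_sketch_classify_reads(struct pblk *pblk, sector_t blba,
                                       struct ppa_addr *ppas, int nr_secs)
{
        int i;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (pblk_ppa_empty(ppas[i])) {
                        /* never written: the read is satisfied with zeroes */
                } else if (pblk_addr_in_cache(ppas[i])) {
                        /* still buffered: serve from the write buffer (rwb) */
                } else {
                        /* on media: issue a device read for this ppa */
                }
        }
}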