1 /*
2  * Copyright (C) 2016 CNEX Labs
3  * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4  *                  Matias Bjorling <matias@cnexlabs.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License version
8  * 2 as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License for more details.
14  *
15  * pblk-core.c - pblk's core functionality
16  *
17  */
18
19 #include "pblk.h"
20
21 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
22                          struct ppa_addr *ppa)
23 {
24         struct nvm_tgt_dev *dev = pblk->dev;
25         struct nvm_geo *geo = &dev->geo;
26         int pos = pblk_dev_ppa_to_pos(geo, *ppa);
27
28         pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
29         atomic_long_inc(&pblk->erase_failed);
30
31         atomic_dec(&line->blk_in_line);
32         if (test_and_set_bit(pos, line->blk_bitmap))
33                 pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
34                                                         line->id, pos);
35
36         pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
37 }
38
39 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
40 {
41         struct pblk_line *line;
42
43         line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
44         atomic_dec(&line->left_seblks);
45
46         if (rqd->error) {
47                 struct ppa_addr *ppa;
48
49                 ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
50                 if (!ppa)
51                         return;
52
53                 *ppa = rqd->ppa_addr;
54                 pblk_mark_bb(pblk, line, ppa);
55         }
56 }
57
58 /* Erase completion assumes that only one block is erased at a time */
59 static void pblk_end_io_erase(struct nvm_rq *rqd)
60 {
61         struct pblk *pblk = rqd->private;
62
63         __pblk_end_io_erase(pblk, rqd);
64         mempool_free(rqd, pblk->g_rq_pool);
65 }
66
67 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
68                            u64 paddr)
69 {
70         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
71         struct list_head *move_list = NULL;
72
73         /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
74          * table is modified with reclaimed sectors, a check is done to ensure
75          * that newer updates are not overwritten.
76          */
77         spin_lock(&line->lock);
78         if (line->state == PBLK_LINESTATE_GC ||
79                                         line->state == PBLK_LINESTATE_FREE) {
80                 spin_unlock(&line->lock);
81                 return;
82         }
83
84         if (test_and_set_bit(paddr, line->invalid_bitmap)) {
85                 WARN_ONCE(1, "pblk: double invalidate\n");
86                 spin_unlock(&line->lock);
87                 return;
88         }
89         le32_add_cpu(line->vsc, -1);
90
91         if (line->state == PBLK_LINESTATE_CLOSED)
92                 move_list = pblk_line_gc_list(pblk, line);
93         spin_unlock(&line->lock);
94
95         if (move_list) {
96                 spin_lock(&l_mg->gc_lock);
97                 spin_lock(&line->lock);
98                 /* Prevent moving a line that has just been chosen for GC */
99                 if (line->state == PBLK_LINESTATE_GC ||
100                                         line->state == PBLK_LINESTATE_FREE) {
101                         spin_unlock(&line->lock);
102                         spin_unlock(&l_mg->gc_lock);
103                         return;
104                 }
105                 spin_unlock(&line->lock);
106
107                 list_move_tail(&line->list, move_list);
108                 spin_unlock(&l_mg->gc_lock);
109         }
110 }
111
112 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
113 {
114         struct pblk_line *line;
115         u64 paddr;
116         int line_id;
117
118 #ifdef CONFIG_NVM_DEBUG
119         /* Callers must ensure that the ppa points to a device address */
120         BUG_ON(pblk_addr_in_cache(ppa));
121         BUG_ON(pblk_ppa_empty(ppa));
122 #endif
123
124         line_id = pblk_tgt_ppa_to_line(ppa);
125         line = &pblk->lines[line_id];
126         paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
127
128         __pblk_map_invalidate(pblk, line, paddr);
129 }
130
131 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
132                                   unsigned int nr_secs)
133 {
134         sector_t lba;
135
136         spin_lock(&pblk->trans_lock);
137         for (lba = slba; lba < slba + nr_secs; lba++) {
138                 struct ppa_addr ppa;
139
140                 ppa = pblk_trans_map_get(pblk, lba);
141
142                 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
143                         pblk_map_invalidate(pblk, ppa);
144
145                 pblk_ppa_set_empty(&ppa);
146                 pblk_trans_map_set(pblk, lba, ppa);
147         }
148         spin_unlock(&pblk->trans_lock);
149 }
150
151 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
152 {
153         mempool_t *pool;
154         struct nvm_rq *rqd;
155         int rq_size;
156
157         if (rw == WRITE) {
158                 pool = pblk->w_rq_pool;
159                 rq_size = pblk_w_rq_size;
160         } else {
161                 pool = pblk->g_rq_pool;
162                 rq_size = pblk_g_rq_size;
163         }
164
165         rqd = mempool_alloc(pool, GFP_KERNEL);
166         memset(rqd, 0, rq_size);
167
168         return rqd;
169 }
170
171 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
172 {
173         mempool_t *pool;
174
175         if (rw == WRITE)
176                 pool = pblk->w_rq_pool;
177         else
178                 pool = pblk->g_rq_pool;
179
180         mempool_free(rqd, pool);
181 }
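
/*
 * Usage sketch (illustrative, not part of the driver): pblk_alloc_rqd()
 * and pblk_free_rqd() must be called with the same rw direction so that
 * the request returns to the mempool it was taken from:
 *
 *	struct nvm_rq *rqd;
 *
 *	rqd = pblk_alloc_rqd(pblk, READ);  (zeroed, from g_rq_pool)
 *	... set up the ppa list, bio and end_io, then submit ...
 *	pblk_free_rqd(pblk, rqd, READ);    (back to g_rq_pool)
 *
 * Mixing directions would return the request to the wrong pool.
 */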
182
183 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
184                          int nr_pages)
185 {
186         struct bio_vec bv;
187         int i;
188
189         WARN_ON(off + nr_pages != bio->bi_vcnt);
190
191         bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
192         for (i = off; i < nr_pages + off; i++) {
193                 bv = bio->bi_io_vec[i];
194                 mempool_free(bv.bv_page, pblk->page_pool);
195         }
196 }
197
198 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
199                        int nr_pages)
200 {
201         struct request_queue *q = pblk->dev->q;
202         struct page *page;
203         int i, ret;
204
205         for (i = 0; i < nr_pages; i++) {
206                 page = mempool_alloc(pblk->page_pool, flags);
207                 if (!page)
208                         goto err;
209
210                 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
211                 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
212                         pr_err("pblk: could not add page to bio\n");
213                         mempool_free(page, pblk->page_pool);
214                         goto err;
215                 }
216         }
217
218         return 0;
219 err:
220         pblk_bio_free_pages(pblk, bio, 0, i); /* i pages were added so far */
221         return -1;
222 }
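
/*
 * Pairing sketch (illustrative): pages added here come from
 * pblk->page_pool, so a caller must release them through
 * pblk_bio_free_pages() with a matching offset/count rather than rely
 * on bio_put() alone:
 *
 *	if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, nr_pages))
 *		return -ENOMEM;
 *	... submit the bio and wait for completion ...
 *	pblk_bio_free_pages(pblk, bio, 0, nr_pages);
 */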
223
224 static void pblk_write_kick(struct pblk *pblk)
225 {
226         wake_up_process(pblk->writer_ts);
227         mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
228 }
229
230 void pblk_write_timer_fn(unsigned long data)
231 {
232         struct pblk *pblk = (struct pblk *)data;
233
234         /* kick the write thread every tick to flush outstanding data */
235         pblk_write_kick(pblk);
236 }
237
238 void pblk_write_should_kick(struct pblk *pblk)
239 {
240         unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
241
242         if (secs_avail >= pblk->min_write_pgs)
243                 pblk_write_kick(pblk);
244 }
245
246 void pblk_end_bio_sync(struct bio *bio)
247 {
248         struct completion *waiting = bio->bi_private;
249
250         complete(waiting);
251 }
252
253 void pblk_end_io_sync(struct nvm_rq *rqd)
254 {
255         struct completion *waiting = rqd->private;
256
257         complete(waiting);
258 }
259
260 void pblk_flush_writer(struct pblk *pblk)
261 {
262         struct bio *bio;
263         int ret;
264         DECLARE_COMPLETION_ONSTACK(wait);
265
266         bio = bio_alloc(GFP_KERNEL, 1);
267         if (!bio)
268                 return;
269
270         bio->bi_iter.bi_sector = 0; /* internal bio */
271         bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH);
272         bio->bi_private = &wait;
273         bio->bi_end_io = pblk_end_bio_sync;
274
275         ret = pblk_write_to_cache(pblk, bio, 0);
276         if (ret == NVM_IO_OK) {
277                 if (!wait_for_completion_io_timeout(&wait,
278                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
279                         pr_err("pblk: flush cache timed out\n");
280                 }
281         } else if (ret != NVM_IO_DONE) {
282                 pr_err("pblk: tear down bio failed\n");
283         }
284
285         if (bio->bi_status)
286                 pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);
287
288         bio_put(bio);
289 }
290
291 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
292 {
293         struct pblk_line_meta *lm = &pblk->lm;
294         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
295         struct list_head *move_list = NULL;
296         int vsc = le32_to_cpu(*line->vsc);
297
298         if (!vsc) {
299                 if (line->gc_group != PBLK_LINEGC_FULL) {
300                         line->gc_group = PBLK_LINEGC_FULL;
301                         move_list = &l_mg->gc_full_list;
302                 }
303         } else if (vsc < lm->mid_thrs) {
304                 if (line->gc_group != PBLK_LINEGC_HIGH) {
305                         line->gc_group = PBLK_LINEGC_HIGH;
306                         move_list = &l_mg->gc_high_list;
307                 }
308         } else if (vsc < lm->high_thrs) {
309                 if (line->gc_group != PBLK_LINEGC_MID) {
310                         line->gc_group = PBLK_LINEGC_MID;
311                         move_list = &l_mg->gc_mid_list;
312                 }
313         } else if (vsc < line->sec_in_line) {
314                 if (line->gc_group != PBLK_LINEGC_LOW) {
315                         line->gc_group = PBLK_LINEGC_LOW;
316                         move_list = &l_mg->gc_low_list;
317                 }
318         } else if (vsc == line->sec_in_line) {
319                 if (line->gc_group != PBLK_LINEGC_EMPTY) {
320                         line->gc_group = PBLK_LINEGC_EMPTY;
321                         move_list = &l_mg->gc_empty_list;
322                 }
323         } else {
324                 line->state = PBLK_LINESTATE_CORRUPT;
325                 line->gc_group = PBLK_LINEGC_NONE;
326                 move_list = &l_mg->corrupt_list;
327                 pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
328                                                 line->id, vsc,
329                                                 line->sec_in_line,
330                                                 lm->high_thrs, lm->mid_thrs);
331         }
332
333         return move_list;
334 }
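
/*
 * Worked example of the bucketing above (hypothetical numbers): with
 * sec_in_line = 1024, mid_thrs = 256 and high_thrs = 512, a line with
 * vsc = 0 moves to gc_full_list, vsc = 100 to gc_high_list (few valid
 * sectors left, so GC is cheap), vsc = 400 to gc_mid_list, vsc = 800 to
 * gc_low_list and vsc = 1024 to gc_empty_list. A vsc above sec_in_line
 * is impossible, so the line is marked corrupt.
 */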
335
336 void pblk_discard(struct pblk *pblk, struct bio *bio)
337 {
338         sector_t slba = pblk_get_lba(bio);
339         sector_t nr_secs = pblk_get_secs(bio);
340
341         pblk_invalidate_range(pblk, slba, nr_secs);
342 }
343
344 struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
345 {
346         struct ppa_addr ppa;
347
348         spin_lock(&pblk->trans_lock);
349         ppa = pblk_trans_map_get(pblk, lba);
350         spin_unlock(&pblk->trans_lock);
351
352         return ppa;
353 }
354
355 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
356 {
357         atomic_long_inc(&pblk->write_failed);
358 #ifdef CONFIG_NVM_DEBUG
359         pblk_print_failed_rqd(pblk, rqd, rqd->error);
360 #endif
361 }
362
363 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
364 {
365         /* Empty page read is not necessarily an error (e.g., L2P recovery) */
366         if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
367                 atomic_long_inc(&pblk->read_empty);
368                 return;
369         }
370
371         switch (rqd->error) {
372         case NVM_RSP_WARN_HIGHECC:
373                 atomic_long_inc(&pblk->read_high_ecc);
374                 break;
375         case NVM_RSP_ERR_FAILECC:
376         case NVM_RSP_ERR_FAILCRC:
377                 atomic_long_inc(&pblk->read_failed);
378                 break;
379         default:
380                 pr_err("pblk: unknown read error:%d\n", rqd->error);
381         }
382 #ifdef CONFIG_NVM_DEBUG
383         pblk_print_failed_rqd(pblk, rqd, rqd->error);
384 #endif
385 }
386
387 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
388 {
389         pblk->sec_per_write = sec_per_write;
390 }
391
392 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
393 {
394         struct nvm_tgt_dev *dev = pblk->dev;
395
396 #ifdef CONFIG_NVM_DEBUG
397         struct ppa_addr *ppa_list;
398
399         ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
400         if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
401                 WARN_ON(1);
402                 return -EINVAL;
403         }
404
405         if (rqd->opcode == NVM_OP_PWRITE) {
406                 struct pblk_line *line;
407                 struct ppa_addr ppa;
408                 int i;
409
410                 for (i = 0; i < rqd->nr_ppas; i++) {
411                         ppa = ppa_list[i];
412                         line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
413
414                         spin_lock(&line->lock);
415                         if (line->state != PBLK_LINESTATE_OPEN) {
416                                 pr_err("pblk: bad ppa: line:%d,state:%d\n",
417                                                         line->id, line->state);
418                                 WARN_ON(1);
419                                 spin_unlock(&line->lock);
420                                 return -EINVAL;
421                         }
422                         spin_unlock(&line->lock);
423                 }
424         }
425 #endif
426         return nvm_submit_io(dev, rqd);
427 }
428
429 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
430                               unsigned int nr_secs, unsigned int len,
431                               gfp_t gfp_mask)
432 {
433         struct nvm_tgt_dev *dev = pblk->dev;
434         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
435         void *kaddr = data;
436         struct page *page;
437         struct bio *bio;
438         int i, ret;
439
440         if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
441                 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
442
443         bio = bio_kmalloc(gfp_mask, nr_secs);
444         if (!bio)
445                 return ERR_PTR(-ENOMEM);
446
447         for (i = 0; i < nr_secs; i++) {
448                 page = vmalloc_to_page(kaddr);
449                 if (!page) {
450                         pr_err("pblk: could not map vmalloc bio\n");
451                         bio_put(bio);
452                         bio = ERR_PTR(-ENOMEM);
453                         goto out;
454                 }
455
456                 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
457                 if (ret != PAGE_SIZE) {
458                         pr_err("pblk: could not add page to bio\n");
459                         bio_put(bio);
460                         bio = ERR_PTR(-ENOMEM);
461                         goto out;
462                 }
463
464                 kaddr += PAGE_SIZE;
465         }
466 out:
467         return bio;
468 }
469
470 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
471                    unsigned long secs_to_flush)
472 {
473         int max = pblk->sec_per_write;
474         int min = pblk->min_write_pgs;
475         int secs_to_sync = 0;
476
477         if (secs_avail >= max)
478                 secs_to_sync = max;
479         else if (secs_avail >= min)
480                 secs_to_sync = min * (secs_avail / min);
481         else if (secs_to_flush)
482                 secs_to_sync = min;
483
484         return secs_to_sync;
485 }
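
/*
 * Worked example (hypothetical geometry): with sec_per_write = 64 and
 * min_write_pgs = 8, secs_avail = 100 yields secs_to_sync = 64,
 * secs_avail = 20 yields 16 (rounded down to a multiple of
 * min_write_pgs), and secs_avail = 3 yields 8 only when secs_to_flush
 * is set (the request is padded to honor the flush); otherwise 0 and
 * nothing is written yet.
 */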
486
487 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
488 {
489         u64 addr;
490         int i;
491
492         addr = find_next_zero_bit(line->map_bitmap,
493                                         pblk->lm.sec_per_line, line->cur_sec);
494         line->cur_sec = addr - nr_secs;
495
496         for (i = 0; i < nr_secs; i++, line->cur_sec--)
497                 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
498 }
499
500 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
501 {
502         u64 addr;
503         int i;
504
505         /* logic error: ppa out-of-bounds. Prevent generating a bad address */
506         if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
507                 WARN(1, "pblk: page allocation out of bounds\n");
508                 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
509         }
510
511         line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
512                                         pblk->lm.sec_per_line, line->cur_sec);
513         for (i = 0; i < nr_secs; i++, line->cur_sec++)
514                 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
515
516         return addr;
517 }
518
519 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
520 {
521         u64 addr;
522
523         /* Lock needed in case a write fails and a recovery needs to remap
524          * failed write buffer entries
525          */
526         spin_lock(&line->lock);
527         addr = __pblk_alloc_page(pblk, line, nr_secs);
528         line->left_msecs -= nr_secs;
529         WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
530         spin_unlock(&line->lock);
531
532         return addr;
533 }
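
/*
 * Bitmap bookkeeping example (illustrative values): if sectors 0-5 of
 * map_bitmap are set and cur_sec = 6, pblk_alloc_page(pblk, line, 4)
 * finds the first zero bit at 6, sets bits 6-9, leaves cur_sec = 10,
 * drops left_msecs by 4 and returns addr = 6, i.e. the next four
 * mapped sectors start at paddr 6 of the line.
 */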
534
535 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
536 {
537         u64 paddr;
538
539         spin_lock(&line->lock);
540         paddr = find_next_zero_bit(line->map_bitmap,
541                                         pblk->lm.sec_per_line, line->cur_sec);
542         spin_unlock(&line->lock);
543
544         return paddr;
545 }
546
547 /*
548  * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
549  * taking the per-LUN semaphore.
550  */
551 static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
552                                      void *emeta_buf, u64 paddr, int dir)
553 {
554         struct nvm_tgt_dev *dev = pblk->dev;
555         struct nvm_geo *geo = &dev->geo;
556         struct pblk_line_meta *lm = &pblk->lm;
557         void *ppa_list, *meta_list;
558         struct bio *bio;
559         struct nvm_rq rqd;
560         dma_addr_t dma_ppa_list, dma_meta_list;
561         int min = pblk->min_write_pgs;
562         int left_ppas = lm->emeta_sec[0];
563         int id = line->id;
564         int rq_ppas, rq_len;
565         int cmd_op, bio_op;
566         int i, j;
567         int ret;
568         DECLARE_COMPLETION_ONSTACK(wait);
569
570         if (dir == WRITE) {
571                 bio_op = REQ_OP_WRITE;
572                 cmd_op = NVM_OP_PWRITE;
573         } else if (dir == READ) {
574                 bio_op = REQ_OP_READ;
575                 cmd_op = NVM_OP_PREAD;
576         } else
577                 return -EINVAL;
578
579         meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
580                                                         &dma_meta_list);
581         if (!meta_list)
582                 return -ENOMEM;
583
584         ppa_list = meta_list + pblk_dma_meta_size;
585         dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
586
587 next_rq:
588         memset(&rqd, 0, sizeof(struct nvm_rq));
589
590         rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
591         rq_len = rq_ppas * geo->sec_size;
592
593         bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len, GFP_KERNEL);
594         if (IS_ERR(bio)) {
595                 ret = PTR_ERR(bio);
596                 goto free_rqd_dma;
597         }
598
599         bio->bi_iter.bi_sector = 0; /* internal bio */
600         bio_set_op_attrs(bio, bio_op, 0);
601
602         rqd.bio = bio;
603         rqd.meta_list = meta_list;
604         rqd.ppa_list = ppa_list;
605         rqd.dma_meta_list = dma_meta_list;
606         rqd.dma_ppa_list = dma_ppa_list;
607         rqd.opcode = cmd_op;
608         rqd.nr_ppas = rq_ppas;
609         rqd.end_io = pblk_end_io_sync;
610         rqd.private = &wait;
611
612         if (dir == WRITE) {
613                 struct pblk_sec_meta *meta_list = rqd.meta_list;
614
615                 rqd.flags = pblk_set_progr_mode(pblk, WRITE);
616                 for (i = 0; i < rqd.nr_ppas; ) {
617                         spin_lock(&line->lock);
618                         paddr = __pblk_alloc_page(pblk, line, min);
619                         spin_unlock(&line->lock);
620                         for (j = 0; j < min; j++, i++, paddr++) {
621                                 meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
622                                 rqd.ppa_list[i] =
623                                         addr_to_gen_ppa(pblk, paddr, id);
624                         }
625                 }
626         } else {
627                 for (i = 0; i < rqd.nr_ppas; ) {
628                         struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
629                         int pos = pblk_dev_ppa_to_pos(geo, ppa);
630                         int read_type = PBLK_READ_RANDOM;
631
632                         if (pblk_io_aligned(pblk, rq_ppas))
633                                 read_type = PBLK_READ_SEQUENTIAL;
634                         rqd.flags = pblk_set_read_mode(pblk, read_type);
635
636                         while (test_bit(pos, line->blk_bitmap)) {
637                                 paddr += min;
638                                 if (pblk_boundary_paddr_checks(pblk, paddr)) {
639                                         pr_err("pblk: corrupt emeta line:%d\n",
640                                                                 line->id);
641                                         bio_put(bio);
642                                         ret = -EINTR;
643                                         goto free_rqd_dma;
644                                 }
645
646                                 ppa = addr_to_gen_ppa(pblk, paddr, id);
647                                 pos = pblk_dev_ppa_to_pos(geo, ppa);
648                         }
649
650                         if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
651                                 pr_err("pblk: corrupt emeta line:%d\n",
652                                                                 line->id);
653                                 bio_put(bio);
654                                 ret = -EINTR;
655                                 goto free_rqd_dma;
656                         }
657
658                         for (j = 0; j < min; j++, i++, paddr++)
659                                 rqd.ppa_list[i] =
660                                         addr_to_gen_ppa(pblk, paddr, line->id);
661                 }
662         }
663
664         ret = pblk_submit_io(pblk, &rqd);
665         if (ret) {
666                 pr_err("pblk: emeta I/O submission failed: %d\n", ret);
667                 bio_put(bio);
668                 goto free_rqd_dma;
669         }
670
671         if (!wait_for_completion_io_timeout(&wait,
672                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
673                 pr_err("pblk: emeta I/O timed out\n");
674         }
675         reinit_completion(&wait);
676
677         if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
678                 bio_put(bio);
679
680         if (rqd.error) {
681                 if (dir == WRITE)
682                         pblk_log_write_err(pblk, &rqd);
683                 else
684                         pblk_log_read_err(pblk, &rqd);
685         }
686
687         emeta_buf += rq_len;
688         left_ppas -= rq_ppas;
689         if (left_ppas)
690                 goto next_rq;
691 free_rqd_dma:
692         nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
693         return ret;
694 }
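
/*
 * Chunking example for the loop above (hypothetical numbers): with
 * emeta_sec[0] = 64, sec_per_write = 16 and sec_size = 4096, the emeta
 * region is transferred in four requests of rq_ppas = 16 sectors
 * (rq_len = 64 KiB each), with emeta_buf advancing by rq_len and
 * left_ppas dropping 64 -> 48 -> 32 -> 16 -> 0 before the loop exits.
 */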
695
696 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
697 {
698         struct nvm_tgt_dev *dev = pblk->dev;
699         struct nvm_geo *geo = &dev->geo;
700         struct pblk_line_meta *lm = &pblk->lm;
701         int bit;
702
703         /* This usually only happens on bad lines */
704         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
705         if (bit >= lm->blk_per_line)
706                 return -1;
707
708         return bit * geo->sec_per_pl;
709 }
710
711 static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
712                                      u64 paddr, int dir)
713 {
714         struct nvm_tgt_dev *dev = pblk->dev;
715         struct pblk_line_meta *lm = &pblk->lm;
716         struct bio *bio;
717         struct nvm_rq rqd;
718         __le64 *lba_list = NULL;
719         int i, ret;
720         int cmd_op, bio_op;
721         int flags;
722         DECLARE_COMPLETION_ONSTACK(wait);
723
724         if (dir == WRITE) {
725                 bio_op = REQ_OP_WRITE;
726                 cmd_op = NVM_OP_PWRITE;
727                 flags = pblk_set_progr_mode(pblk, WRITE);
728                 lba_list = emeta_to_lbas(pblk, line->emeta->buf);
729         } else if (dir == READ) {
730                 bio_op = REQ_OP_READ;
731                 cmd_op = NVM_OP_PREAD;
732                 flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
733         } else
734                 return -EINVAL;
735
736         memset(&rqd, 0, sizeof(struct nvm_rq));
737
738         rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
739                                                         &rqd.dma_meta_list);
740         if (!rqd.meta_list)
741                 return -ENOMEM;
742
743         rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
744         rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
745
746         bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
747         if (IS_ERR(bio)) {
748                 ret = PTR_ERR(bio);
749                 goto free_ppa_list;
750         }
751
752         bio->bi_iter.bi_sector = 0; /* internal bio */
753         bio_set_op_attrs(bio, bio_op, 0);
754
755         rqd.bio = bio;
756         rqd.opcode = cmd_op;
757         rqd.flags = flags;
758         rqd.nr_ppas = lm->smeta_sec;
759         rqd.end_io = pblk_end_io_sync;
760         rqd.private = &wait;
761
762         for (i = 0; i < lm->smeta_sec; i++, paddr++) {
763                 struct pblk_sec_meta *meta_list = rqd.meta_list;
764
765                 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
766
767                 if (dir == WRITE) {
768                         __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
769
770                         meta_list[i].lba = lba_list[paddr] = addr_empty;
771                 }
772         }
773
774         /*
775          * This I/O is sent by the write thread when a line is replaced. Since
776          * the write thread is the only one sending write and erase commands,
777          * there is no need to take the LUN semaphore.
778          */
779         ret = pblk_submit_io(pblk, &rqd);
780         if (ret) {
781                 pr_err("pblk: smeta I/O submission failed: %d\n", ret);
782                 bio_put(bio);
783                 goto free_ppa_list;
784         }
785
786         if (!wait_for_completion_io_timeout(&wait,
787                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
788                 pr_err("pblk: smeta I/O timed out\n");
789         }
790
791         if (rqd.error) {
792                 if (dir == WRITE)
793                         pblk_log_write_err(pblk, &rqd);
794                 else
795                         pblk_log_read_err(pblk, &rqd);
796         }
797
798 free_ppa_list:
799         nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
800
801         return ret;
802 }
803
804 int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
805 {
806         u64 bpaddr = pblk_line_smeta_start(pblk, line);
807
808         return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
809 }
810
811 int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
812                          void *emeta_buf)
813 {
814         return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
815                                                 line->emeta_ssec, READ);
816 }
817
818 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
819                             struct ppa_addr ppa)
820 {
821         rqd->opcode = NVM_OP_ERASE;
822         rqd->ppa_addr = ppa;
823         rqd->nr_ppas = 1;
824         rqd->flags = pblk_set_progr_mode(pblk, ERASE);
825         rqd->bio = NULL;
826 }
827
828 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
829 {
830         struct nvm_rq rqd;
831         int ret;
832         DECLARE_COMPLETION_ONSTACK(wait);
833
834         memset(&rqd, 0, sizeof(struct nvm_rq));
835
836         pblk_setup_e_rq(pblk, &rqd, ppa);
837
838         rqd.end_io = pblk_end_io_sync;
839         rqd.private = &wait;
840
841         /* The write thread schedules erases so that it minimizes interference
842          * with writes. Thus, there is no need to take the LUN semaphore.
843          */
844         ret = pblk_submit_io(pblk, &rqd);
845         if (ret) {
846                 struct nvm_tgt_dev *dev = pblk->dev;
847                 struct nvm_geo *geo = &dev->geo;
848
849                 pr_err("pblk: could not sync erase line:%d,blk:%d\n",
850                                         pblk_dev_ppa_to_line(ppa),
851                                         pblk_dev_ppa_to_pos(geo, ppa));
852
853                 rqd.error = ret;
854                 goto out;
855         }
856
857         if (!wait_for_completion_io_timeout(&wait,
858                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
859                 pr_err("pblk: sync erase timed out\n");
860         }
861
862 out:
863         rqd.private = pblk;
864         __pblk_end_io_erase(pblk, &rqd);
865
866         return 0;
867 }
868
869 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
870 {
871         struct pblk_line_meta *lm = &pblk->lm;
872         struct ppa_addr ppa;
873         int bit = -1;
874
875         /* Erase only good blocks, one at a time */
876         do {
877                 spin_lock(&line->lock);
878                 bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
879                                                                 bit + 1);
880                 if (bit >= lm->blk_per_line) {
881                         spin_unlock(&line->lock);
882                         break;
883                 }
884
885                 ppa = pblk->luns[bit].bppa; /* set ch and lun */
886                 ppa.g.blk = line->id;
887
888                 atomic_dec(&line->left_eblks);
889                 WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
890                 spin_unlock(&line->lock);
891
892                 if (pblk_blk_erase_sync(pblk, ppa)) {
893                         pr_err("pblk: failed to erase line %d\n", line->id);
894                         return -ENOMEM;
895                 }
896         } while (1);
897
898         return 0;
899 }
900
901 static void pblk_line_setup_metadata(struct pblk_line *line,
902                                      struct pblk_line_mgmt *l_mg,
903                                      struct pblk_line_meta *lm)
904 {
905         int meta_line;
906
907 retry_meta:
908         meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
909         if (meta_line == PBLK_DATA_LINES) {
910                 spin_unlock(&l_mg->free_lock);
911                 io_schedule();
912                 spin_lock(&l_mg->free_lock);
913                 goto retry_meta;
914         }
915
916         set_bit(meta_line, &l_mg->meta_bitmap);
917         line->meta_line = meta_line;
918
919         line->smeta = l_mg->sline_meta[meta_line];
920         line->emeta = l_mg->eline_meta[meta_line];
921
922         memset(line->smeta, 0, lm->smeta_len);
923         memset(line->emeta->buf, 0, lm->emeta_len[0]);
924
925         line->emeta->mem = 0;
926         atomic_set(&line->emeta->sync, 0);
927 }
928
929 /* For now, lines are always assumed to be full. Thus, the smeta former and
930  * current LUN bitmaps are omitted.
931  */
932 static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
933                                   struct pblk_line *cur)
934 {
935         struct nvm_tgt_dev *dev = pblk->dev;
936         struct nvm_geo *geo = &dev->geo;
937         struct pblk_line_meta *lm = &pblk->lm;
938         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
939         struct pblk_emeta *emeta = line->emeta;
940         struct line_emeta *emeta_buf = emeta->buf;
941         struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
942         int nr_blk_line;
943
944         /* After erasing the line, new bad blocks might appear and we risk
945          * having an invalid line
946          */
947         nr_blk_line = lm->blk_per_line -
948                         bitmap_weight(line->blk_bitmap, lm->blk_per_line);
949         if (nr_blk_line < lm->min_blk_line) {
950                 spin_lock(&l_mg->free_lock);
951                 spin_lock(&line->lock);
952                 line->state = PBLK_LINESTATE_BAD;
953                 spin_unlock(&line->lock);
954
955                 list_add_tail(&line->list, &l_mg->bad_list);
956                 spin_unlock(&l_mg->free_lock);
957
958                 pr_debug("pblk: line %d is bad\n", line->id);
959
960                 return 0;
961         }
962
963         /* Run-time metadata */
964         line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
965
966         /* Mark LUNs allocated in this line (all for now) */
967         bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
968
969         smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
970         memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
971         smeta_buf->header.id = cpu_to_le32(line->id);
972         smeta_buf->header.type = cpu_to_le16(line->type);
973         smeta_buf->header.version = cpu_to_le16(1);
974
975         /* Start metadata */
976         smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
977         smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
978
979         /* Fill metadata among lines */
980         if (cur) {
981                 memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
982                 smeta_buf->prev_id = cpu_to_le32(cur->id);
983                 cur->emeta->buf->next_id = cpu_to_le32(line->id);
984         } else {
985                 smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
986         }
987
988         /* All smeta must be set at this point */
989         smeta_buf->header.crc = cpu_to_le32(
990                         pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
991         smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
992
993         /* End metadata */
994         memcpy(&emeta_buf->header, &smeta_buf->header,
995                                                 sizeof(struct line_header));
996         emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
997         emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
998         emeta_buf->nr_valid_lbas = cpu_to_le64(0);
999         emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1000         emeta_buf->crc = cpu_to_le32(0);
1001         emeta_buf->prev_id = smeta_buf->prev_id;
1002
1003         return 1;
1004 }
1005
1006 /* For now, lines are always assumed to be full. Thus, the smeta former and
1007  * current LUN bitmaps are omitted.
1008  */
1009 static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1010                              int init)
1011 {
1012         struct nvm_tgt_dev *dev = pblk->dev;
1013         struct nvm_geo *geo = &dev->geo;
1014         struct pblk_line_meta *lm = &pblk->lm;
1015         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1016         int nr_bb = 0;
1017         u64 off;
1018         int bit = -1;
1019
1020         line->sec_in_line = lm->sec_per_line;
1021
1022         /* Capture bad block information on line mapping bitmaps */
1023         while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1024                                         bit + 1)) < lm->blk_per_line) {
1025                 off = bit * geo->sec_per_pl;
1026                 bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1027                                                         lm->sec_per_line);
1028                 bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1029                                                         lm->sec_per_line);
1030                 line->sec_in_line -= geo->sec_per_blk;
1031                 if (bit >= lm->emeta_bb)
1032                         nr_bb++;
1033         }
1034
1035         /* Mark smeta metadata sectors as bad sectors */
1036         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1037         off = bit * geo->sec_per_pl;
1038 retry_smeta:
1039         bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1040         line->sec_in_line -= lm->smeta_sec;
1041         line->smeta_ssec = off;
1042         line->cur_sec = off + lm->smeta_sec;
1043
1044         if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
1045                 pr_debug("pblk: line smeta I/O failed. Retry\n");
1046                 off += geo->sec_per_pl;
1047                 goto retry_smeta;
1048         }
1049
1050         bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1051
1052         /* Mark emeta metadata sectors as bad sectors. We need to consider bad
1053          * blocks to make sure that there are enough sectors to store emeta
1054          */
1055         bit = lm->sec_per_line;
1056         off = lm->sec_per_line - lm->emeta_sec[0];
1057         bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
1058         while (nr_bb) {
1059                 off -= geo->sec_per_pl;
1060                 if (!test_bit(off, line->invalid_bitmap)) {
1061                         bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
1062                         nr_bb--;
1063                 }
1064         }
1065
1066         line->sec_in_line -= lm->emeta_sec[0];
1067         line->emeta_ssec = off;
1068         line->nr_valid_lbas = 0;
1069         line->left_msecs = line->sec_in_line;
1070         *line->vsc = cpu_to_le32(line->sec_in_line);
1071
1072         if (lm->sec_per_line - line->sec_in_line !=
1073                 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1074                 spin_lock(&line->lock);
1075                 line->state = PBLK_LINESTATE_BAD;
1076                 spin_unlock(&line->lock);
1077
1078                 list_add_tail(&line->list, &l_mg->bad_list);
1079                 pr_err("pblk: unexpected line %d is bad\n", line->id);
1080
1081                 return 0;
1082         }
1083
1084         return 1;
1085 }
1086
1087 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1088 {
1089         struct pblk_line_meta *lm = &pblk->lm;
1090         int blk_in_line = atomic_read(&line->blk_in_line);
1091
1092         line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
1093         if (!line->map_bitmap)
1094                 return -ENOMEM;
1095         memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1096
1097         /* invalid_bitmap is special since it is used when the line is closed. No
1098          * need to zeroize it; it will be initialized using bad block info from
1099          * map_bitmap
1100          */
1101         line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
1102         if (!line->invalid_bitmap) {
1103                 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1104                 return -ENOMEM;
1105         }
1106
1107         spin_lock(&line->lock);
1108         if (line->state != PBLK_LINESTATE_FREE) {
1109                 spin_unlock(&line->lock);
1110                 WARN(1, "pblk: corrupted line state\n");
1111                 return -EINTR;
1112         }
1113         line->state = PBLK_LINESTATE_OPEN;
1114
1115         atomic_set(&line->left_eblks, blk_in_line);
1116         atomic_set(&line->left_seblks, blk_in_line);
1117
1118         line->meta_distance = lm->meta_distance;
1119         spin_unlock(&line->lock);
1120
1121         /* Bad blocks do not need to be erased */
1122         bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1123
1124         kref_init(&line->ref);
1125
1126         return 0;
1127 }
1128
1129 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1130 {
1131         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1132         int ret;
1133
1134         spin_lock(&l_mg->free_lock);
1135         l_mg->data_line = line;
1136         list_del(&line->list);
1137
1138         ret = pblk_line_prepare(pblk, line);
1139         if (ret) {
1140                 list_add(&line->list, &l_mg->free_list);
1141                 spin_unlock(&l_mg->free_lock);
1142                 return ret;
1143         }
1144         spin_unlock(&l_mg->free_lock);
1145
1146         pblk_rl_free_lines_dec(&pblk->rl, line);
1147
1148         if (!pblk_line_init_bb(pblk, line, 0)) {
1149                 list_add(&line->list, &l_mg->free_list);
1150                 return -EINTR;
1151         }
1152
1153         return 0;
1154 }
1155
1156 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1157 {
1158         mempool_free(line->map_bitmap, pblk->line_meta_pool);
1159         line->map_bitmap = NULL;
1160         line->smeta = NULL;
1161         line->emeta = NULL;
1162 }
1163
1164 struct pblk_line *pblk_line_get(struct pblk *pblk)
1165 {
1166         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1167         struct pblk_line_meta *lm = &pblk->lm;
1168         struct pblk_line *line = NULL;
1169         int bit;
1170
1171         lockdep_assert_held(&l_mg->free_lock);
1172
1173 retry_get:
1174         if (list_empty(&l_mg->free_list)) {
1175                 pr_err("pblk: no free lines\n");
1176                 goto out;
1177         }
1178
1179         line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1180         list_del(&line->list);
1181         l_mg->nr_free_lines--;
1182
1183         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1184         if (unlikely(bit >= lm->blk_per_line)) {
1185                 spin_lock(&line->lock);
1186                 line->state = PBLK_LINESTATE_BAD;
1187                 spin_unlock(&line->lock);
1188
1189                 list_add_tail(&line->list, &l_mg->bad_list);
1190
1191                 pr_debug("pblk: line %d is bad\n", line->id);
1192                 goto retry_get;
1193         }
1194
1195         if (pblk_line_prepare(pblk, line)) {
1196                 pr_err("pblk: failed to prepare line %d\n", line->id);
1197                 list_add(&line->list, &l_mg->free_list);
1198                 return NULL;
1199         }
1200
1201 out:
1202         return line;
1203 }
1204
1205 static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1206                                          struct pblk_line *line)
1207 {
1208         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1209         struct pblk_line *retry_line;
1210
1211         spin_lock(&l_mg->free_lock);
1212         retry_line = pblk_line_get(pblk);
1213         if (!retry_line) {
1214                 l_mg->data_line = NULL;
1215                 spin_unlock(&l_mg->free_lock);
1216                 return NULL;
1217         }
1218
1219         retry_line->smeta = line->smeta;
1220         retry_line->emeta = line->emeta;
1221         retry_line->meta_line = line->meta_line;
1222
1223         pblk_line_free(pblk, line);
1224         l_mg->data_line = retry_line;
1225         spin_unlock(&l_mg->free_lock);
1226
1227         if (pblk_line_erase(pblk, retry_line)) {
1228                 spin_lock(&l_mg->free_lock);
1229                 l_mg->data_line = NULL;
1230                 spin_unlock(&l_mg->free_lock);
1231                 return NULL;
1232         }
1233
1234         pblk_rl_free_lines_dec(&pblk->rl, retry_line);
1235
1236         return retry_line;
1237 }
1238
1239 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1240 {
1241         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1242         struct pblk_line *line;
1243         int is_next = 0;
1244
1245         spin_lock(&l_mg->free_lock);
1246         line = pblk_line_get(pblk);
1247         if (!line) {
1248                 spin_unlock(&l_mg->free_lock);
1249                 return NULL;
1250         }
1251
1252         line->seq_nr = l_mg->d_seq_nr++;
1253         line->type = PBLK_LINETYPE_DATA;
1254         l_mg->data_line = line;
1255
1256         pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1257
1258         /* Allocate next line for preparation */
1259         l_mg->data_next = pblk_line_get(pblk);
1260         if (l_mg->data_next) {
1261                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1262                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1263                 is_next = 1;
1264         }
1265         spin_unlock(&l_mg->free_lock);
1266
1267         pblk_rl_free_lines_dec(&pblk->rl, line);
1268         if (is_next)
1269                 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
1270
1271         if (pblk_line_erase(pblk, line))
1272                 return NULL;
1273
1274 retry_setup:
1275         if (!pblk_line_init_metadata(pblk, line, NULL)) {
1276                 line = pblk_line_retry(pblk, line);
1277                 if (!line)
1278                         return NULL;
1279
1280                 goto retry_setup;
1281         }
1282
1283         if (!pblk_line_init_bb(pblk, line, 1)) {
1284                 line = pblk_line_retry(pblk, line);
1285                 if (!line)
1286                         return NULL;
1287
1288                 goto retry_setup;
1289         }
1290
1291         return line;
1292 }
1293
1294 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1295 {
1296         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1297         struct pblk_line *cur, *new;
1298         unsigned int left_seblks;
1299         int is_next = 0;
1300
1301         cur = l_mg->data_line;
1302         new = l_mg->data_next;
1303         if (!new)
1304                 return NULL;
1305         l_mg->data_line = new;
1306
1307 retry_line:
1308         left_seblks = atomic_read(&new->left_seblks);
1309         if (left_seblks) {
1310                 /* If line is not fully erased, erase it */
1311                 if (atomic_read(&new->left_eblks)) {
1312                         if (pblk_line_erase(pblk, new))
1313                                 return NULL;
1314                 } else {
1315                         io_schedule();
1316                 }
1317                 goto retry_line;
1318         }
1319
1320         spin_lock(&l_mg->free_lock);
1321         /* Allocate next line for preparation */
1322         l_mg->data_next = pblk_line_get(pblk);
1323         if (l_mg->data_next) {
1324                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1325                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1326                 is_next = 1;
1327         }
1328
1329         pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1330         spin_unlock(&l_mg->free_lock);
1331
1332         if (is_next)
1333                 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
1334
1335 retry_setup:
1336         if (!pblk_line_init_metadata(pblk, new, cur)) {
1337                 new = pblk_line_retry(pblk, new);
1338                 if (!new)
1339                         return NULL;
1340
1341                 goto retry_setup;
1342         }
1343
1344         if (!pblk_line_init_bb(pblk, new, 1)) {
1345                 new = pblk_line_retry(pblk, new);
1346                 if (!new)
1347                         return NULL;
1348
1349                 goto retry_setup;
1350         }
1351
1352         return new;
1353 }
1354
1355 void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
1356 {
1357         if (line->map_bitmap)
1358                 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1359         if (line->invalid_bitmap)
1360                 mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
1361
1362         *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1363
1364         line->map_bitmap = NULL;
1365         line->invalid_bitmap = NULL;
1366         line->smeta = NULL;
1367         line->emeta = NULL;
1368 }
1369
1370 void pblk_line_put(struct kref *ref)
1371 {
1372         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1373         struct pblk *pblk = line->pblk;
1374         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1375
1376         spin_lock(&line->lock);
1377         WARN_ON(line->state != PBLK_LINESTATE_GC);
1378         line->state = PBLK_LINESTATE_FREE;
1379         line->gc_group = PBLK_LINEGC_NONE;
1380         pblk_line_free(pblk, line);
1381         spin_unlock(&line->lock);
1382
1383         spin_lock(&l_mg->free_lock);
1384         list_add_tail(&line->list, &l_mg->free_list);
1385         l_mg->nr_free_lines++;
1386         spin_unlock(&l_mg->free_lock);
1387
1388         pblk_rl_free_lines_inc(&pblk->rl, line);
1389 }
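
/*
 * Reference lifecycle sketch (illustrative): pblk_line_prepare() starts
 * the line at one reference via kref_init(&line->ref); users that still
 * hold data on the line take additional references, and the final
 *
 *	kref_put(&line->ref, pblk_line_put);
 *
 * runs the release above, returning the line to l_mg->free_list and
 * crediting the rate limiter.
 */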
1390
1391 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1392 {
1393         struct nvm_rq *rqd;
1394         int err;
1395
1396         rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
1397         memset(rqd, 0, pblk_g_rq_size);
1398
1399         pblk_setup_e_rq(pblk, rqd, ppa);
1400
1401         rqd->end_io = pblk_end_io_erase;
1402         rqd->private = pblk;
1403
1404         /* The write thread schedules erases so that it minimizes interference
1405          * with writes. Thus, there is no need to take the LUN semaphore.
1406          */
1407         err = pblk_submit_io(pblk, rqd);
1408         if (err) {
1409                 struct nvm_tgt_dev *dev = pblk->dev;
1410                 struct nvm_geo *geo = &dev->geo;
1411
1412                 pr_err("pblk: could not async erase line:%d,blk:%d\n",
1413                                         pblk_dev_ppa_to_line(ppa),
1414                                         pblk_dev_ppa_to_pos(geo, ppa));
1415         }
1416
1417         return err;
1418 }
1419
1420 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1421 {
1422         return pblk->l_mg.data_line;
1423 }
1424
1425 /* For now, always erase next line */
1426 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1427 {
1428         return pblk->l_mg.data_next;
1429 }
1430
1431 int pblk_line_is_full(struct pblk_line *line)
1432 {
1433         return (line->left_msecs == 0);
1434 }
1435
1436 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1437 {
1438         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1439         struct pblk_line_meta *lm = &pblk->lm;
1440         struct list_head *move_list;
1441
1442         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1443                                 "pblk: corrupt closed line %d\n", line->id);
1444
1445         spin_lock(&l_mg->free_lock);
1446         WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1447         spin_unlock(&l_mg->free_lock);
1448
1449         spin_lock(&l_mg->gc_lock);
1450         spin_lock(&line->lock);
1451         WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1452         line->state = PBLK_LINESTATE_CLOSED;
1453         move_list = pblk_line_gc_list(pblk, line);
1454
1455         list_add_tail(&line->list, move_list);
1456
1457         mempool_free(line->map_bitmap, pblk->line_meta_pool);
1458         line->map_bitmap = NULL;
1459         line->smeta = NULL;
1460         line->emeta = NULL;
1461
1462         spin_unlock(&line->lock);
1463         spin_unlock(&l_mg->gc_lock);
1464 }
1465
1466 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1467 {
1468         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1469         struct pblk_line_meta *lm = &pblk->lm;
1470         struct pblk_emeta *emeta = line->emeta;
1471         struct line_emeta *emeta_buf = emeta->buf;
1472
1473         /* No need for an exact vsc value; avoid the big line lock and approximate */
1474         memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1475         memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1476
1477         emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1478         emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1479
1480         spin_lock(&l_mg->close_lock);
1481         spin_lock(&line->lock);
1482         list_add_tail(&line->list, &l_mg->emeta_list);
1483         spin_unlock(&line->lock);
1484         spin_unlock(&l_mg->close_lock);
1485 }
1486
1487 void pblk_line_close_ws(struct work_struct *work)
1488 {
1489         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1490                                                                         ws);
1491         struct pblk *pblk = line_ws->pblk;
1492         struct pblk_line *line = line_ws->line;
1493
1494         pblk_line_close(pblk, line);
1495         mempool_free(line_ws, pblk->line_ws_pool);
1496 }
1497
1498 void pblk_line_mark_bb(struct work_struct *work)
1499 {
1500         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1501                                                                         ws);
1502         struct pblk *pblk = line_ws->pblk;
1503         struct nvm_tgt_dev *dev = pblk->dev;
1504         struct ppa_addr *ppa = line_ws->priv;
1505         int ret;
1506
1507         ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
1508         if (ret) {
1509                 struct pblk_line *line;
1510                 int pos;
1511
1512                 line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
1513                 pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
1514
1515                 pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
1516                                 line->id, pos);
1517         }
1518
1519         kfree(ppa);
1520         mempool_free(line_ws, pblk->line_ws_pool);
1521 }
1522
1523 void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1524                       void (*work)(struct work_struct *))
1525 {
1526         struct pblk_line_ws *line_ws;
1527
1528         line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
1529         if (!line_ws)
1530                 return;
1531
1532         line_ws->pblk = pblk;
1533         line_ws->line = line;
1534         line_ws->priv = priv;
1535
1536         INIT_WORK(&line_ws->ws, work);
1537         queue_work(pblk->kw_wq, &line_ws->ws);
1538 }
1539
1540 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1541                   unsigned long *lun_bitmap)
1542 {
1543         struct nvm_tgt_dev *dev = pblk->dev;
1544         struct nvm_geo *geo = &dev->geo;
1545         struct pblk_lun *rlun;
1546         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1547         int ret;
1548
1549         /*
1550          * Only send one inflight I/O per LUN. Since we map at a page
1551          * granularity, all ppas in the I/O will map to the same LUN
1552          */
1553 #ifdef CONFIG_NVM_DEBUG
1554         int i;
1555
1556         for (i = 1; i < nr_ppas; i++)
1557                 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
1558                                 ppa_list[0].g.ch != ppa_list[i].g.ch);
1559 #endif
1560         /* If the LUN has been locked for this same request, do not attempt to
1561          * lock it again
1562          */
1563         if (test_and_set_bit(pos, lun_bitmap))
1564                 return;
1565
1566         rlun = &pblk->luns[pos];
1567         ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
1568         if (ret) {
1569                 switch (ret) {
1570                 case -ETIME:
1571                         pr_err("pblk: lun semaphore timed out\n");
1572                         break;
1573                 case -EINTR:
1574                         pr_err("pblk: lun semaphore interrupted\n");
1575                         break;
1576                 }
1577         }
1578 }
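
/*
 * Pairing sketch (illustrative): the write path allocates a lun_bitmap,
 * takes the per-LUN semaphores it needs while mapping, and releases
 * them from completion context:
 *
 *	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
 *	pblk_down_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
 *	... submit the write; then, in the end_io path ...
 *	pblk_up_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
 *
 * Note that pblk_up_rq() below also kfree()s the bitmap, so it must run
 * exactly once per allocation.
 */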
1579
1580 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1581                 unsigned long *lun_bitmap)
1582 {
1583         struct nvm_tgt_dev *dev = pblk->dev;
1584         struct nvm_geo *geo = &dev->geo;
1585         struct pblk_lun *rlun;
1586         int nr_luns = geo->nr_luns;
1587         int bit = -1;
1588
1589         while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
1590                 rlun = &pblk->luns[bit];
1591                 up(&rlun->wr_sem);
1592         }
1593
1594         kfree(lun_bitmap);
1595 }
1596
1597 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1598 {
1599         struct ppa_addr l2p_ppa;
1600
1601         /* logic error: lba out-of-bounds. Ignore update */
1602         if (!(lba < pblk->rl.nr_secs)) {
1603                 WARN(1, "pblk: corrupted L2P map request\n");
1604                 return;
1605         }
1606
1607         spin_lock(&pblk->trans_lock);
1608         l2p_ppa = pblk_trans_map_get(pblk, lba);
1609
1610         if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
1611                 pblk_map_invalidate(pblk, l2p_ppa);
1612
1613         pblk_trans_map_set(pblk, lba, ppa);
1614         spin_unlock(&pblk->trans_lock);
1615 }
1616
1617 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1618 {
1619 #ifdef CONFIG_NVM_DEBUG
1620         /* Callers must ensure that the ppa points to a cache address */
1621         BUG_ON(!pblk_addr_in_cache(ppa));
1622         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1623 #endif
1624
1625         pblk_update_map(pblk, lba, ppa);
1626 }
1627
1628 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1629                        struct pblk_line *gc_line)
1630 {
1631         struct ppa_addr l2p_ppa;
1632         int ret = 1;
1633
1634 #ifdef CONFIG_NVM_DEBUG
1635         /* Callers must ensure that the ppa points to a cache address */
1636         BUG_ON(!pblk_addr_in_cache(ppa));
1637         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1638 #endif
1639
1640         /* logic error: lba out-of-bounds. Ignore update */
1641         if (!(lba < pblk->rl.nr_secs)) {
1642                 WARN(1, "pblk: corrupted L2P map request\n");
1643                 return 0;
1644         }
1645
1646         spin_lock(&pblk->trans_lock);
1647         l2p_ppa = pblk_trans_map_get(pblk, lba);
1648
1649         /* Prevent updated entries from being overwritten by GC */
1650         if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
1651                                 pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
1652                 ret = 0;
1653                 goto out;
1654         }
1655
1656         pblk_trans_map_set(pblk, lba, ppa);
1657 out:
1658         spin_unlock(&pblk->trans_lock);
1659         return ret;
1660 }
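
/*
 * Race example for the check above (illustrative): GC reads an lba from
 * gc_line, but before it rewrites the data the user updates the same
 * lba, so the L2P entry now points to the write cache or to another
 * line. The mismatch makes pblk_update_map_gc() return 0 and the stale
 * GC copy is dropped instead of resurrecting old data.
 */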
1661
1662 void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1663                          struct ppa_addr entry_line)
1664 {
1665         struct ppa_addr l2p_line;
1666
1667 #ifdef CONFIG_NVM_DEBUG
1668         /* Callers must ensure that the ppa points to a device address */
1669         BUG_ON(pblk_addr_in_cache(ppa));
1670 #endif
1671         /* Invalidate and discard padded entries */
1672         if (lba == ADDR_EMPTY) {
1673 #ifdef CONFIG_NVM_DEBUG
1674                 atomic_long_inc(&pblk->padded_wb);
1675 #endif
1676                 pblk_map_invalidate(pblk, ppa);
1677                 return;
1678         }
1679
1680         /* logic error: lba out-of-bounds. Ignore update */
1681         if (!(lba < pblk->rl.nr_secs)) {
1682                 WARN(1, "pblk: corrupted L2P map request\n");
1683                 return;
1684         }
1685
1686         spin_lock(&pblk->trans_lock);
1687         l2p_line = pblk_trans_map_get(pblk, lba);
1688
1689         /* Do not update L2P if the cacheline has been updated. In this case,
1690          * the mapped ppa must be invalidated
1691          */
1692         if (l2p_line.ppa != entry_line.ppa) {
1693                 if (!pblk_ppa_empty(ppa))
1694                         pblk_map_invalidate(pblk, ppa);
1695                 goto out;
1696         }
1697
1698 #ifdef CONFIG_NVM_DEBUG
1699         WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
1700 #endif
1701
1702         pblk_trans_map_set(pblk, lba, ppa);
1703 out:
1704         spin_unlock(&pblk->trans_lock);
1705 }
1706
1707 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
1708                          sector_t blba, int nr_secs)
1709 {
1710         int i;
1711
1712         spin_lock(&pblk->trans_lock);
1713         for (i = 0; i < nr_secs; i++)
1714                 ppas[i] = pblk_trans_map_get(pblk, blba + i);
1715         spin_unlock(&pblk->trans_lock);
1716 }
1717
1718 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
1719                           u64 *lba_list, int nr_secs)
1720 {
1721         sector_t lba;
1722         int i;
1723
1724         spin_lock(&pblk->trans_lock);
1725         for (i = 0; i < nr_secs; i++) {
1726                 lba = lba_list[i];
1727                 if (lba == ADDR_EMPTY) {
1728                         ppas[i].ppa = ADDR_EMPTY;
1729                 } else {
1730                         /* logic error: lba out-of-bounds. Ignore lookup */
1731                         if (!(lba < pblk->rl.nr_secs)) {
1732                                 WARN(1, "pblk: corrupted L2P map request\n");
1733                                 continue;
1734                         }
1735                         ppas[i] = pblk_trans_map_get(pblk, lba);
1736                 }
1737         }
1738         spin_unlock(&pblk->trans_lock);
1739 }