/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
#include <linux/time.h>

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_dev_ppa_to_pos(geo, *ppa);

        pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_line *line;

        line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                struct ppa_addr *ppa;

                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
                if (!ppa)
                        return;

                *ppa = rqd->ppa_addr;
                pblk_mark_bb(pblk, line, ppa);
        }
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, pblk->g_rq_pool);
}

static void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                                  u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                return;
        }

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        line->vsc--;

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;
        int line_id;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line_id = pblk_tgt_ppa_to_line(ppa);
        line = &pblk->lines[line_id];
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}

void pblk_map_pad_invalidate(struct pblk *pblk, struct pblk_line *line,
                             u64 paddr)
{
        __pblk_map_invalidate(pblk, line, paddr);

        pblk_rb_sync_init(&pblk->rwb, NULL);
        line->left_ssecs--;
        if (!line->left_ssecs)
                pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);
        pblk_rb_sync_end(&pblk->rwb, NULL);
}

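/* Invalidate the L2P entries for a contiguous LBA range and reset them to
 * empty; used to service discard requests.
 */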
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}

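/* Write requests come from the dedicated write pool; everything else
 * (reads, erases) is served by the general (g_rq) pool.
 */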
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        if (rw == WRITE) {
                pool = pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
        } else {
                pool = pblk->g_rq_pool;
                rq_size = pblk_g_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
        mempool_t *pool;

        if (rw == WRITE)
                pool = pblk->w_rq_pool;
        else
                pool = pblk->g_rq_pool;

        mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, pblk->page_pool);
        }
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(pblk->page_pool, flags);
                if (!page)
                        goto err;

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        mempool_free(page, pblk->page_pool);
                        goto err;
                }
        }

        return 0;
err:
        /* exactly i pages have been added to the bio when we get here */
        pblk_bio_free_pages(pblk, bio, 0, i);
        return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
        struct pblk *pblk = (struct pblk *)data;

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs)
                pblk_write_kick(pblk);
}

void pblk_end_bio_sync(struct bio *bio)
{
        struct completion *waiting = bio->bi_private;

        complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}

void pblk_flush_writer(struct pblk *pblk)
{
        struct bio *bio;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return;

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH);
        bio->bi_private = &wait;
        bio->bi_end_io = pblk_end_bio_sync;

        ret = pblk_write_to_cache(pblk, bio, 0);
        if (ret == NVM_IO_OK) {
                if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                        pr_err("pblk: flush cache timed out\n");
                }
        } else if (ret != NVM_IO_DONE) {
                pr_err("pblk: tear down bio failed\n");
        }

        if (bio->bi_status)
                pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);

        bio_put(bio);
}

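/* Choose the GC list a closed line belongs to based on its valid sector
 * count (vsc): the fewer valid sectors a line holds, the cheaper it is to
 * garbage collect and the higher its GC priority. Returns the list the
 * line should be moved to, or NULL if it already sits on the right one.
 */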
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        if (!line->vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (line->vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (line->vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (line->vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (line->vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, line->vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
        struct ppa_addr ppa;

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pr_err("pblk: unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}

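/* Thin wrapper around nvm_submit_io() that, on debug builds, sanity-checks
 * the ppa list boundaries and verifies that writes only target open lines.
 */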
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
        struct ppa_addr *ppa_list;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                struct ppa_addr ppa;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        ppa = ppa_list[i];
                        line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pr_err("pblk: bad ppa: line:%d,state:%d\n",
                                                        line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }
#endif
        return nvm_submit_io(dev, rqd);
}

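/* Map an emeta buffer into a bio. Buffers allocated with kmalloc can be
 * mapped directly; vmalloc'ed buffers must be mapped page by page, since
 * their pages are not physically contiguous.
 */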
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pr_err("pblk: could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }
out:
        return bio;
}

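/* Calculate how many sectors to group in the next write: cap at the
 * configured sectors per write, round down to a multiple of the minimum
 * write size, and pad up to the minimum when a flush is pending. For
 * example, with min_write_pgs == 8 and sec_per_write == 64 (illustrative
 * values only): 70 available sectors yield 64, 29 yield 24, and 3 with a
 * flush pending yield 8.
 */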
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}

static u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line,
                             int nr_secs)
{
        u64 addr;
        int i;

        /* logic error: ppa out-of-bounds. Prevent generating bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        struct ppa_addr *ppa_list;
        dma_addr_t dma_ppa_list;
        void *emeta = line->emeta;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec;
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
        int flags;
        int i, j;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk);
        } else
                return -EINVAL;

        ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_ppa_list);
        if (!ppa_list)
                return -ENOMEM;

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        rq_len = rq_ppas * geo->sec_size;

        bio = pblk_bio_map_addr(pblk, emeta, rq_ppas, rq_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = rq_ppas;
        rqd.ppa_list = ppa_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        if (dir == WRITE) {
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
                        spin_unlock(&line->lock);
                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, id);
                }
        } else {
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_dev_ppa_to_pos(geo, ppa);

                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
                                if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                        pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                        bio_put(bio);
                                        ret = -EINTR;
                                        goto free_rqd_dma;
                                }

                                ppa = addr_to_gen_ppa(pblk, paddr, id);
                                pos = pblk_dev_ppa_to_pos(geo, ppa);
                        }

                        if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                                pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, line->id);
                }
        }

        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: emeta I/O timed out\n");
        }
        reinit_completion(&wait);

        bio_put(bio);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

        emeta += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;
free_rqd_dma:
        nvm_dev_dma_free(dev->parent, ppa_list, dma_ppa_list);
        return ret;
}

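/* The start of smeta is the first sector of the first good block in the
 * line; a line with no good blocks at all is reported with -1.
 */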
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->sec_per_pl;
}

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = NULL;
        int i, ret;
        int cmd_op, bio_op;
        int flags;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
                lba_list = pblk_line_emeta_to_lbas(line->emeta);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk);
        } else
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_ppa_list);
        if (!rqd.ppa_list)
                return -ENOMEM;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_ppa_list;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
                if (dir == WRITE)
                        lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
        }

        /*
         * This I/O is sent by the write thread when a line is replaced. Since
         * the write thread is the only one sending write and erase commands,
         * there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_ppa_list;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: smeta I/O timed out\n");
        }

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

free_ppa_list:
        nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);

        return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
        u64 bpaddr = pblk_line_smeta_start(pblk, line);

        return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line)
{
        return pblk_line_submit_emeta_io(pblk, line, line->emeta_ssec, READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->flags = pblk_set_progr_mode(pblk, ERASE);
        rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        pblk_setup_e_rq(pblk, &rqd, ppa);

        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not sync erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));

                rqd.error = ret;
                goto out;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: sync erase timed out\n");
        }

out:
        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return 0;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.g.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                if (pblk_blk_erase_sync(pblk, ppa)) {
                        pr_err("pblk: failed to erase line %d\n", line->id);
                        return -ENOMEM;
                }
        } while (1);

        return 0;
}

/* For now, lines are always assumed to be full. Thus, the smeta former and
 * current lun bitmaps are omitted.
 */
static int pblk_line_set_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct line_smeta *smeta = line->smeta;
        struct line_emeta *emeta = line->emeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pr_debug("pblk: line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta->header.identifier = cpu_to_le32(PBLK_MAGIC);
        memcpy(smeta->header.uuid, pblk->instance_uuid, 16);
        smeta->header.id = cpu_to_le32(line->id);
        smeta->header.type = cpu_to_le16(line->type);
        smeta->header.version = cpu_to_le16(1);

        /* Start metadata */
        smeta->seq_nr = cpu_to_le64(line->seq_nr);
        smeta->window_wr_lun = cpu_to_le32(geo->nr_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta->prev_id = cpu_to_le32(cur->id);
                cur->emeta->next_id = cpu_to_le32(line->id);
        } else {
                smeta->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta->header.crc = cpu_to_le32(pblk_calc_meta_header_crc(pblk, smeta));
        smeta->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta));

        /* End metadata */
        memcpy(&emeta->header, &smeta->header, sizeof(struct line_header));
        emeta->seq_nr = cpu_to_le64(line->seq_nr);
        emeta->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta->nr_valid_lbas = cpu_to_le64(0);
        emeta->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta->crc = cpu_to_le32(0);
        emeta->prev_id = smeta->prev_id;

        return 1;
}

/* Map out bad blocks and the smeta/emeta regions on the line's bitmaps,
 * making sure enough good sectors remain to hold the metadata.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int nr_bb = 0;
        u64 off;
        int bit = -1;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->sec_per_pl;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->sec_per_blk;
                if (bit >= lm->emeta_bb)
                        nr_bb++;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->sec_per_pl;
retry_smeta:
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
                pr_debug("pblk: line smeta I/O failed. Retry\n");
                off += geo->sec_per_pl;
                goto retry_smeta;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        bit = lm->sec_per_line;
        off = lm->sec_per_line - lm->emeta_sec;
        bitmap_set(line->invalid_bitmap, off, lm->emeta_sec);
        while (nr_bb) {
                off -= geo->sec_per_pl;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
                        nr_bb--;
                }
        }

        line->sec_in_line -= lm->emeta_sec;
        line->emeta_ssec = off;
        line->vsc = line->left_ssecs = line->left_msecs = line->sec_in_line;

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pr_err("pblk: unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}

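/* Allocate the line's map and invalid bitmaps and transition it from FREE
 * to OPEN, priming the erase counters with the number of good blocks.
 */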
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);

        line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->map_bitmap)
                return -ENOMEM;
        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* invalid_bitmap is special since it is used when the line is closed.
         * There is no need to zero it; it will be initialized using bb info
         * from map_bitmap
         */
        line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                return -ENOMEM;
        }

        spin_lock(&line->lock);
        if (line->state != PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                WARN(1, "pblk: corrupted line state\n");
                return -EINTR;
        }
        line->state = PBLK_LINESTATE_OPEN;

        atomic_set(&line->left_eblks, blk_in_line);
        atomic_set(&line->left_seblks, blk_in_line);
        spin_unlock(&line->lock);

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        kref_init(&line->ref);

        return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);

        if (!pblk_line_init_bb(pblk, line, 0)) {
                list_add(&line->list, &l_mg->free_list);
                return -EINTR;
        }

        return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

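/* Take the first line off the free list and prepare it for writing. Lines
 * that turn out to have no good blocks are parked on the bad list and the
 * search continues. Must be called with l_mg->free_lock held.
 */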
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line = NULL;
        int bit;

        lockdep_assert_held(&l_mg->free_lock);

retry_get:
        if (list_empty(&l_mg->free_list)) {
                pr_err("pblk: no free lines\n");
                goto out;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pr_debug("pblk: line %d is bad\n", line->id);
                goto retry_get;
        }

        if (pblk_line_prepare(pblk, line)) {
                pr_err("pblk: failed to prepare line %d\n", line->id);
                list_add(&line->list, &l_mg->free_list);
                return NULL;
        }

out:
        return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_free(pblk, line);
        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_erase(pblk, retry_line)) {
                spin_lock(&l_mg->free_lock);
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        pblk_rl_free_lines_dec(&pblk->rl, retry_line);

        return retry_line;
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int meta_line;
        int is_next = 0;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        line->smeta = l_mg->sline_meta[meta_line].meta;
        line->emeta = l_mg->eline_meta[meta_line].meta;
        line->meta_line = meta_line;

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (l_mg->data_next) {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);
        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

        if (pblk_line_erase(pblk, line))
                return NULL;

retry_setup:
        if (!pblk_line_set_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        return line;
}

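/* Swap in the prepared next line as the current data line. The new line
 * must be fully erased before it can take writes, so wait (rescheduling as
 * needed) for any outstanding erases, then grab a free metadata slot and
 * set up its smeta/emeta.
 */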
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *cur, *new;
        unsigned int left_seblks;
        int meta_line;
        int is_next = 0;

        cur = l_mg->data_line;
        new = l_mg->data_next;
        if (!new)
                return NULL;
        l_mg->data_line = new;

retry_line:
        left_seblks = atomic_read(&new->left_seblks);
        if (left_seblks) {
                /* If line is not fully erased, erase it */
                if (atomic_read(&new->left_eblks)) {
                        if (pblk_line_erase(pblk, new))
                                return NULL;
                } else {
                        io_schedule();
                }
                goto retry_line;
        }

        spin_lock(&l_mg->free_lock);
        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (l_mg->data_next) {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        new->smeta = l_mg->sline_meta[meta_line].meta;
        new->emeta = l_mg->eline_meta[meta_line].meta;
        new->meta_line = meta_line;

        memset(new->smeta, 0, lm->smeta_len);
        memset(new->emeta, 0, lm->emeta_len);
        spin_unlock(&l_mg->free_lock);

        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
        if (!pblk_line_set_metadata(pblk, new, cur)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, new, 1)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return NULL;

                goto retry_setup;
        }

        return new;
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
        if (line->map_bitmap)
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
        if (line->invalid_bitmap)
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

void pblk_line_put(struct kref *ref)
{
        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_FREE;
        line->gc_group = PBLK_LINEGC_NONE;
        pblk_line_free(pblk, line);
        spin_unlock(&line->lock);

        spin_lock(&l_mg->free_lock);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_inc(&pblk->rl, line);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq *rqd;
        int err;

        rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_g_rq_size);

        pblk_setup_e_rq(pblk, rqd, ppa);

        rqd->end_io = pblk_end_io_erase;
        rqd->private = pblk;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not async erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));
        }

        return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
        return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
        return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
        return (line->left_msecs == 0);
}

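/* Close a fully written line: persist its emeta, release its metadata slot
 * and move it from OPEN to CLOSED and onto the GC list that matches its
 * valid sector count.
 */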
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        line->emeta->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, line->emeta));

        if (pblk_line_submit_emeta_io(pblk, line, line->cur_sec, WRITE))
                pr_err("pblk: line %d close I/O failed\n", line->id);

        WARN(!bitmap_full(line->map_bitmap, line->sec_in_line),
                                "pblk: corrupt closed line %d\n", line->id);

        spin_lock(&l_mg->free_lock);
        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
        spin_unlock(&l_mg->free_lock);

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);

        list_add_tail(&line->list, move_list);

        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;

        spin_unlock(&line->lock);
        spin_unlock(&l_mg->gc_lock);
}

void pblk_line_close_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;

        pblk_line_close(pblk, line);
        mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
                pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

                pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
                                line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                      void (*work)(struct work_struct *))
{
        struct pblk_line_ws *line_ws;

        line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
        if (!line_ws)
                return;

        line_ws->pblk = pblk;
        line_ws->line = line;
        line_ws->priv = priv;

        INIT_WORK(&line_ws->ws, work);
        queue_work(pblk->kw_wq, &line_ws->ws);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                  unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int lun_id = ppa_list[0].g.ch * geo->luns_per_chnl + ppa_list[0].g.lun;
        int ret;

        /*
         * Only send one inflight I/O per LUN. Since we map at a page
         * granularity, all ppas in the I/O will map to the same LUN
         */
#ifdef CONFIG_NVM_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
                                ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif
        /* If the LUN has been locked for this same request, do not attempt to
         * lock it again
         */
        if (test_and_set_bit(lun_id, lun_bitmap))
                return;

        rlun = &pblk->luns[lun_id];
        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
        if (ret) {
                switch (ret) {
                case -ETIME:
                        pr_err("pblk: lun semaphore timed out\n");
                        break;
                case -EINTR:
                        pr_err("pblk: lun semaphore interrupted\n");
                        break;
                }
        }
}

void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int nr_luns = geo->nr_luns;
        int bit = -1;

        while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
                rlun = &pblk->luns[bit];
                up(&rlun->wr_sem);
        }

        kfree(lun_bitmap);
}

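/* Update the L2P entry for an lba, invalidating whatever device address it
 * previously mapped to.
 */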
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
        struct ppa_addr l2p_ppa;

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
                pblk_map_invalidate(pblk, l2p_ppa);

        pblk_trans_map_set(pblk, lba, ppa);
        spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        pblk_update_map(pblk, lba, ppa);
}

int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                       struct pblk_line *gc_line)
{
        struct ppa_addr l2p_ppa;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return 0;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        /* Prevent updated entries from being overwritten by GC */
        if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
                                pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
                ret = 0;
                goto out;
        }

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
        return ret;
}

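/* Move an L2P entry from the write cache to its device address on write
 * completion. entry_line identifies the cacheline that held the entry; if
 * the map no longer points to it, the entry was overwritten in the
 * meantime and the freshly written ppa is invalidated instead.
 */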
void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                         struct ppa_addr entry_line)
{
        struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
#endif
        /* Invalidate and discard padded entries */
        if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->padded_wb);
#endif
                pblk_map_invalidate(pblk, ppa);
                return;
        }

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_line = pblk_trans_map_get(pblk, lba);

        /* Do not update L2P if the cacheline has been updated. In this case,
         * the mapped ppa must be invalidated
         */
        if (l2p_line.ppa != entry_line.ppa) {
                if (!pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);
                goto out;
        }

#ifdef CONFIG_NVM_DEBUG
        WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs)
{
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++)
                ppas[i] = pblk_trans_map_get(pblk, blba + i);
        spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs)
{
        sector_t lba;
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++) {
                lba = lba_list[i];
                if (lba == ADDR_EMPTY) {
                        ppas[i].ppa = ADDR_EMPTY;
                } else {
                        /* logic error: lba out-of-bounds. Ignore entry */
                        if (!(lba < pblk->rl.nr_secs)) {
                                WARN(1, "pblk: corrupted L2P map request\n");
                                continue;
                        }
                        ppas[i] = pblk_trans_map_get(pblk, lba);
                }
        }
        spin_unlock(&pblk->trans_lock);
}