/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
#include <linux/time.h>

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int pos = pblk_dev_ppa_to_pos(geo, *ppa);

        pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_line *line;

        line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                struct ppa_addr *ppa;

                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
                if (!ppa)
                        return;

                *ppa = rqd->ppa_addr;
                pblk_mark_bb(pblk, line, ppa);
        }
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        up(&pblk->erase_sem);
        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, pblk->r_rq_pool);
}

static void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                                  u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                return;
        }

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        line->vsc--;

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC ||
                                        line->state == PBLK_LINESTATE_FREE) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;
        int line_id;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line_id = pblk_tgt_ppa_to_line(ppa);
        line = &pblk->lines[line_id];
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}

void pblk_map_pad_invalidate(struct pblk *pblk, struct pblk_line *line,
                             u64 paddr)
{
        __pblk_map_invalidate(pblk, line, paddr);

        pblk_rb_sync_init(&pblk->rwb, NULL);
        line->left_ssecs--;
        if (!line->left_ssecs)
                pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);
        pblk_rb_sync_end(&pblk->rwb, NULL);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}

struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        if (rw == WRITE) {
                pool = pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
        } else {
                pool = pblk->r_rq_pool;
                rq_size = pblk_r_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
        mempool_t *pool;

        if (rw == WRITE)
                pool = pblk->w_rq_pool;
        else
                pool = pblk->r_rq_pool;

        mempool_free(rqd, pool);
}
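
/*
 * Editor's sketch (not part of the original driver): the intended pairing of
 * pblk_alloc_rqd() and pblk_free_rqd(). The rw argument selects the read or
 * write mempool, so both calls must agree on it. The helper name is
 * hypothetical.
 */
static int example_rqd_roundtrip(struct pblk *pblk)
{
        struct nvm_rq *rqd;

        rqd = pblk_alloc_rqd(pblk, READ);       /* zeroed rqd from r_rq_pool */

        /* ... set up ppa_addr/ppa_list, bio and end_io, then submit ... */

        pblk_free_rqd(pblk, rqd, READ);         /* must match the alloc rw */
        return 0;
}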

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec bv;
        int i;

        WARN_ON(off + nr_pages != bio->bi_vcnt);

        bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
        for (i = off; i < nr_pages + off; i++) {
                bv = bio->bi_io_vec[i];
                mempool_free(bv.bv_page, pblk->page_pool);
        }
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(pblk->page_pool, flags);
                if (!page)
                        goto err;

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        mempool_free(page, pblk->page_pool);
                        goto err;
                }
        }

        return 0;
err:
        /* free the i pages that were successfully added to the bio */
        pblk_bio_free_pages(pblk, bio, 0, i);
        return -1;
}
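
/*
 * Editor's sketch (not part of the original driver): building an internal bio
 * backed by pool pages and tearing it down again. The helper name and nr_secs
 * are hypothetical.
 */
static int example_bio_roundtrip(struct pblk *pblk, int nr_secs)
{
        struct bio *bio;

        bio = bio_alloc(GFP_KERNEL, nr_secs);
        if (!bio)
                return -ENOMEM;

        if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, nr_secs)) {
                /* pblk_bio_add_pages() already freed the pages it added */
                bio_put(bio);
                return -ENOMEM;
        }

        /* ... submit the bio; once it completes: */
        pblk_bio_free_pages(pblk, bio, 0, nr_secs);
        bio_put(bio);
        return 0;
}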

static void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
        struct pblk *pblk = (struct pblk *)data;

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs)
                pblk_write_kick(pblk);
}

void pblk_end_bio_sync(struct bio *bio)
{
        struct completion *waiting = bio->bi_private;

        complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}

void pblk_flush_writer(struct pblk *pblk)
{
        struct bio *bio;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return;

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH);
        bio->bi_private = &wait;
        bio->bi_end_io = pblk_end_bio_sync;

        ret = pblk_write_to_cache(pblk, bio, 0);
        if (ret == NVM_IO_OK) {
                if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                        pr_err("pblk: flush cache timed out\n");
                }
        } else if (ret != NVM_IO_DONE) {
                pr_err("pblk: tear down bio failed\n");
        }

        if (bio->bi_error)
                pr_err("pblk: flush sync write failed (%u)\n", bio->bi_error);

        bio_put(bio);
}

struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        if (!line->vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (line->vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (line->vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (line->vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (line->vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, line->vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}
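
/*
 * Editor's note, with an illustrative example (numbers hypothetical): with
 * sec_in_line = 1000, mid_thrs = 250 and high_thrs = 500, a line with vsc = 0
 * goes to gc_full_list, vsc = 100 to gc_high_list (few valid sectors, cheap
 * to reclaim), vsc = 400 to gc_mid_list, vsc = 700 to gc_low_list, and
 * vsc = 1000 to gc_empty_list (nothing invalid to reclaim yet). A vsc above
 * sec_in_line is impossible, so the line is marked corrupt.
 */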

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
        struct ppa_addr ppa;

        spin_lock(&pblk->trans_lock);
        ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pr_err("pblk: unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
        struct ppa_addr *ppa_list;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                struct ppa_addr ppa;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        ppa = ppa_list[i];
                        line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pr_err("pblk: bad ppa: line:%d,state:%d\n",
                                                        line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }
#endif
        return nvm_submit_io(dev, rqd);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pr_err("pblk: could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pr_err("pblk: could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }
out:
        return bio;
}

int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush)
{
        int max = pblk->max_write_pgs;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}
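
/*
 * Editor's worked example (values hypothetical): with min_write_pgs = 4 and
 * max_write_pgs = 64, secs_avail = 70 syncs 64 sectors, secs_avail = 10
 * syncs 8 (rounded down to a multiple of min), and secs_avail = 3 syncs 4
 * only when secs_to_flush forces padding; otherwise nothing is synced and
 * the data waits in the write buffer.
 */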

static u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line,
                             int nr_secs)
{
        u64 addr;
        int i;

        /* logic error: ppa out-of-bounds. Prevent generating bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}
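
/*
 * Editor's sketch (not part of the original driver): turning a sector
 * allocation on the current line into generic ppas for a request. Only
 * pblk_alloc_page() and addr_to_gen_ppa() come from this file; the helper
 * name is hypothetical.
 */
static void example_map_secs(struct pblk *pblk, struct pblk_line *line,
                             struct ppa_addr *ppas, int nr_secs)
{
        u64 paddr = pblk_alloc_page(pblk, line, nr_secs);
        int i;

        for (i = 0; i < nr_secs; i++, paddr++)
                ppas[i] = addr_to_gen_ppa(pblk, paddr, line->id);
}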

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        struct ppa_addr *ppa_list;
        dma_addr_t dma_ppa_list;
        void *emeta = line->emeta;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec;
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
        int flags;
        int i, j;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk);
        } else
                return -EINVAL;

        ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_ppa_list);
        if (!ppa_list)
                return -ENOMEM;

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        rq_len = rq_ppas * geo->sec_size;

        bio = pblk_bio_map_addr(pblk, emeta, rq_ppas, rq_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = rq_ppas;
        rqd.ppa_list = ppa_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        if (dir == WRITE) {
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
                        spin_unlock(&line->lock);
                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, id);
                }
        } else {
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_dev_ppa_to_pos(geo, ppa);

                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
                                if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                        pr_err("pblk: corrupt emeta line:%d\n",
                                                                line->id);
                                        bio_put(bio);
                                        ret = -EINTR;
                                        goto free_rqd_dma;
                                }

                                ppa = addr_to_gen_ppa(pblk, paddr, id);
                                pos = pblk_dev_ppa_to_pos(geo, ppa);
                        }

                        if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                                pr_err("pblk: corrupt emeta line:%d\n",
                                                        line->id);
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        for (j = 0; j < min; j++, i++, paddr++)
                                rqd.ppa_list[i] =
                                        addr_to_gen_ppa(pblk, paddr, line->id);
                }
        }

        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: emeta I/O timed out\n");
        }
        reinit_completion(&wait);

        bio_put(bio);

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

        emeta += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;
free_rqd_dma:
        nvm_dev_dma_free(dev->parent, ppa_list, dma_ppa_list);
        return ret;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->sec_per_pl;
}
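
/*
 * Editor's note, with an illustrative example (geometry hypothetical): with
 * sec_per_pl = 16, a line whose blocks 0 and 1 are bad has its first good
 * block at bit 2, so smeta starts at sector 2 * 16 = 32 of the line.
 */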

static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
                                     u64 paddr, int dir)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct nvm_rq rqd;
        __le64 *lba_list = NULL;
        int i, ret;
        int cmd_op, bio_op;
        int flags;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
                flags = pblk_set_progr_mode(pblk, WRITE);
                lba_list = pblk_line_emeta_to_lbas(line->emeta);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk);
        } else
                return -EINVAL;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_ppa_list);
        if (!rqd.ppa_list)
                return -ENOMEM;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_ppa_list;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, bio_op, 0);

        rqd.bio = bio;
        rqd.opcode = cmd_op;
        rqd.flags = flags;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
                if (dir == WRITE)
                        lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
        }

        /*
         * This I/O is sent by the write thread when a line is replaced. Since
         * the write thread is the only one sending write and erase commands,
         * there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                pr_err("pblk: smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_ppa_list;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: smeta I/O timed out\n");
        }

        if (rqd.error) {
                if (dir == WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
        }

free_ppa_list:
        nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);

        return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
        u64 bpaddr = pblk_line_smeta_start(pblk, line);

        return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line)
{
        return pblk_line_submit_emeta_io(pblk, line, line->emeta_ssec, READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->flags = pblk_set_progr_mode(pblk, ERASE);
        rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        pblk_setup_e_rq(pblk, &rqd, ppa);

        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io(pblk, &rqd);
        if (ret) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not sync erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));

                rqd.error = ret;
                goto out;
        }

        if (!wait_for_completion_io_timeout(&wait,
                                msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
                pr_err("pblk: sync erase timed out\n");
        }

out:
        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return 0;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.g.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                if (pblk_blk_erase_sync(pblk, ppa)) {
                        pr_err("pblk: failed to erase line %d\n", line->id);
                        return -ENOMEM;
                }
        } while (1);

        return 0;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_set_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct line_smeta *smeta = line->smeta;
        struct line_emeta *emeta = line->emeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pr_debug("pblk: line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta->header.identifier = cpu_to_le32(PBLK_MAGIC);
        memcpy(smeta->header.uuid, pblk->instance_uuid, 16);
        smeta->header.id = cpu_to_le32(line->id);
        smeta->header.type = cpu_to_le16(line->type);
        smeta->header.version = cpu_to_le16(1);

        /* Start metadata */
        smeta->seq_nr = cpu_to_le64(line->seq_nr);
        smeta->window_wr_lun = cpu_to_le32(geo->nr_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta->prev_id = cpu_to_le32(cur->id);
                cur->emeta->next_id = cpu_to_le32(line->id);
        } else {
                smeta->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta->header.crc = cpu_to_le32(pblk_calc_meta_header_crc(pblk, smeta));
        smeta->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta));

        /* End metadata */
        memcpy(&emeta->header, &smeta->header, sizeof(struct line_header));
        emeta->seq_nr = cpu_to_le64(line->seq_nr);
        emeta->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta->nr_valid_lbas = cpu_to_le64(0);
        emeta->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta->crc = cpu_to_le32(0);
        emeta->prev_id = smeta->prev_id;

        return 1;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int nr_bb = 0;
        u64 off;
        int bit = -1;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->sec_per_pl;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->sec_per_blk;
                if (bit >= lm->emeta_bb)
                        nr_bb++;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->sec_per_pl;
retry_smeta:
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
                pr_debug("pblk: line smeta I/O failed. Retry\n");
                off += geo->sec_per_pl;
                goto retry_smeta;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        bit = lm->sec_per_line;
        off = lm->sec_per_line - lm->emeta_sec;
        bitmap_set(line->invalid_bitmap, off, lm->emeta_sec);
        while (nr_bb) {
                off -= geo->sec_per_pl;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
                        nr_bb--;
                }
        }

        line->sec_in_line -= lm->emeta_sec;
        line->emeta_ssec = off;
        line->vsc = line->left_ssecs = line->left_msecs = line->sec_in_line;

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pr_err("pblk: unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);

        line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->map_bitmap)
                return -ENOMEM;
        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* invalid_bitmap is special since it is used when the line is closed.
         * No need to zero it; it will be initialized using bb info from
         * map_bitmap
         */
        line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
                return -ENOMEM;
        }

        spin_lock(&line->lock);
        if (line->state != PBLK_LINESTATE_FREE) {
                spin_unlock(&line->lock);
                WARN(1, "pblk: corrupted line state\n");
                return -EINTR;
        }
        line->state = PBLK_LINESTATE_OPEN;

        atomic_set(&line->left_eblks, blk_in_line);
        atomic_set(&line->left_seblks, blk_in_line);
        spin_unlock(&line->lock);

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        kref_init(&line->ref);

        return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);

        if (!pblk_line_init_bb(pblk, line, 0)) {
                list_add(&line->list, &l_mg->free_list);
                return -EINTR;
        }

        return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line = NULL;
        int bit;

        lockdep_assert_held(&l_mg->free_lock);

retry_get:
        if (list_empty(&l_mg->free_list)) {
                pr_err("pblk: no free lines\n");
                goto out;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pr_debug("pblk: line %d is bad\n", line->id);
                goto retry_get;
        }

        if (pblk_line_prepare(pblk, line)) {
                pr_err("pblk: failed to prepare line %d\n", line->id);
                list_add(&line->list, &l_mg->free_list);
                return NULL;
        }

out:
        return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_free(pblk, line);
        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_erase(pblk, retry_line)) {
                spin_lock(&l_mg->free_lock);
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        pblk_rl_free_lines_dec(&pblk->rl, retry_line);

        return retry_line;
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int meta_line;
        int is_next = 0;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        set_bit(meta_line, &l_mg->meta_bitmap);
        line->smeta = l_mg->sline_meta[meta_line].meta;
        line->emeta = l_mg->eline_meta[meta_line].meta;
        line->meta_line = meta_line;

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (l_mg->data_next) {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line);
        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

        if (pblk_line_erase(pblk, line))
                return NULL;

retry_setup:
        if (!pblk_line_set_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        return line;
}

struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *cur, *new;
        unsigned int left_seblks;
        int meta_line;
        int is_next = 0;

        cur = l_mg->data_line;
        new = l_mg->data_next;
        if (!new)
                return NULL;
        l_mg->data_line = new;

retry_line:
        left_seblks = atomic_read(&new->left_seblks);
        if (left_seblks) {
                /* If line is not fully erased, erase it */
                if (atomic_read(&new->left_eblks)) {
                        if (pblk_line_erase(pblk, new))
                                return NULL;
                } else {
                        io_schedule();
                }
                goto retry_line;
        }

        spin_lock(&l_mg->free_lock);
        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (l_mg->data_next) {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
                is_next = 1;
        }

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        new->smeta = l_mg->sline_meta[meta_line].meta;
        new->emeta = l_mg->eline_meta[meta_line].meta;
        new->meta_line = meta_line;

        memset(new->smeta, 0, lm->smeta_len);
        memset(new->emeta, 0, lm->emeta_len);
        spin_unlock(&l_mg->free_lock);

        if (is_next)
                pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
        if (!pblk_line_set_metadata(pblk, new, cur)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, new, 1)) {
                new = pblk_line_retry(pblk, new);
                if (!new)
                        return NULL;

                goto retry_setup;
        }

        return new;
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
        if (line->map_bitmap)
                mempool_free(line->map_bitmap, pblk->line_meta_pool);
        if (line->invalid_bitmap)
                mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

void pblk_line_put(struct kref *ref)
{
        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_FREE;
        line->gc_group = PBLK_LINEGC_NONE;
        pblk_line_free(pblk, line);
        spin_unlock(&line->lock);

        spin_lock(&l_mg->free_lock);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_inc(&pblk->rl, line);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq *rqd;
        int err;

        rqd = mempool_alloc(pblk->r_rq_pool, GFP_KERNEL);
        memset(rqd, 0, pblk_r_rq_size);

        pblk_setup_e_rq(pblk, rqd, ppa);

        rqd->end_io = pblk_end_io_erase;
        rqd->private = pblk;

        /* The write thread schedules erases so that it minimizes disturbances
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                pr_err("pblk: could not async erase line:%d,blk:%d\n",
                                        pblk_dev_ppa_to_line(ppa),
                                        pblk_dev_ppa_to_pos(geo, ppa));
        }

        return err;
}
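
/*
 * Editor's sketch (not part of the original driver): erasing one block of a
 * line asynchronously; completion runs pblk_end_io_erase(), which releases
 * pblk->erase_sem, so the down() below is inferred from that pairing. The
 * helper name and the bit choice are hypothetical, and error handling (the
 * semaphore leaks if submission fails) is elided.
 */
static int example_erase_blk(struct pblk *pblk, struct pblk_line *line, int bit)
{
        struct ppa_addr ppa = pblk->luns[bit].bppa;     /* sets ch and lun */

        ppa.g.blk = line->id;

        down(&pblk->erase_sem);         /* released by pblk_end_io_erase() */
        return pblk_blk_erase_async(pblk, ppa);
}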

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
        return pblk->l_mg.data_line;
}

struct pblk_line *pblk_line_get_data_next(struct pblk *pblk)
{
        return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
        return (line->left_msecs == 0);
}

void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        line->emeta->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, line->emeta));

        if (pblk_line_submit_emeta_io(pblk, line, line->cur_sec, WRITE))
                pr_err("pblk: line %d close I/O failed\n", line->id);

        WARN(!bitmap_full(line->map_bitmap, line->sec_in_line),
                                "pblk: corrupt closed line %d\n", line->id);

        spin_lock(&l_mg->free_lock);
        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
        spin_unlock(&l_mg->free_lock);

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);

        list_add_tail(&line->list, move_list);

        mempool_free(line->map_bitmap, pblk->line_meta_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;

        spin_unlock(&line->lock);
        spin_unlock(&l_mg->gc_lock);
}

void pblk_line_close_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;

        pblk_line_close(pblk, line);
        mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
                pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

                pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
                                line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                      void (*work)(struct work_struct *))
{
        struct pblk_line_ws *line_ws;

        line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
        if (!line_ws)
                return;

        line_ws->pblk = pblk;
        line_ws->line = line;
        line_ws->priv = priv;

        INIT_WORK(&line_ws->ws, work);
        queue_work(pblk->kw_wq, &line_ws->ws);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                  unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int lun_id = ppa_list[0].g.ch * geo->luns_per_chnl + ppa_list[0].g.lun;
        int ret;

        /*
         * Only send one inflight I/O per LUN. Since we map at a page
         * granularity, all ppas in the I/O will map to the same LUN
         */
#ifdef CONFIG_NVM_DEBUG
        int i;

        for (i = 1; i < nr_ppas; i++)
                WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
                                ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif
        /* If the LUN has been locked for this same request, do not attempt to
         * lock it again
         */
        if (test_and_set_bit(lun_id, lun_bitmap))
                return;

        rlun = &pblk->luns[lun_id];
        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
        if (ret) {
                switch (ret) {
                case -ETIME:
                        pr_err("pblk: lun semaphore timed out\n");
                        break;
                case -EINTR:
                        pr_err("pblk: lun semaphore interrupted\n");
                        break;
                }
        }
}

void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int nr_luns = geo->nr_luns;
        int bit = -1;

        while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
                rlun = &pblk->luns[bit];
                up(&rlun->wr_sem);
        }

        kfree(lun_bitmap);
}
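
/*
 * Editor's sketch (not part of the original driver): the down/up pairing
 * around a write. lun_bitmap records which LUN semaphores this request took
 * so that pblk_up_rq() releases exactly those (and then kfrees the bitmap).
 * The helper name and error handling are hypothetical.
 */
static int example_submit_write(struct pblk *pblk, struct nvm_rq *rqd,
                                unsigned long *lun_bitmap)
{
        pblk_down_rq(pblk, rqd->ppa_list, rqd->nr_ppas, lun_bitmap);

        /* on I/O completion the write path would call:
         * pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, lun_bitmap);
         */
        return pblk_submit_io(pblk, rqd);
}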

void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
        struct ppa_addr l2p_ppa;

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
                pblk_map_invalidate(pblk, l2p_ppa);

        pblk_trans_map_set(pblk, lba, ppa);
        spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        pblk_update_map(pblk, lba, ppa);
}

int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                       struct pblk_line *gc_line)
{
        struct ppa_addr l2p_ppa;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(!pblk_addr_in_cache(ppa));
        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return 0;
        }

        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);

        /* Prevent updated entries from being overwritten by GC */
        if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
                                pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
                ret = 0;
                goto out;
        }

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
        return ret;
}
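
/*
 * Editor's sketch (not part of the original driver): how a GC writer might
 * use the return value of pblk_update_map_gc() to detect that the host
 * overwrote the sector while it was being relocated. The helper name is
 * hypothetical.
 */
static bool example_gc_remap(struct pblk *pblk, sector_t lba,
                             struct ppa_addr cache_ppa,
                             struct pblk_line *gc_line)
{
        if (!pblk_update_map_gc(pblk, lba, cache_ppa, gc_line)) {
                /* mapping no longer points to gc_line; drop the stale copy */
                return false;
        }
        return true;
}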

void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                         struct ppa_addr entry_line)
{
        struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
#endif
        /* Invalidate and discard padded entries */
        if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->padded_wb);
#endif
                pblk_map_invalidate(pblk, ppa);
                return;
        }

        /* logic error: lba out-of-bounds. Ignore update */
        if (!(lba < pblk->rl.nr_secs)) {
                WARN(1, "pblk: corrupted L2P map request\n");
                return;
        }

        spin_lock(&pblk->trans_lock);
        l2p_line = pblk_trans_map_get(pblk, lba);

        /* Do not update L2P if the cacheline has been updated. In this case,
         * the mapped ppa must be invalidated
         */
        if (l2p_line.ppa != entry_line.ppa) {
                if (!pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);
                goto out;
        }

#ifdef CONFIG_NVM_DEBUG
        WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

        pblk_trans_map_set(pblk, lba, ppa);
out:
        spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs)
{
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++)
                ppas[i] = pblk_trans_map_get(pblk, blba + i);
        spin_unlock(&pblk->trans_lock);
}
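
/*
 * Editor's sketch (not part of the original driver): resolving a sequential
 * read into per-sector sources before issuing it. The helper name and the
 * array bound are hypothetical.
 */
static void example_resolve_read(struct pblk *pblk, sector_t blba, int nr_secs)
{
        struct ppa_addr ppas[64];       /* assumed upper bound on nr_secs */
        int i;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (pblk_ppa_empty(ppas[i]))
                        continue;       /* unmapped: sector reads as zeroes */
                if (pblk_addr_in_cache(ppas[i]))
                        continue;       /* serve from the write buffer */
                /* otherwise read ppas[i] from the device */
        }
}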

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs)
{
        sector_t lba;
        int i;

        spin_lock(&pblk->trans_lock);
        for (i = 0; i < nr_secs; i++) {
                lba = lba_list[i];
                if (lba == ADDR_EMPTY) {
                        ppas[i].ppa = ADDR_EMPTY;
                } else {
                        /* logic error: lba out-of-bounds. Ignore it */
                        if (!(lba < pblk->rl.nr_secs)) {
                                WARN(1, "pblk: corrupted L2P map request\n");
                                continue;
                        }
                        ppas[i] = pblk_trans_map_get(pblk, lba);
                }
        }
        spin_unlock(&pblk->trans_lock);
}