linux.git blob: drivers/lightnvm/pblk-core.c ("lightnvm: pblk: simplify meta. memory allocation")
1 /*
2  * Copyright (C) 2016 CNEX Labs
3  * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4  *                  Matias Bjorling <matias@cnexlabs.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License version
8  * 2 as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License for more details.
14  *
15  * pblk-core.c - pblk's core functionality
16  *
17  */
18
19 #include "pblk.h"
20 #include <linux/time.h>
21
22 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
23                          struct ppa_addr *ppa)
24 {
25         struct nvm_tgt_dev *dev = pblk->dev;
26         struct nvm_geo *geo = &dev->geo;
27         int pos = pblk_dev_ppa_to_pos(geo, *ppa);
28
29         pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
30         atomic_long_inc(&pblk->erase_failed);
31
32         atomic_dec(&line->blk_in_line);
33         if (test_and_set_bit(pos, line->blk_bitmap))
34                 pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
35                                                         line->id, pos);
36
37         pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
38 }
39
40 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
41 {
42         struct pblk_line *line;
43
44         line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
45         atomic_dec(&line->left_seblks);
46
47         if (rqd->error) {
48                 struct ppa_addr *ppa;
49
50                 ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
51                 if (!ppa)
52                         return;
53
54                 *ppa = rqd->ppa_addr;
55                 pblk_mark_bb(pblk, line, ppa);
56         }
57 }
58
59 /* Erase completion assumes that only one block is erased at a time */
60 static void pblk_end_io_erase(struct nvm_rq *rqd)
61 {
62         struct pblk *pblk = rqd->private;
63
64         __pblk_end_io_erase(pblk, rqd);
65         mempool_free(rqd, pblk->g_rq_pool);
66 }
67
68 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
69                            u64 paddr)
70 {
71         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
72         struct list_head *move_list = NULL;
73
74         /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
75          * table is modified with reclaimed sectors, a check is done to ensure
76          * that newer updates are not overwritten.
77          */
78         spin_lock(&line->lock);
79         if (line->state == PBLK_LINESTATE_GC ||
80                                         line->state == PBLK_LINESTATE_FREE) {
81                 spin_unlock(&line->lock);
82                 return;
83         }
84
85         if (test_and_set_bit(paddr, line->invalid_bitmap)) {
86                 WARN_ONCE(1, "pblk: double invalidate\n");
87                 spin_unlock(&line->lock);
88                 return;
89         }
90         le32_add_cpu(line->vsc, -1);
91
92         if (line->state == PBLK_LINESTATE_CLOSED)
93                 move_list = pblk_line_gc_list(pblk, line);
94         spin_unlock(&line->lock);
95
96         if (move_list) {
97                 spin_lock(&l_mg->gc_lock);
98                 spin_lock(&line->lock);
99                 /* Prevent moving a line that has just been chosen for GC */
100                 if (line->state == PBLK_LINESTATE_GC ||
101                                         line->state == PBLK_LINESTATE_FREE) {
102                         spin_unlock(&line->lock);
103                         spin_unlock(&l_mg->gc_lock);
104                         return;
105                 }
106                 spin_unlock(&line->lock);
107
108                 list_move_tail(&line->list, move_list);
109                 spin_unlock(&l_mg->gc_lock);
110         }
111 }
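/*
 * Note on the lock ordering in the move above: l_mg->gc_lock is taken
 * before line->lock, matching pblk_line_close(), so a line cannot change
 * state or gc group while it is being moved between gc lists.
 */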
112
113 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
114 {
115         struct pblk_line *line;
116         u64 paddr;
117         int line_id;
118
119 #ifdef CONFIG_NVM_DEBUG
120         /* Callers must ensure that the ppa points to a device address */
121         BUG_ON(pblk_addr_in_cache(ppa));
122         BUG_ON(pblk_ppa_empty(ppa));
123 #endif
124
125         line_id = pblk_tgt_ppa_to_line(ppa);
126         line = &pblk->lines[line_id];
127         paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
128
129         __pblk_map_invalidate(pblk, line, paddr);
130 }
131
132 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
133                                   unsigned int nr_secs)
134 {
135         sector_t lba;
136
137         spin_lock(&pblk->trans_lock);
138         for (lba = slba; lba < slba + nr_secs; lba++) {
139                 struct ppa_addr ppa;
140
141                 ppa = pblk_trans_map_get(pblk, lba);
142
143                 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
144                         pblk_map_invalidate(pblk, ppa);
145
146                 pblk_ppa_set_empty(&ppa);
147                 pblk_trans_map_set(pblk, lba, ppa);
148         }
149         spin_unlock(&pblk->trans_lock);
150 }
151
152 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
153 {
154         mempool_t *pool;
155         struct nvm_rq *rqd;
156         int rq_size;
157
158         if (rw == WRITE) {
159                 pool = pblk->w_rq_pool;
160                 rq_size = pblk_w_rq_size;
161         } else {
162                 pool = pblk->g_rq_pool;
163                 rq_size = pblk_g_rq_size;
164         }
165
166         rqd = mempool_alloc(pool, GFP_KERNEL);
167         memset(rqd, 0, rq_size);
168
169         return rqd;
170 }
171
172 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
173 {
174         mempool_t *pool;
175
176         if (rw == WRITE)
177                 pool = pblk->w_rq_pool;
178         else
179                 pool = pblk->g_rq_pool;
180
181         mempool_free(rqd, pool);
182 }
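/*
 * Illustrative pairing of the two helpers above (a sketch; error handling
 * and the actual request setup are elided):
 *
 *	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, READ);
 *
 *	rqd->opcode = NVM_OP_PREAD;
 *	...
 *	ret = pblk_submit_io(pblk, rqd);
 *	...
 *	pblk_free_rqd(pblk, rqd, READ);
 *
 * The rw argument must match between the two calls so that the request is
 * returned to the mempool it was allocated from (w_rq_pool vs. g_rq_pool).
 */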
183
184 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
185                          int nr_pages)
186 {
187         struct bio_vec bv;
188         int i;
189
190         WARN_ON(off + nr_pages != bio->bi_vcnt);
191
192         bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
193         for (i = off; i < nr_pages + off; i++) {
194                 bv = bio->bi_io_vec[i];
195                 mempool_free(bv.bv_page, pblk->page_pool);
196         }
197 }
198
199 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
200                        int nr_pages)
201 {
202         struct request_queue *q = pblk->dev->q;
203         struct page *page;
204         int i, ret;
205
206         for (i = 0; i < nr_pages; i++) {
207                 page = mempool_alloc(pblk->page_pool, flags);
208                 if (!page)
209                         goto err;
210
211                 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
212                 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
213                         pr_err("pblk: could not add page to bio\n");
214                         mempool_free(page, pblk->page_pool);
215                         goto err;
216                 }
217         }
218
219         return 0;
220 err:
221         pblk_bio_free_pages(pblk, bio, 0, i);
222         return -1;
223 }
224
225 static void pblk_write_kick(struct pblk *pblk)
226 {
227         wake_up_process(pblk->writer_ts);
228         mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
229 }
230
231 void pblk_write_timer_fn(unsigned long data)
232 {
233         struct pblk *pblk = (struct pblk *)data;
234
235         /* kick the write thread every tick to flush outstanding data */
236         pblk_write_kick(pblk);
237 }
238
239 void pblk_write_should_kick(struct pblk *pblk)
240 {
241         unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
242
243         if (secs_avail >= pblk->min_write_pgs)
244                 pblk_write_kick(pblk);
245 }
246
247 void pblk_end_bio_sync(struct bio *bio)
248 {
249         struct completion *waiting = bio->bi_private;
250
251         complete(waiting);
252 }
253
254 void pblk_end_io_sync(struct nvm_rq *rqd)
255 {
256         struct completion *waiting = rqd->private;
257
258         complete(waiting);
259 }
260
261 void pblk_flush_writer(struct pblk *pblk)
262 {
263         struct bio *bio;
264         int ret;
265         DECLARE_COMPLETION_ONSTACK(wait);
266
267         bio = bio_alloc(GFP_KERNEL, 1);
268         if (!bio)
269                 return;
270
271         bio->bi_iter.bi_sector = 0; /* internal bio */
272         bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH);
273         bio->bi_private = &wait;
274         bio->bi_end_io = pblk_end_bio_sync;
275
276         ret = pblk_write_to_cache(pblk, bio, 0);
277         if (ret == NVM_IO_OK) {
278                 if (!wait_for_completion_io_timeout(&wait,
279                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
280                         pr_err("pblk: flush cache timed out\n");
281                 }
282         } else if (ret != NVM_IO_DONE) {
283                 pr_err("pblk: tear down bio failed\n");
284         }
285
286         if (bio->bi_status)
287                 pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);
288
289         bio_put(bio);
290 }
291
292 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
293 {
294         struct pblk_line_meta *lm = &pblk->lm;
295         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
296         struct list_head *move_list = NULL;
297         int vsc = le32_to_cpu(*line->vsc);
298
299         if (!vsc) {
300                 if (line->gc_group != PBLK_LINEGC_FULL) {
301                         line->gc_group = PBLK_LINEGC_FULL;
302                         move_list = &l_mg->gc_full_list;
303                 }
304         } else if (vsc < lm->mid_thrs) {
305                 if (line->gc_group != PBLK_LINEGC_HIGH) {
306                         line->gc_group = PBLK_LINEGC_HIGH;
307                         move_list = &l_mg->gc_high_list;
308                 }
309         } else if (vsc < lm->high_thrs) {
310                 if (line->gc_group != PBLK_LINEGC_MID) {
311                         line->gc_group = PBLK_LINEGC_MID;
312                         move_list = &l_mg->gc_mid_list;
313                 }
314         } else if (vsc < line->sec_in_line) {
315                 if (line->gc_group != PBLK_LINEGC_LOW) {
316                         line->gc_group = PBLK_LINEGC_LOW;
317                         move_list = &l_mg->gc_low_list;
318                 }
319         } else if (vsc == line->sec_in_line) {
320                 if (line->gc_group != PBLK_LINEGC_EMPTY) {
321                         line->gc_group = PBLK_LINEGC_EMPTY;
322                         move_list = &l_mg->gc_empty_list;
323                 }
324         } else {
325                 line->state = PBLK_LINESTATE_CORRUPT;
326                 line->gc_group = PBLK_LINEGC_NONE;
327                 move_list = &l_mg->corrupt_list;
328                 pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
329                                                 line->id, vsc,
330                                                 line->sec_in_line,
331                                                 lm->high_thrs, lm->mid_thrs);
332         }
333
334         return move_list;
335 }
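/*
 * Summary of the bucketing above, where vsc is the line's valid sector
 * count (the reading of the list names is an interpretation, not taken
 * from the original source):
 *
 *	vsc == 0                        -> gc_full_list  (fully invalidated)
 *	0 < vsc < mid_thrs              -> gc_high_list  (high GC gain)
 *	mid_thrs <= vsc < high_thrs     -> gc_mid_list
 *	high_thrs <= vsc < sec_in_line  -> gc_low_list   (low GC gain)
 *	vsc == sec_in_line              -> gc_empty_list (nothing invalidated)
 *	vsc > sec_in_line               -> corrupt_list
 */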
336
337 void pblk_discard(struct pblk *pblk, struct bio *bio)
338 {
339         sector_t slba = pblk_get_lba(bio);
340         sector_t nr_secs = pblk_get_secs(bio);
341
342         pblk_invalidate_range(pblk, slba, nr_secs);
343 }
344
345 struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
346 {
347         struct ppa_addr ppa;
348
349         spin_lock(&pblk->trans_lock);
350         ppa = pblk_trans_map_get(pblk, lba);
351         spin_unlock(&pblk->trans_lock);
352
353         return ppa;
354 }
355
356 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
357 {
358         atomic_long_inc(&pblk->write_failed);
359 #ifdef CONFIG_NVM_DEBUG
360         pblk_print_failed_rqd(pblk, rqd, rqd->error);
361 #endif
362 }
363
364 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
365 {
366         /* Empty page read is not necessarily an error (e.g., L2P recovery) */
367         if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
368                 atomic_long_inc(&pblk->read_empty);
369                 return;
370         }
371
372         switch (rqd->error) {
373         case NVM_RSP_WARN_HIGHECC:
374                 atomic_long_inc(&pblk->read_high_ecc);
375                 break;
376         case NVM_RSP_ERR_FAILECC:
377         case NVM_RSP_ERR_FAILCRC:
378                 atomic_long_inc(&pblk->read_failed);
379                 break;
380         default:
381                 pr_err("pblk: unknown read error:%d\n", rqd->error);
382         }
383 #ifdef CONFIG_NVM_DEBUG
384         pblk_print_failed_rqd(pblk, rqd, rqd->error);
385 #endif
386 }
387
388 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
389 {
390         pblk->sec_per_write = sec_per_write;
391 }
392
393 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
394 {
395         struct nvm_tgt_dev *dev = pblk->dev;
396
397 #ifdef CONFIG_NVM_DEBUG
398         struct ppa_addr *ppa_list;
399
400         ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
401         if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
402                 WARN_ON(1);
403                 return -EINVAL;
404         }
405
406         if (rqd->opcode == NVM_OP_PWRITE) {
407                 struct pblk_line *line;
408                 struct ppa_addr ppa;
409                 int i;
410
411                 for (i = 0; i < rqd->nr_ppas; i++) {
412                         ppa = ppa_list[i];
413                         line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
414
415                         spin_lock(&line->lock);
416                         if (line->state != PBLK_LINESTATE_OPEN) {
417                                 pr_err("pblk: bad ppa: line:%d,state:%d\n",
418                                                         line->id, line->state);
419                                 WARN_ON(1);
420                                 spin_unlock(&line->lock);
421                                 return -EINVAL;
422                         }
423                         spin_unlock(&line->lock);
424                 }
425         }
426 #endif
427         return nvm_submit_io(dev, rqd);
428 }
429
430 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
431                               unsigned int nr_secs, unsigned int len,
432                               gfp_t gfp_mask)
433 {
434         struct nvm_tgt_dev *dev = pblk->dev;
435         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
436         void *kaddr = data;
437         struct page *page;
438         struct bio *bio;
439         int i, ret;
440
441         if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
442                 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
443
444         bio = bio_kmalloc(gfp_mask, nr_secs);
445         if (!bio)
446                 return ERR_PTR(-ENOMEM);
447
448         for (i = 0; i < nr_secs; i++) {
449                 page = vmalloc_to_page(kaddr);
450                 if (!page) {
451                         pr_err("pblk: could not map vmalloc bio\n");
452                         bio_put(bio);
453                         bio = ERR_PTR(-ENOMEM);
454                         goto out;
455                 }
456
457                 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
458                 if (ret != PAGE_SIZE) {
459                         pr_err("pblk: could not add page to bio\n");
460                         bio_put(bio);
461                         bio = ERR_PTR(-ENOMEM);
462                         goto out;
463                 }
464
465                 kaddr += PAGE_SIZE;
466         }
467 out:
468         return bio;
469 }
470
471 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
472                    unsigned long secs_to_flush)
473 {
474         int max = pblk->sec_per_write;
475         int min = pblk->min_write_pgs;
476         int secs_to_sync = 0;
477
478         if (secs_avail >= max)
479                 secs_to_sync = max;
480         else if (secs_avail >= min)
481                 secs_to_sync = min * (secs_avail / min);
482         else if (secs_to_flush)
483                 secs_to_sync = min;
484
485         return secs_to_sync;
486 }
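/*
 * Worked example for the rounding above, assuming hypothetical values
 * min_write_pgs = 4 and sec_per_write = 8:
 *
 *	secs_avail = 10, secs_to_flush = 0  ->  8 (capped at max)
 *	secs_avail = 6,  secs_to_flush = 0  ->  4 (rounded down to min multiple)
 *	secs_avail = 2,  secs_to_flush = 1  ->  4 (flush forces a minimal write)
 *	secs_avail = 2,  secs_to_flush = 0  ->  0 (wait for more data)
 */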
487
488 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
489 {
490         u64 addr;
491         int i;
492
493         addr = find_next_zero_bit(line->map_bitmap,
494                                         pblk->lm.sec_per_line, line->cur_sec);
495         line->cur_sec = addr - nr_secs;
496
497         for (i = 0; i < nr_secs; i++, line->cur_sec--)
498                 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
499 }
500
501 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
502 {
503         u64 addr;
504         int i;
505
506         /* logic error: ppa out-of-bounds. Prevent generating bad address */
507         if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
508                 WARN(1, "pblk: page allocation out of bounds\n");
509                 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
510         }
511
512         line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
513                                         pblk->lm.sec_per_line, line->cur_sec);
514         for (i = 0; i < nr_secs; i++, line->cur_sec++)
515                 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
516
517         return addr;
518 }
519
520 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
521 {
522         u64 addr;
523
524         /* Lock needed in case a write fails and a recovery needs to remap
525          * failed write buffer entries
526          */
527         spin_lock(&line->lock);
528         addr = __pblk_alloc_page(pblk, line, nr_secs);
529         line->left_msecs -= nr_secs;
530         WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
531         spin_unlock(&line->lock);
532
533         return addr;
534 }
535
536 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
537 {
538         u64 paddr;
539
540         spin_lock(&line->lock);
541         paddr = find_next_zero_bit(line->map_bitmap,
542                                         pblk->lm.sec_per_line, line->cur_sec);
543         spin_unlock(&line->lock);
544
545         return paddr;
546 }
547
548 /*
549  * Submit emeta to one LUN in the raid line at a time to avoid a deadlock
550  * when taking the per-LUN semaphore.
551  */
552 static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
553                                      void *emeta_buf, u64 paddr, int dir)
554 {
555         struct nvm_tgt_dev *dev = pblk->dev;
556         struct nvm_geo *geo = &dev->geo;
557         struct pblk_line_meta *lm = &pblk->lm;
558         struct bio *bio;
559         struct nvm_rq rqd;
560         struct ppa_addr *ppa_list;
561         dma_addr_t dma_ppa_list;
562         int min = pblk->min_write_pgs;
563         int left_ppas = lm->emeta_sec[0];
564         int id = line->id;
565         int rq_ppas, rq_len;
566         int cmd_op, bio_op;
567         int i, j;
568         int ret;
569         DECLARE_COMPLETION_ONSTACK(wait);
570
571         if (dir == WRITE) {
572                 bio_op = REQ_OP_WRITE;
573                 cmd_op = NVM_OP_PWRITE;
574         } else if (dir == READ) {
575                 bio_op = REQ_OP_READ;
576                 cmd_op = NVM_OP_PREAD;
577         } else
578                 return -EINVAL;
579
580         ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_ppa_list);
581         if (!ppa_list)
582                 return -ENOMEM;
583
584 next_rq:
585         memset(&rqd, 0, sizeof(struct nvm_rq));
586
587         rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
588         rq_len = rq_ppas * geo->sec_size;
589
590         bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len, GFP_KERNEL);
591         if (IS_ERR(bio)) {
592                 ret = PTR_ERR(bio);
593                 goto free_rqd_dma;
594         }
595
596         bio->bi_iter.bi_sector = 0; /* internal bio */
597         bio_set_op_attrs(bio, bio_op, 0);
598
599         rqd.bio = bio;
600         rqd.opcode = cmd_op;
601         rqd.nr_ppas = rq_ppas;
602         rqd.ppa_list = ppa_list;
603         rqd.dma_ppa_list = dma_ppa_list;
604         rqd.end_io = pblk_end_io_sync;
605         rqd.private = &wait;
606
607         if (dir == WRITE) {
608                 rqd.flags = pblk_set_progr_mode(pblk, WRITE);
609                 for (i = 0; i < rqd.nr_ppas; ) {
610                         spin_lock(&line->lock);
611                         paddr = __pblk_alloc_page(pblk, line, min);
612                         spin_unlock(&line->lock);
613                         for (j = 0; j < min; j++, i++, paddr++)
614                                 rqd.ppa_list[i] =
615                                         addr_to_gen_ppa(pblk, paddr, id);
616                 }
617         } else {
618                 for (i = 0; i < rqd.nr_ppas; ) {
619                         struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
620                         int pos = pblk_dev_ppa_to_pos(geo, ppa);
621                         int read_type = PBLK_READ_RANDOM;
622
623                         if (pblk_io_aligned(pblk, rq_ppas))
624                                 read_type = PBLK_READ_SEQUENTIAL;
625                         rqd.flags = pblk_set_read_mode(pblk, read_type);
626
627                         while (test_bit(pos, line->blk_bitmap)) {
628                                 paddr += min;
629                                 if (pblk_boundary_paddr_checks(pblk, paddr)) {
630                                         pr_err("pblk: corrupt emeta line:%d\n",
631                                                                 line->id);
632                                         bio_put(bio);
633                                         ret = -EINTR;
634                                         goto free_rqd_dma;
635                                 }
636
637                                 ppa = addr_to_gen_ppa(pblk, paddr, id);
638                                 pos = pblk_dev_ppa_to_pos(geo, ppa);
639                         }
640
641                         if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
642                                 pr_err("pblk: corrupt emeta line:%d\n",
643                                                                 line->id);
644                                 bio_put(bio);
645                                 ret = -EINTR;
646                                 goto free_rqd_dma;
647                         }
648
649                         for (j = 0; j < min; j++, i++, paddr++)
650                                 rqd.ppa_list[i] =
651                                         addr_to_gen_ppa(pblk, paddr, line->id);
652                 }
653         }
654
655         ret = pblk_submit_io(pblk, &rqd);
656         if (ret) {
657                 pr_err("pblk: emeta I/O submission failed: %d\n", ret);
658                 bio_put(bio);
659                 goto free_rqd_dma;
660         }
661
662         if (!wait_for_completion_io_timeout(&wait,
663                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
664                 pr_err("pblk: emeta I/O timed out\n");
665         }
666         reinit_completion(&wait);
667
668         if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
669                 bio_put(bio);
670
671         if (rqd.error) {
672                 if (dir == WRITE)
673                         pblk_log_write_err(pblk, &rqd);
674                 else
675                         pblk_log_read_err(pblk, &rqd);
676         }
677
678         emeta_buf += rq_len;
679         left_ppas -= rq_ppas;
680         if (left_ppas)
681                 goto next_rq;
682 free_rqd_dma:
683         nvm_dev_dma_free(dev->parent, ppa_list, dma_ppa_list);
684         return ret;
685 }
686
687 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
688 {
689         struct nvm_tgt_dev *dev = pblk->dev;
690         struct nvm_geo *geo = &dev->geo;
691         struct pblk_line_meta *lm = &pblk->lm;
692         int bit;
693
694         /* This usually only happens on bad lines */
695         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
696         if (bit >= lm->blk_per_line)
697                 return -1;
698
699         return bit * geo->sec_per_pl;
700 }
701
702 static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
703                                      u64 paddr, int dir)
704 {
705         struct nvm_tgt_dev *dev = pblk->dev;
706         struct pblk_line_meta *lm = &pblk->lm;
707         struct bio *bio;
708         struct nvm_rq rqd;
709         __le64 *lba_list = NULL;
710         int i, ret;
711         int cmd_op, bio_op;
712         int flags;
713         DECLARE_COMPLETION_ONSTACK(wait);
714
715         if (dir == WRITE) {
716                 bio_op = REQ_OP_WRITE;
717                 cmd_op = NVM_OP_PWRITE;
718                 flags = pblk_set_progr_mode(pblk, WRITE);
719                 lba_list = emeta_to_lbas(pblk, line->emeta->buf);
720         } else if (dir == READ) {
721                 bio_op = REQ_OP_READ;
722                 cmd_op = NVM_OP_PREAD;
723                 flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
724         } else
725                 return -EINVAL;
726
727         memset(&rqd, 0, sizeof(struct nvm_rq));
728
729         rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
730                                                         &rqd.dma_ppa_list);
731         if (!rqd.ppa_list)
732                 return -ENOMEM;
733
734         bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
735         if (IS_ERR(bio)) {
736                 ret = PTR_ERR(bio);
737                 goto free_ppa_list;
738         }
739
740         bio->bi_iter.bi_sector = 0; /* internal bio */
741         bio_set_op_attrs(bio, bio_op, 0);
742
743         rqd.bio = bio;
744         rqd.opcode = cmd_op;
745         rqd.flags = flags;
746         rqd.nr_ppas = lm->smeta_sec;
747         rqd.end_io = pblk_end_io_sync;
748         rqd.private = &wait;
749
750         for (i = 0; i < lm->smeta_sec; i++, paddr++) {
751                 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
752                 if (dir == WRITE)
753                         lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
754         }
755
756         /*
757          * This I/O is sent by the write thread when a line is replaced. Since
758          * the write thread is the only one sending write and erase commands,
759          * there is no need to take the LUN semaphore.
760          */
761         ret = pblk_submit_io(pblk, &rqd);
762         if (ret) {
763                 pr_err("pblk: smeta I/O submission failed: %d\n", ret);
764                 bio_put(bio);
765                 goto free_ppa_list;
766         }
767
768         if (!wait_for_completion_io_timeout(&wait,
769                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
770                 pr_err("pblk: smeta I/O timed out\n");
771         }
772
773         if (rqd.error) {
774                 if (dir == WRITE)
775                         pblk_log_write_err(pblk, &rqd);
776                 else
777                         pblk_log_read_err(pblk, &rqd);
778         }
779
780 free_ppa_list:
781         nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);
782
783         return ret;
784 }
785
786 int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
787 {
788         u64 bpaddr = pblk_line_smeta_start(pblk, line);
789
790         return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
791 }
792
793 int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
794                          void *emeta_buf)
795 {
796         return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
797                                                 line->emeta_ssec, READ);
798 }
799
800 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
801                             struct ppa_addr ppa)
802 {
803         rqd->opcode = NVM_OP_ERASE;
804         rqd->ppa_addr = ppa;
805         rqd->nr_ppas = 1;
806         rqd->flags = pblk_set_progr_mode(pblk, ERASE);
807         rqd->bio = NULL;
808 }
809
810 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
811 {
812         struct nvm_rq rqd;
813         int ret;
814         DECLARE_COMPLETION_ONSTACK(wait);
815
816         memset(&rqd, 0, sizeof(struct nvm_rq));
817
818         pblk_setup_e_rq(pblk, &rqd, ppa);
819
820         rqd.end_io = pblk_end_io_sync;
821         rqd.private = &wait;
822
823         /* The write thread schedules erases so as to minimize interference
824          * with writes. Thus, there is no need to take the LUN semaphore.
825          */
826         ret = pblk_submit_io(pblk, &rqd);
827         if (ret) {
828                 struct nvm_tgt_dev *dev = pblk->dev;
829                 struct nvm_geo *geo = &dev->geo;
830
831                 pr_err("pblk: could not sync erase line:%d,blk:%d\n",
832                                         pblk_dev_ppa_to_line(ppa),
833                                         pblk_dev_ppa_to_pos(geo, ppa));
834
835                 rqd.error = ret;
836                 goto out;
837         }
838
839         if (!wait_for_completion_io_timeout(&wait,
840                                 msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
841                 pr_err("pblk: sync erase timed out\n");
842         }
843
844 out:
845         rqd.private = pblk;
846         __pblk_end_io_erase(pblk, &rqd);
847
848         return 0;
849 }
850
851 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
852 {
853         struct pblk_line_meta *lm = &pblk->lm;
854         struct ppa_addr ppa;
855         int bit = -1;
856
857         /* Erase only good blocks, one at a time */
858         do {
859                 spin_lock(&line->lock);
860                 bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
861                                                                 bit + 1);
862                 if (bit >= lm->blk_per_line) {
863                         spin_unlock(&line->lock);
864                         break;
865                 }
866
867                 ppa = pblk->luns[bit].bppa; /* set ch and lun */
868                 ppa.g.blk = line->id;
869
870                 atomic_dec(&line->left_eblks);
871                 WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
872                 spin_unlock(&line->lock);
873
874                 if (pblk_blk_erase_sync(pblk, ppa)) {
875                         pr_err("pblk: failed to erase line %d\n", line->id);
876                         return -ENOMEM;
877                 }
878         } while (1);
879
880         return 0;
881 }
882
883 static void pblk_line_setup_metadata(struct pblk_line *line,
884                                      struct pblk_line_mgmt *l_mg,
885                                      struct pblk_line_meta *lm)
886 {
887         int meta_line;
888
889 retry_meta:
890         meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
891         if (meta_line == PBLK_DATA_LINES) {
892                 spin_unlock(&l_mg->free_lock);
893                 io_schedule();
894                 spin_lock(&l_mg->free_lock);
895                 goto retry_meta;
896         }
897
898         set_bit(meta_line, &l_mg->meta_bitmap);
899         line->meta_line = meta_line;
900
901         line->smeta = l_mg->sline_meta[meta_line];
902         line->emeta = l_mg->eline_meta[meta_line];
903
904         memset(line->smeta, 0, lm->smeta_len);
905         memset(line->emeta->buf, 0, lm->emeta_len[0]);
906
907         line->emeta->mem = 0;
908         atomic_set(&line->emeta->sync, 0);
909 }
910
911 /* For now, lines are always assumed to be full. Thus, the smeta former and
912  * current lun bitmaps are omitted.
913  */
914 static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
915                                   struct pblk_line *cur)
916 {
917         struct nvm_tgt_dev *dev = pblk->dev;
918         struct nvm_geo *geo = &dev->geo;
919         struct pblk_line_meta *lm = &pblk->lm;
920         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
921         struct pblk_emeta *emeta = line->emeta;
922         struct line_emeta *emeta_buf = emeta->buf;
923         struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
924         int nr_blk_line;
925
926         /* After erasing the line, new bad blocks might appear and we risk
927          * having an invalid line
928          */
929         nr_blk_line = lm->blk_per_line -
930                         bitmap_weight(line->blk_bitmap, lm->blk_per_line);
931         if (nr_blk_line < lm->min_blk_line) {
932                 spin_lock(&l_mg->free_lock);
933                 spin_lock(&line->lock);
934                 line->state = PBLK_LINESTATE_BAD;
935                 spin_unlock(&line->lock);
936
937                 list_add_tail(&line->list, &l_mg->bad_list);
938                 spin_unlock(&l_mg->free_lock);
939
940                 pr_debug("pblk: line %d is bad\n", line->id);
941
942                 return 0;
943         }
944
945         /* Run-time metadata */
946         line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
947
948         /* Mark LUNs allocated in this line (all for now) */
949         bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
950
951         smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
952         memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
953         smeta_buf->header.id = cpu_to_le32(line->id);
954         smeta_buf->header.type = cpu_to_le16(line->type);
955         smeta_buf->header.version = cpu_to_le16(1);
956
957         /* Start metadata */
958         smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
959         smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
960
961         /* Fill metadata among lines */
962         if (cur) {
963                 memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
964                 smeta_buf->prev_id = cpu_to_le32(cur->id);
965                 cur->emeta->buf->next_id = cpu_to_le32(line->id);
966         } else {
967                 smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
968         }
969
970         /* All smeta must be set at this point */
971         smeta_buf->header.crc = cpu_to_le32(
972                         pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
973         smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
974
975         /* End metadata */
976         memcpy(&emeta_buf->header, &smeta_buf->header,
977                                                 sizeof(struct line_header));
978         emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
979         emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
980         emeta_buf->nr_valid_lbas = cpu_to_le64(0);
981         emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
982         emeta_buf->crc = cpu_to_le32(0);
983         emeta_buf->prev_id = smeta_buf->prev_id;
984
985         return 1;
986 }
987
991 static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
992                              int init)
993 {
994         struct nvm_tgt_dev *dev = pblk->dev;
995         struct nvm_geo *geo = &dev->geo;
996         struct pblk_line_meta *lm = &pblk->lm;
997         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
998         int nr_bb = 0;
999         u64 off;
1000         int bit = -1;
1001
1002         line->sec_in_line = lm->sec_per_line;
1003
1004         /* Capture bad block information on line mapping bitmaps */
1005         while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1006                                         bit + 1)) < lm->blk_per_line) {
1007                 off = bit * geo->sec_per_pl;
1008                 bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1009                                                         lm->sec_per_line);
1010                 bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1011                                                         lm->sec_per_line);
1012                 line->sec_in_line -= geo->sec_per_blk;
1013                 if (bit >= lm->emeta_bb)
1014                         nr_bb++;
1015         }
1016
1017         /* Mark smeta metadata sectors as bad sectors */
1018         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1019         off = bit * geo->sec_per_pl;
1020 retry_smeta:
1021         bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1022         line->sec_in_line -= lm->smeta_sec;
1023         line->smeta_ssec = off;
1024         line->cur_sec = off + lm->smeta_sec;
1025
1026         if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
1027                 pr_debug("pblk: line smeta I/O failed. Retry\n");
1028                 off += geo->sec_per_pl;
1029                 goto retry_smeta;
1030         }
1031
1032         bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1033
1034         /* Mark emeta metadata sectors as bad sectors. We need to consider bad
1035          * blocks to make sure that there are enough sectors to store emeta
1036          */
1037         bit = lm->sec_per_line;
1038         off = lm->sec_per_line - lm->emeta_sec[0];
1039         bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
1040         while (nr_bb) {
1041                 off -= geo->sec_per_pl;
1042                 if (!test_bit(off, line->invalid_bitmap)) {
1043                         bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
1044                         nr_bb--;
1045                 }
1046         }
1047
1048         line->sec_in_line -= lm->emeta_sec[0];
1049         line->emeta_ssec = off;
1050         line->nr_valid_lbas = 0;
1051         line->left_msecs = line->sec_in_line;
1052         *line->vsc = cpu_to_le32(line->sec_in_line);
1053
1054         if (lm->sec_per_line - line->sec_in_line !=
1055                 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1056                 spin_lock(&line->lock);
1057                 line->state = PBLK_LINESTATE_BAD;
1058                 spin_unlock(&line->lock);
1059
1060                 list_add_tail(&line->list, &l_mg->bad_list);
1061                 pr_err("pblk: unexpected line %d is bad\n", line->id);
1062
1063                 return 0;
1064         }
1065
1066         return 1;
1067 }
1068
1069 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1070 {
1071         struct pblk_line_meta *lm = &pblk->lm;
1072         int blk_in_line = atomic_read(&line->blk_in_line);
1073
1074         line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
1075         if (!line->map_bitmap)
1076                 return -ENOMEM;
1077         memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1078
1079         /* invalid_bitmap is special since it is used when line is closed. No
1080          * need to zero it; it will be initialized using bb info from
1081          * map_bitmap
1082          */
1083         line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
1084         if (!line->invalid_bitmap) {
1085                 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1086                 return -ENOMEM;
1087         }
1088
1089         spin_lock(&line->lock);
1090         if (line->state != PBLK_LINESTATE_FREE) {
1091                 spin_unlock(&line->lock);
1092                 WARN(1, "pblk: corrupted line state\n");
1093                 return -EINTR;
1094         }
1095         line->state = PBLK_LINESTATE_OPEN;
1096
1097         atomic_set(&line->left_eblks, blk_in_line);
1098         atomic_set(&line->left_seblks, blk_in_line);
1099
1100         line->meta_distance = lm->meta_distance;
1101         spin_unlock(&line->lock);
1102
1103         /* Bad blocks do not need to be erased */
1104         bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1105
1106         kref_init(&line->ref);
1107
1108         return 0;
1109 }
1110
1111 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1112 {
1113         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1114         int ret;
1115
1116         spin_lock(&l_mg->free_lock);
1117         l_mg->data_line = line;
1118         list_del(&line->list);
1119
1120         ret = pblk_line_prepare(pblk, line);
1121         if (ret) {
1122                 list_add(&line->list, &l_mg->free_list);
1123                 spin_unlock(&l_mg->free_lock);
1124                 return ret;
1125         }
1126         spin_unlock(&l_mg->free_lock);
1127
1128         pblk_rl_free_lines_dec(&pblk->rl, line);
1129
1130         if (!pblk_line_init_bb(pblk, line, 0)) {
1131                 list_add(&line->list, &l_mg->free_list);
1132                 return -EINTR;
1133         }
1134
1135         return 0;
1136 }
1137
1138 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1139 {
1140         mempool_free(line->map_bitmap, pblk->line_meta_pool);
1141         line->map_bitmap = NULL;
1142         line->smeta = NULL;
1143         line->emeta = NULL;
1144 }
1145
1146 struct pblk_line *pblk_line_get(struct pblk *pblk)
1147 {
1148         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1149         struct pblk_line_meta *lm = &pblk->lm;
1150         struct pblk_line *line = NULL;
1151         int bit;
1152
1153         lockdep_assert_held(&l_mg->free_lock);
1154
1155 retry_get:
1156         if (list_empty(&l_mg->free_list)) {
1157                 pr_err("pblk: no free lines\n");
1158                 goto out;
1159         }
1160
1161         line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1162         list_del(&line->list);
1163         l_mg->nr_free_lines--;
1164
1165         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1166         if (unlikely(bit >= lm->blk_per_line)) {
1167                 spin_lock(&line->lock);
1168                 line->state = PBLK_LINESTATE_BAD;
1169                 spin_unlock(&line->lock);
1170
1171                 list_add_tail(&line->list, &l_mg->bad_list);
1172
1173                 pr_debug("pblk: line %d is bad\n", line->id);
1174                 goto retry_get;
1175         }
1176
1177         if (pblk_line_prepare(pblk, line)) {
1178                 pr_err("pblk: failed to prepare line %d\n", line->id);
1179                 list_add(&line->list, &l_mg->free_list);
1180                 return NULL;
1181         }
1182
1183 out:
1184         return line;
1185 }
1186
1187 static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1188                                          struct pblk_line *line)
1189 {
1190         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1191         struct pblk_line *retry_line;
1192
1193         spin_lock(&l_mg->free_lock);
1194         retry_line = pblk_line_get(pblk);
1195         if (!retry_line) {
1196                 l_mg->data_line = NULL;
1197                 spin_unlock(&l_mg->free_lock);
1198                 return NULL;
1199         }
1200
1201         retry_line->smeta = line->smeta;
1202         retry_line->emeta = line->emeta;
1203         retry_line->meta_line = line->meta_line;
1204
1205         pblk_line_free(pblk, line);
1206         l_mg->data_line = retry_line;
1207         spin_unlock(&l_mg->free_lock);
1208
1209         if (pblk_line_erase(pblk, retry_line)) {
1210                 spin_lock(&l_mg->free_lock);
1211                 l_mg->data_line = NULL;
1212                 spin_unlock(&l_mg->free_lock);
1213                 return NULL;
1214         }
1215
1216         pblk_rl_free_lines_dec(&pblk->rl, retry_line);
1217
1218         return retry_line;
1219 }
1220
1221 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1222 {
1223         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1224         struct pblk_line *line;
1225         int is_next = 0;
1226
1227         spin_lock(&l_mg->free_lock);
1228         line = pblk_line_get(pblk);
1229         if (!line) {
1230                 spin_unlock(&l_mg->free_lock);
1231                 return NULL;
1232         }
1233
1234         line->seq_nr = l_mg->d_seq_nr++;
1235         line->type = PBLK_LINETYPE_DATA;
1236         l_mg->data_line = line;
1237
1238         pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1239
1240         /* Allocate next line for preparation */
1241         l_mg->data_next = pblk_line_get(pblk);
1242         if (l_mg->data_next) {
1243                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1244                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1245                 is_next = 1;
1246         }
1247         spin_unlock(&l_mg->free_lock);
1248
1249         pblk_rl_free_lines_dec(&pblk->rl, line);
1250         if (is_next)
1251                 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
1252
1253         if (pblk_line_erase(pblk, line))
1254                 return NULL;
1255
1256 retry_setup:
1257         if (!pblk_line_init_metadata(pblk, line, NULL)) {
1258                 line = pblk_line_retry(pblk, line);
1259                 if (!line)
1260                         return NULL;
1261
1262                 goto retry_setup;
1263         }
1264
1265         if (!pblk_line_init_bb(pblk, line, 1)) {
1266                 line = pblk_line_retry(pblk, line);
1267                 if (!line)
1268                         return NULL;
1269
1270                 goto retry_setup;
1271         }
1272
1273         return line;
1274 }
1275
1276 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1277 {
1278         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1279         struct pblk_line *cur, *new;
1280         unsigned int left_seblks;
1281         int is_next = 0;
1282
1283         cur = l_mg->data_line;
1284         new = l_mg->data_next;
1285         if (!new)
1286                 return NULL;
1287         l_mg->data_line = new;
1288
1289 retry_line:
1290         left_seblks = atomic_read(&new->left_seblks);
1291         if (left_seblks) {
1292                 /* If line is not fully erased, erase it */
1293                 if (atomic_read(&new->left_eblks)) {
1294                         if (pblk_line_erase(pblk, new))
1295                                 return NULL;
1296                 } else {
1297                         io_schedule();
1298                 }
1299                 goto retry_line;
1300         }
1301
1302         spin_lock(&l_mg->free_lock);
1303         /* Allocate next line for preparation */
1304         l_mg->data_next = pblk_line_get(pblk);
1305         if (l_mg->data_next) {
1306                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1307                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1308                 is_next = 1;
1309         }
1310
1311         pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1312         spin_unlock(&l_mg->free_lock);
1313
1314         if (is_next)
1315                 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
1316
1317 retry_setup:
1318         if (!pblk_line_init_metadata(pblk, new, cur)) {
1319                 new = pblk_line_retry(pblk, new);
1320                 if (!new)
1321                         return NULL;
1322
1323                 goto retry_setup;
1324         }
1325
1326         if (!pblk_line_init_bb(pblk, new, 1)) {
1327                 new = pblk_line_retry(pblk, new);
1328                 if (!new)
1329                         return NULL;
1330
1331                 goto retry_setup;
1332         }
1333
1334         return new;
1335 }
1336
1337 void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
1338 {
1339         if (line->map_bitmap)
1340                 mempool_free(line->map_bitmap, pblk->line_meta_pool);
1341         if (line->invalid_bitmap)
1342                 mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
1343
1344         *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1345
1346         line->map_bitmap = NULL;
1347         line->invalid_bitmap = NULL;
1348         line->smeta = NULL;
1349         line->emeta = NULL;
1350 }
1351
1352 void pblk_line_put(struct kref *ref)
1353 {
1354         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1355         struct pblk *pblk = line->pblk;
1356         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1357
1358         spin_lock(&line->lock);
1359         WARN_ON(line->state != PBLK_LINESTATE_GC);
1360         line->state = PBLK_LINESTATE_FREE;
1361         line->gc_group = PBLK_LINEGC_NONE;
1362         pblk_line_free(pblk, line);
1363         spin_unlock(&line->lock);
1364
1365         spin_lock(&l_mg->free_lock);
1366         list_add_tail(&line->list, &l_mg->free_list);
1367         l_mg->nr_free_lines++;
1368         spin_unlock(&l_mg->free_lock);
1369
1370         pblk_rl_free_lines_inc(&pblk->rl, line);
1371 }
1372
1373 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1374 {
1375         struct nvm_rq *rqd;
1376         int err;
1377
1378         rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
1379         memset(rqd, 0, pblk_g_rq_size);
1380
1381         pblk_setup_e_rq(pblk, rqd, ppa);
1382
1383         rqd->end_io = pblk_end_io_erase;
1384         rqd->private = pblk;
1385
1386         /* The write thread schedules erases so as to minimize interference
1387          * with writes. Thus, there is no need to take the LUN semaphore.
1388          */
1389         err = pblk_submit_io(pblk, rqd);
1390         if (err) {
1391                 struct nvm_tgt_dev *dev = pblk->dev;
1392                 struct nvm_geo *geo = &dev->geo;
1393
1394                 pr_err("pblk: could not async erase line:%d,blk:%d\n",
1395                                         pblk_dev_ppa_to_line(ppa),
1396                                         pblk_dev_ppa_to_pos(geo, ppa));
1397         }
1398
1399         return err;
1400 }
1401
1402 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1403 {
1404         return pblk->l_mg.data_line;
1405 }
1406
1407 /* For now, always erase next line */
1408 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1409 {
1410         return pblk->l_mg.data_next;
1411 }
1412
1413 int pblk_line_is_full(struct pblk_line *line)
1414 {
1415         return (line->left_msecs == 0);
1416 }
1417
1418 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1419 {
1420         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1421         struct pblk_line_meta *lm = &pblk->lm;
1422         struct list_head *move_list;
1423
1424         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1425                                 "pblk: corrupt closed line %d\n", line->id);
1426
1427         spin_lock(&l_mg->free_lock);
1428         WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1429         spin_unlock(&l_mg->free_lock);
1430
1431         spin_lock(&l_mg->gc_lock);
1432         spin_lock(&line->lock);
1433         WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1434         line->state = PBLK_LINESTATE_CLOSED;
1435         move_list = pblk_line_gc_list(pblk, line);
1436
1437         list_add_tail(&line->list, move_list);
1438
1439         mempool_free(line->map_bitmap, pblk->line_meta_pool);
1440         line->map_bitmap = NULL;
1441         line->smeta = NULL;
1442         line->emeta = NULL;
1443
1444         spin_unlock(&line->lock);
1445         spin_unlock(&l_mg->gc_lock);
1446 }
1447
1448 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1449 {
1450         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1451         struct pblk_line_meta *lm = &pblk->lm;
1452         struct pblk_emeta *emeta = line->emeta;
1453         struct line_emeta *emeta_buf = emeta->buf;
1454
1455         /* No need for exact vsc value; avoid a big line lock and take approx. */
1456         memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1457         memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1458
1459         emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1460         emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1461
1462         spin_lock(&l_mg->close_lock);
1463         spin_lock(&line->lock);
1464         list_add_tail(&line->list, &l_mg->emeta_list);
1465         spin_unlock(&line->lock);
1466         spin_unlock(&l_mg->close_lock);
1467 }
1468
1469 void pblk_line_close_ws(struct work_struct *work)
1470 {
1471         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1472                                                                         ws);
1473         struct pblk *pblk = line_ws->pblk;
1474         struct pblk_line *line = line_ws->line;
1475
1476         pblk_line_close(pblk, line);
1477         mempool_free(line_ws, pblk->line_ws_pool);
1478 }
1479
1480 void pblk_line_mark_bb(struct work_struct *work)
1481 {
1482         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1483                                                                         ws);
1484         struct pblk *pblk = line_ws->pblk;
1485         struct nvm_tgt_dev *dev = pblk->dev;
1486         struct ppa_addr *ppa = line_ws->priv;
1487         int ret;
1488
1489         ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
1490         if (ret) {
1491                 struct pblk_line *line;
1492                 int pos;
1493
1494                 line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
1495                 pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
1496
1497                 pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
1498                                 line->id, pos);
1499         }
1500
1501         kfree(ppa);
1502         mempool_free(line_ws, pblk->line_ws_pool);
1503 }
1504
1505 void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1506                       void (*work)(struct work_struct *))
1507 {
1508         struct pblk_line_ws *line_ws;
1509
1510         line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
1511         if (!line_ws)
1512                 return;
1513
1514         line_ws->pblk = pblk;
1515         line_ws->line = line;
1516         line_ws->priv = priv;
1517
1518         INIT_WORK(&line_ws->ws, work);
1519         queue_work(pblk->kw_wq, &line_ws->ws);
1520 }
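/*
 * Example user of the helper above: pblk_mark_bb() queues
 * pblk_line_mark_bb() through pblk_line_run_ws() so that the bad block
 * table update (nvm_set_tgt_bb_tbl()) runs in process context instead of
 * in the erase completion path.
 */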
1521
1522 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1523                   unsigned long *lun_bitmap)
1524 {
1525         struct nvm_tgt_dev *dev = pblk->dev;
1526         struct nvm_geo *geo = &dev->geo;
1527         struct pblk_lun *rlun;
1528         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1529         int ret;
1530
1531         /*
1532          * Only send one inflight I/O per LUN. Since we map at a page
1533          * granularity, all ppas in the I/O will map to the same LUN
1534          */
1535 #ifdef CONFIG_NVM_DEBUG
1536         int i;
1537
1538         for (i = 1; i < nr_ppas; i++)
1539                 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
1540                                 ppa_list[0].g.ch != ppa_list[i].g.ch);
1541 #endif
1542         /* If the LUN has been locked for this same request, do not attempt to
1543          * lock it again
1544          */
1545         if (test_and_set_bit(pos, lun_bitmap))
1546                 return;
1547
1548         rlun = &pblk->luns[pos];
1549         ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
1550         if (ret) {
1551                 switch (ret) {
1552                 case -ETIME:
1553                         pr_err("pblk: lun semaphore timed out\n");
1554                         break;
1555                 case -EINTR:
1556                         pr_err("pblk: lun semaphore interrupted\n");
1557                         break;
1558                 }
1559         }
1560 }
1561
1562 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1563                 unsigned long *lun_bitmap)
1564 {
1565         struct nvm_tgt_dev *dev = pblk->dev;
1566         struct nvm_geo *geo = &dev->geo;
1567         struct pblk_lun *rlun;
1568         int nr_luns = geo->nr_luns;
1569         int bit = -1;
1570
1571         while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
1572                 rlun = &pblk->luns[bit];
1573                 up(&rlun->wr_sem);
1574         }
1575
1576         kfree(lun_bitmap);
1577 }
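/*
 * Sketch of how the two helpers above pair up on a write (the allocation
 * site is illustrative; pblk_up_rq() kfrees the bitmap, so it must come
 * from a kmalloc-family allocation):
 *
 *	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
 *	pblk_down_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
 *	... submit the write and wait for completion ...
 *	pblk_up_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
 */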
1578
1579 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1580 {
1581         struct ppa_addr l2p_ppa;
1582
1583         /* logic error: lba out-of-bounds. Ignore update */
1584         if (!(lba < pblk->rl.nr_secs)) {
1585                 WARN(1, "pblk: corrupted L2P map request\n");
1586                 return;
1587         }
1588
1589         spin_lock(&pblk->trans_lock);
1590         l2p_ppa = pblk_trans_map_get(pblk, lba);
1591
1592         if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
1593                 pblk_map_invalidate(pblk, l2p_ppa);
1594
1595         pblk_trans_map_set(pblk, lba, ppa);
1596         spin_unlock(&pblk->trans_lock);
1597 }
1598
1599 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1600 {
1601 #ifdef CONFIG_NVM_DEBUG
1602         /* Callers must ensure that the ppa points to a cache address */
1603         BUG_ON(!pblk_addr_in_cache(ppa));
1604         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1605 #endif
1606
1607         pblk_update_map(pblk, lba, ppa);
1608 }
1609
1610 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1611                        struct pblk_line *gc_line)
1612 {
1613         struct ppa_addr l2p_ppa;
1614         int ret = 1;
1615
1616 #ifdef CONFIG_NVM_DEBUG
1617         /* Callers must ensure that the ppa points to a cache address */
1618         BUG_ON(!pblk_addr_in_cache(ppa));
1619         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1620 #endif
1621
1622         /* logic error: lba out-of-bounds. Ignore update */
1623         if (!(lba < pblk->rl.nr_secs)) {
1624                 WARN(1, "pblk: corrupted L2P map request\n");
1625                 return 0;
1626         }
1627
1628         spin_lock(&pblk->trans_lock);
1629         l2p_ppa = pblk_trans_map_get(pblk, lba);
1630
1631         /* Prevent updated entries from being overwritten by GC */
1632         if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
1633                                 pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
1634                 ret = 0;
1635                 goto out;
1636         }
1637
1638         pblk_trans_map_set(pblk, lba, ppa);
1639 out:
1640         spin_unlock(&pblk->trans_lock);
1641         return ret;
1642 }
1643
1644 void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
1645                          struct ppa_addr entry_line)
1646 {
1647         struct ppa_addr l2p_line;
1648
1649 #ifdef CONFIG_NVM_DEBUG
1650         /* Callers must ensure that the ppa points to a device address */
1651         BUG_ON(pblk_addr_in_cache(ppa));
1652 #endif
1653         /* Invalidate and discard padded entries */
1654         if (lba == ADDR_EMPTY) {
1655 #ifdef CONFIG_NVM_DEBUG
1656                 atomic_long_inc(&pblk->padded_wb);
1657 #endif
1658                 pblk_map_invalidate(pblk, ppa);
1659                 return;
1660         }
1661
1662         /* logic error: lba out-of-bounds. Ignore update */
1663         if (!(lba < pblk->rl.nr_secs)) {
1664                 WARN(1, "pblk: corrupted L2P map request\n");
1665                 return;
1666         }
1667
1668         spin_lock(&pblk->trans_lock);
1669         l2p_line = pblk_trans_map_get(pblk, lba);
1670
1671         /* Do not update L2P if the cacheline has been updated. In this case,
1672          * the mapped ppa must be invalidated
1673          */
1674         if (l2p_line.ppa != entry_line.ppa) {
1675                 if (!pblk_ppa_empty(ppa))
1676                         pblk_map_invalidate(pblk, ppa);
1677                 goto out;
1678         }
1679
1680 #ifdef CONFIG_NVM_DEBUG
1681         WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
1682 #endif
1683
1684         pblk_trans_map_set(pblk, lba, ppa);
1685 out:
1686         spin_unlock(&pblk->trans_lock);
1687 }
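/*
 * Note on the three map-update variants above: pblk_update_map_cache()
 * points an lba at a write buffer entry on the write path,
 * pblk_update_map_gc() remaps an lba for GC only while the L2P entry still
 * points to the line being collected, and pblk_update_map_dev() replaces a
 * cache address with a device address once the buffer entry has been
 * persisted (invalidating padded entries).
 */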
1688
1689 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
1690                          sector_t blba, int nr_secs)
1691 {
1692         int i;
1693
1694         spin_lock(&pblk->trans_lock);
1695         for (i = 0; i < nr_secs; i++)
1696                 ppas[i] = pblk_trans_map_get(pblk, blba + i);
1697         spin_unlock(&pblk->trans_lock);
1698 }
1699
1700 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
1701                           u64 *lba_list, int nr_secs)
1702 {
1703         sector_t lba;
1704         int i;
1705
1706         spin_lock(&pblk->trans_lock);
1707         for (i = 0; i < nr_secs; i++) {
1708                 lba = lba_list[i];
1709                 if (lba == ADDR_EMPTY) {
1710                         ppas[i].ppa = ADDR_EMPTY;
1711                 } else {
1712                         /* logic error: lba out-of-bounds. Ignore lookup */
1713                         if (!(lba < pblk->rl.nr_secs)) {
1714                                 WARN(1, "pblk: corrupted L2P map request\n");
1715                                 continue;
1716                         }
1717                         ppas[i] = pblk_trans_map_get(pblk, lba);
1718                 }
1719         }
1720         spin_unlock(&pblk->trans_lock);
1721 }