lightnvm: pblk: fix rqd.error return value in pblk_blk_erase_sync
drivers/lightnvm/pblk-core.c
1 /*
2  * Copyright (C) 2016 CNEX Labs
3  * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4  *                  Matias Bjorling <matias@cnexlabs.com>
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License version
8  * 2 as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License for more details.
14  *
15  * pblk-core.c - pblk's core functionality
16  *
17  */
18
19 #include "pblk.h"
20
21 static void pblk_line_mark_bb(struct work_struct *work)
22 {
23         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
24                                                                         ws);
25         struct pblk *pblk = line_ws->pblk;
26         struct nvm_tgt_dev *dev = pblk->dev;
27         struct ppa_addr *ppa = line_ws->priv;
28         int ret;
29
30         ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
31         if (ret) {
32                 struct pblk_line *line;
33                 int pos;
34
35                 line = &pblk->lines[pblk_ppa_to_line(*ppa)];
36                 pos = pblk_ppa_to_pos(&dev->geo, *ppa);
37
38                 pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
39                                 line->id, pos);
40         }
41
42         kfree(ppa);
43         mempool_free(line_ws, &pblk->gen_ws_pool);
44 }
45
46 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
47                          struct ppa_addr ppa_addr)
48 {
49         struct nvm_tgt_dev *dev = pblk->dev;
50         struct nvm_geo *geo = &dev->geo;
51         struct ppa_addr *ppa;
52         int pos = pblk_ppa_to_pos(geo, ppa_addr);
53
54         pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
55         atomic_long_inc(&pblk->erase_failed);
56
57         atomic_dec(&line->blk_in_line);
58         if (test_and_set_bit(pos, line->blk_bitmap))
59                 pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
60                                                         line->id, pos);
61
62         /* Not necessary to mark bad blocks on the 2.0 spec. */
63         if (geo->version == NVM_OCSSD_SPEC_20)
64                 return;
65
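            /* This may run from the erase completion (interrupt) context, so only
             * atomic allocations are allowed; the bad-block table update itself is
             * deferred to the bb workqueue.
             */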
66         ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
67         if (!ppa)
68                 return;
69
70         *ppa = ppa_addr;
71         pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
72                                                 GFP_ATOMIC, pblk->bb_wq);
73 }
74
75 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
76 {
77         struct nvm_tgt_dev *dev = pblk->dev;
78         struct nvm_geo *geo = &dev->geo;
79         struct nvm_chk_meta *chunk;
80         struct pblk_line *line;
81         int pos;
82
83         line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
84         pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
85         chunk = &line->chks[pos];
86
87         atomic_dec(&line->left_seblks);
88
89         if (rqd->error) {
90                 chunk->state = NVM_CHK_ST_OFFLINE;
91                 pblk_mark_bb(pblk, line, rqd->ppa_addr);
92         } else {
93                 chunk->state = NVM_CHK_ST_FREE;
94         }
95
96         atomic_dec(&pblk->inflight_io);
97 }
98
99 /* Erase completion assumes that only one block is erased at a time */
100 static void pblk_end_io_erase(struct nvm_rq *rqd)
101 {
102         struct pblk *pblk = rqd->private;
103
104         __pblk_end_io_erase(pblk, rqd);
105         mempool_free(rqd, &pblk->e_rq_pool);
106 }
107
108 /*
109  * Get information for all chunks from the device.
110  *
111  * The caller is responsible for freeing the returned structure
112  */
113 struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
114 {
115         struct nvm_tgt_dev *dev = pblk->dev;
116         struct nvm_geo *geo = &dev->geo;
117         struct nvm_chk_meta *meta;
118         struct ppa_addr ppa;
119         unsigned long len;
120         int ret;
121
122         ppa.ppa = 0;
123
124         len = geo->all_chunks * sizeof(*meta);
125         meta = kzalloc(len, GFP_KERNEL);
126         if (!meta)
127                 return ERR_PTR(-ENOMEM);
128
129         ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
130         if (ret) {
131                 kfree(meta);
132                 return ERR_PTR(-EIO);
133         }
134
135         return meta;
136 }
137
138 struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
139                                               struct nvm_chk_meta *meta,
140                                               struct ppa_addr ppa)
141 {
142         struct nvm_tgt_dev *dev = pblk->dev;
143         struct nvm_geo *geo = &dev->geo;
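            /* The chunk metadata array is laid out group-major:
             * [group][parallel unit][chunk].
             */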
144         int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
145         int lun_off = ppa.m.pu * geo->num_chk;
146         int chk_off = ppa.m.chk;
147
148         return meta + ch_off + lun_off + chk_off;
149 }
150
151 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
152                            u64 paddr)
153 {
154         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
155         struct list_head *move_list = NULL;
156
157         /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
158          * table is modified with reclaimed sectors, a check is done to ensure
159          * that newer updates are not overwritten.
160          */
161         spin_lock(&line->lock);
162         WARN_ON(line->state == PBLK_LINESTATE_FREE);
163
164         if (test_and_set_bit(paddr, line->invalid_bitmap)) {
165                 WARN_ONCE(1, "pblk: double invalidate\n");
166                 spin_unlock(&line->lock);
167                 return;
168         }
169         le32_add_cpu(line->vsc, -1);
170
171         if (line->state == PBLK_LINESTATE_CLOSED)
172                 move_list = pblk_line_gc_list(pblk, line);
173         spin_unlock(&line->lock);
174
175         if (move_list) {
176                 spin_lock(&l_mg->gc_lock);
177                 spin_lock(&line->lock);
178                 /* Prevent moving a line that has just been chosen for GC */
179                 if (line->state == PBLK_LINESTATE_GC) {
180                         spin_unlock(&line->lock);
181                         spin_unlock(&l_mg->gc_lock);
182                         return;
183                 }
184                 spin_unlock(&line->lock);
185
186                 list_move_tail(&line->list, move_list);
187                 spin_unlock(&l_mg->gc_lock);
188         }
189 }
190
191 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
192 {
193         struct pblk_line *line;
194         u64 paddr;
195         int line_id;
196
197 #ifdef CONFIG_NVM_PBLK_DEBUG
198         /* Callers must ensure that the ppa points to a device address */
199         BUG_ON(pblk_addr_in_cache(ppa));
200         BUG_ON(pblk_ppa_empty(ppa));
201 #endif
202
203         line_id = pblk_ppa_to_line(ppa);
204         line = &pblk->lines[line_id];
205         paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
206
207         __pblk_map_invalidate(pblk, line, paddr);
208 }
209
210 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
211                                   unsigned int nr_secs)
212 {
213         sector_t lba;
214
215         spin_lock(&pblk->trans_lock);
216         for (lba = slba; lba < slba + nr_secs; lba++) {
217                 struct ppa_addr ppa;
218
219                 ppa = pblk_trans_map_get(pblk, lba);
220
221                 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
222                         pblk_map_invalidate(pblk, ppa);
223
224                 pblk_ppa_set_empty(&ppa);
225                 pblk_trans_map_set(pblk, lba, ppa);
226         }
227         spin_unlock(&pblk->trans_lock);
228 }
229
230 /* Caller must guarantee that the request is a valid type */
231 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
232 {
233         mempool_t *pool;
234         struct nvm_rq *rqd;
235         int rq_size;
236
237         switch (type) {
238         case PBLK_WRITE:
239         case PBLK_WRITE_INT:
240                 pool = &pblk->w_rq_pool;
241                 rq_size = pblk_w_rq_size;
242                 break;
243         case PBLK_READ:
244                 pool = &pblk->r_rq_pool;
245                 rq_size = pblk_g_rq_size;
246                 break;
247         default:
248                 pool = &pblk->e_rq_pool;
249                 rq_size = pblk_g_rq_size;
250         }
251
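            /* mempool_alloc() with GFP_KERNEL waits for a free element rather than
             * failing, so no NULL check is needed here.
             */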
252         rqd = mempool_alloc(pool, GFP_KERNEL);
253         memset(rqd, 0, rq_size);
254
255         return rqd;
256 }
257
258 /* Typically used on completion path. Cannot guarantee request consistency */
259 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
260 {
261         struct nvm_tgt_dev *dev = pblk->dev;
262         mempool_t *pool;
263
264         switch (type) {
265         case PBLK_WRITE:
266                 kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
267                 /* fall through */
268         case PBLK_WRITE_INT:
269                 pool = &pblk->w_rq_pool;
270                 break;
271         case PBLK_READ:
272                 pool = &pblk->r_rq_pool;
273                 break;
274         case PBLK_ERASE:
275                 pool = &pblk->e_rq_pool;
276                 break;
277         default:
278                 pblk_err(pblk, "trying to free unknown rqd type\n");
279                 return;
280         }
281
282         if (rqd->meta_list)
283                 nvm_dev_dma_free(dev->parent, rqd->meta_list,
284                                 rqd->dma_meta_list);
285         mempool_free(rqd, pool);
286 }
287
288 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
289                          int nr_pages)
290 {
291         struct bio_vec bv;
292         int i;
293
294         WARN_ON(off + nr_pages != bio->bi_vcnt);
295
296         for (i = off; i < nr_pages + off; i++) {
297                 bv = bio->bi_io_vec[i];
298                 mempool_free(bv.bv_page, &pblk->page_bio_pool);
299         }
300 }
301
302 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
303                        int nr_pages)
304 {
305         struct request_queue *q = pblk->dev->q;
306         struct page *page;
307         int i, ret;
308
309         for (i = 0; i < nr_pages; i++) {
310                 page = mempool_alloc(&pblk->page_bio_pool, flags);
311
312                 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
313                 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
314                         pblk_err(pblk, "could not add page to bio\n");
315                         mempool_free(page, &pblk->page_bio_pool);
316                         goto err;
317                 }
318         }
319
320         return 0;
321 err:
322         pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
323         return -1;
324 }
325
326 void pblk_write_kick(struct pblk *pblk)
327 {
328         wake_up_process(pblk->writer_ts);
329         mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
330 }
331
332 void pblk_write_timer_fn(struct timer_list *t)
333 {
334         struct pblk *pblk = from_timer(pblk, t, wtimer);
335
336         /* kick the write thread every tick to flush outstanding data */
337         pblk_write_kick(pblk);
338 }
339
340 void pblk_write_should_kick(struct pblk *pblk)
341 {
342         unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
343
344         if (secs_avail >= pblk->min_write_pgs)
345                 pblk_write_kick(pblk);
346 }
347
348 static void pblk_wait_for_meta(struct pblk *pblk)
349 {
350         do {
351                 if (!atomic_read(&pblk->inflight_io))
352                         break;
353
354                 schedule();
355         } while (1);
356 }
357
358 static void pblk_flush_writer(struct pblk *pblk)
359 {
360         pblk_rb_flush(&pblk->rwb);
361         do {
362                 if (!pblk_rb_sync_count(&pblk->rwb))
363                         break;
364
365                 pblk_write_kick(pblk);
366                 schedule();
367         } while (1);
368 }
369
370 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
371 {
372         struct pblk_line_meta *lm = &pblk->lm;
373         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
374         struct list_head *move_list = NULL;
375         int vsc = le32_to_cpu(*line->vsc);
376
377         lockdep_assert_held(&line->lock);
378
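            /* Pick the GC list based on write errors and on how many valid
             * sectors are left in the line.
             */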
379         if (line->w_err_gc->has_write_err) {
380                 if (line->gc_group != PBLK_LINEGC_WERR) {
381                         line->gc_group = PBLK_LINEGC_WERR;
382                         move_list = &l_mg->gc_werr_list;
383                         pblk_rl_werr_line_in(&pblk->rl);
384                 }
385         } else if (!vsc) {
386                 if (line->gc_group != PBLK_LINEGC_FULL) {
387                         line->gc_group = PBLK_LINEGC_FULL;
388                         move_list = &l_mg->gc_full_list;
389                 }
390         } else if (vsc < lm->high_thrs) {
391                 if (line->gc_group != PBLK_LINEGC_HIGH) {
392                         line->gc_group = PBLK_LINEGC_HIGH;
393                         move_list = &l_mg->gc_high_list;
394                 }
395         } else if (vsc < lm->mid_thrs) {
396                 if (line->gc_group != PBLK_LINEGC_MID) {
397                         line->gc_group = PBLK_LINEGC_MID;
398                         move_list = &l_mg->gc_mid_list;
399                 }
400         } else if (vsc < line->sec_in_line) {
401                 if (line->gc_group != PBLK_LINEGC_LOW) {
402                         line->gc_group = PBLK_LINEGC_LOW;
403                         move_list = &l_mg->gc_low_list;
404                 }
405         } else if (vsc == line->sec_in_line) {
406                 if (line->gc_group != PBLK_LINEGC_EMPTY) {
407                         line->gc_group = PBLK_LINEGC_EMPTY;
408                         move_list = &l_mg->gc_empty_list;
409                 }
410         } else {
411                 line->state = PBLK_LINESTATE_CORRUPT;
412                 line->gc_group = PBLK_LINEGC_NONE;
413                 move_list =  &l_mg->corrupt_list;
414                 pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
415                                                 line->id, vsc,
416                                                 line->sec_in_line,
417                                                 lm->high_thrs, lm->mid_thrs);
418         }
419
420         return move_list;
421 }
422
423 void pblk_discard(struct pblk *pblk, struct bio *bio)
424 {
425         sector_t slba = pblk_get_lba(bio);
426         sector_t nr_secs = pblk_get_secs(bio);
427
428         pblk_invalidate_range(pblk, slba, nr_secs);
429 }
430
431 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
432 {
433         atomic_long_inc(&pblk->write_failed);
434 #ifdef CONFIG_NVM_PBLK_DEBUG
435         pblk_print_failed_rqd(pblk, rqd, rqd->error);
436 #endif
437 }
438
439 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
440 {
441         /* Empty page read is not necessarily an error (e.g., L2P recovery) */
442         if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
443                 atomic_long_inc(&pblk->read_empty);
444                 return;
445         }
446
447         switch (rqd->error) {
448         case NVM_RSP_WARN_HIGHECC:
449                 atomic_long_inc(&pblk->read_high_ecc);
450                 break;
451         case NVM_RSP_ERR_FAILECC:
452         case NVM_RSP_ERR_FAILCRC:
453                 atomic_long_inc(&pblk->read_failed);
454                 break;
455         default:
456                 pblk_err(pblk, "unknown read error:%d\n", rqd->error);
457         }
458 #ifdef CONFIG_NVM_PBLK_DEBUG
459         pblk_print_failed_rqd(pblk, rqd, rqd->error);
460 #endif
461 }
462
463 void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
464 {
465         pblk->sec_per_write = sec_per_write;
466 }
467
468 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
469 {
470         struct nvm_tgt_dev *dev = pblk->dev;
471
472         atomic_inc(&pblk->inflight_io);
473
474 #ifdef CONFIG_NVM_PBLK_DEBUG
475         if (pblk_check_io(pblk, rqd))
476                 return NVM_IO_ERR;
477 #endif
478
479         return nvm_submit_io(dev, rqd);
480 }
481
482 int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
483 {
484         struct nvm_tgt_dev *dev = pblk->dev;
485
486         atomic_inc(&pblk->inflight_io);
487
488 #ifdef CONFIG_NVM_PBLK_DEBUG
489         if (pblk_check_io(pblk, rqd))
490                 return NVM_IO_ERR;
491 #endif
492
493         return nvm_submit_io_sync(dev, rqd);
494 }
495
496 static void pblk_bio_map_addr_endio(struct bio *bio)
497 {
498         bio_put(bio);
499 }
500
501 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
502                               unsigned int nr_secs, unsigned int len,
503                               int alloc_type, gfp_t gfp_mask)
504 {
505         struct nvm_tgt_dev *dev = pblk->dev;
506         void *kaddr = data;
507         struct page *page;
508         struct bio *bio;
509         int i, ret;
510
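            /* Buffers allocated with kmalloc are physically contiguous and can be
             * mapped directly; vmalloc'd buffers must be added to the bio page by
             * page.
             */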
511         if (alloc_type == PBLK_KMALLOC_META)
512                 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
513
514         bio = bio_kmalloc(gfp_mask, nr_secs);
515         if (!bio)
516                 return ERR_PTR(-ENOMEM);
517
518         for (i = 0; i < nr_secs; i++) {
519                 page = vmalloc_to_page(kaddr);
520                 if (!page) {
521                         pblk_err(pblk, "could not map vmalloc bio\n");
522                         bio_put(bio);
523                         bio = ERR_PTR(-ENOMEM);
524                         goto out;
525                 }
526
527                 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
528                 if (ret != PAGE_SIZE) {
529                         pblk_err(pblk, "could not add page to bio\n");
530                         bio_put(bio);
531                         bio = ERR_PTR(-ENOMEM);
532                         goto out;
533                 }
534
535                 kaddr += PAGE_SIZE;
536         }
537
538         bio->bi_end_io = pblk_bio_map_addr_endio;
539 out:
540         return bio;
541 }
542
543 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
544                    unsigned long secs_to_flush)
545 {
546         int max = pblk->sec_per_write;
547         int min = pblk->min_write_pgs;
548         int secs_to_sync = 0;
549
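            /* Sync whole multiples of the minimum write size; when fewer than min
             * sectors are available, a minimum-sized write is issued only if a
             * flush is pending.
             */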
550         if (secs_avail >= max)
551                 secs_to_sync = max;
552         else if (secs_avail >= min)
553                 secs_to_sync = min * (secs_avail / min);
554         else if (secs_to_flush)
555                 secs_to_sync = min;
556
557         return secs_to_sync;
558 }
559
560 void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
561 {
562         u64 addr;
563         int i;
564
565         spin_lock(&line->lock);
566         addr = find_next_zero_bit(line->map_bitmap,
567                                         pblk->lm.sec_per_line, line->cur_sec);
568         line->cur_sec = addr - nr_secs;
569
570         for (i = 0; i < nr_secs; i++, line->cur_sec--)
571                 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
572         spin_unlock(&line->lock);
573 }
574
575 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
576 {
577         u64 addr;
578         int i;
579
580         lockdep_assert_held(&line->lock);
581
582         /* logic error: ppa out-of-bounds. Prevent generating bad address */
583         if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
584                 WARN(1, "pblk: page allocation out of bounds\n");
585                 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
586         }
587
588         line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
589                                         pblk->lm.sec_per_line, line->cur_sec);
590         for (i = 0; i < nr_secs; i++, line->cur_sec++)
591                 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
592
593         return addr;
594 }
595
596 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
597 {
598         u64 addr;
599
600         /* Lock needed in case a write fails and a recovery needs to remap
601          * failed write buffer entries
602          */
603         spin_lock(&line->lock);
604         addr = __pblk_alloc_page(pblk, line, nr_secs);
605         line->left_msecs -= nr_secs;
606         WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
607         spin_unlock(&line->lock);
608
609         return addr;
610 }
611
612 u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
613 {
614         u64 paddr;
615
616         spin_lock(&line->lock);
617         paddr = find_next_zero_bit(line->map_bitmap,
618                                         pblk->lm.sec_per_line, line->cur_sec);
619         spin_unlock(&line->lock);
620
621         return paddr;
622 }
623
624 /*
625  * Submit emeta to one LUN in the RAID line at a time to avoid a deadlock when
626  * taking the per LUN semaphore.
627  */
628 static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
629                                      void *emeta_buf, u64 paddr, int dir)
630 {
631         struct nvm_tgt_dev *dev = pblk->dev;
632         struct nvm_geo *geo = &dev->geo;
633         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
634         struct pblk_line_meta *lm = &pblk->lm;
635         void *ppa_list, *meta_list;
636         struct bio *bio;
637         struct nvm_rq rqd;
638         dma_addr_t dma_ppa_list, dma_meta_list;
639         int min = pblk->min_write_pgs;
640         int left_ppas = lm->emeta_sec[0];
641         int id = line->id;
642         int rq_ppas, rq_len;
643         int cmd_op, bio_op;
644         int i, j;
645         int ret;
646
647         if (dir == PBLK_WRITE) {
648                 bio_op = REQ_OP_WRITE;
649                 cmd_op = NVM_OP_PWRITE;
650         } else if (dir == PBLK_READ) {
651                 bio_op = REQ_OP_READ;
652                 cmd_op = NVM_OP_PREAD;
653         } else
654                 return -EINVAL;
655
656         meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
657                                                         &dma_meta_list);
658         if (!meta_list)
659                 return -ENOMEM;
660
661         ppa_list = meta_list + pblk_dma_meta_size;
662         dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
663
664 next_rq:
665         memset(&rqd, 0, sizeof(struct nvm_rq));
666
667         rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
668         rq_len = rq_ppas * geo->csecs;
669
670         bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
671                                         l_mg->emeta_alloc_type, GFP_KERNEL);
672         if (IS_ERR(bio)) {
673                 ret = PTR_ERR(bio);
674                 goto free_rqd_dma;
675         }
676
677         bio->bi_iter.bi_sector = 0; /* internal bio */
678         bio_set_op_attrs(bio, bio_op, 0);
679
680         rqd.bio = bio;
681         rqd.meta_list = meta_list;
682         rqd.ppa_list = ppa_list;
683         rqd.dma_meta_list = dma_meta_list;
684         rqd.dma_ppa_list = dma_ppa_list;
685         rqd.opcode = cmd_op;
686         rqd.nr_ppas = rq_ppas;
687
688         if (dir == PBLK_WRITE) {
689                 struct pblk_sec_meta *meta_list = rqd.meta_list;
690
691                 rqd.is_seq = 1;
692                 for (i = 0; i < rqd.nr_ppas; ) {
693                         spin_lock(&line->lock);
694                         paddr = __pblk_alloc_page(pblk, line, min);
695                         spin_unlock(&line->lock);
696                         for (j = 0; j < min; j++, i++, paddr++) {
697                                 meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
698                                 rqd.ppa_list[i] =
699                                         addr_to_gen_ppa(pblk, paddr, id);
700                         }
701                 }
702         } else {
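                    /* Read path: walk the line and skip over bad blocks recorded in
                     * blk_bitmap while rebuilding the emeta ppa list.
                     */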
703                 for (i = 0; i < rqd.nr_ppas; ) {
704                         struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
705                         int pos = pblk_ppa_to_pos(geo, ppa);
706
707                         if (pblk_io_aligned(pblk, rq_ppas))
708                                 rqd.is_seq = 1;
709
710                         while (test_bit(pos, line->blk_bitmap)) {
711                                 paddr += min;
712                                 if (pblk_boundary_paddr_checks(pblk, paddr)) {
713                                         pblk_err(pblk, "corrupt emeta line:%d\n",
714                                                                 line->id);
715                                         bio_put(bio);
716                                         ret = -EINTR;
717                                         goto free_rqd_dma;
718                                 }
719
720                                 ppa = addr_to_gen_ppa(pblk, paddr, id);
721                                 pos = pblk_ppa_to_pos(geo, ppa);
722                         }
723
724                         if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
725                                 pblk_err(pblk, "corrupt emeta line:%d\n",
726                                                                 line->id);
727                                 bio_put(bio);
728                                 ret = -EINTR;
729                                 goto free_rqd_dma;
730                         }
731
732                         for (j = 0; j < min; j++, i++, paddr++)
733                                 rqd.ppa_list[i] =
734                                         addr_to_gen_ppa(pblk, paddr, line->id);
735                 }
736         }
737
738         ret = pblk_submit_io_sync(pblk, &rqd);
739         if (ret) {
740                 pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
741                 bio_put(bio);
742                 goto free_rqd_dma;
743         }
744
745         atomic_dec(&pblk->inflight_io);
746
747         if (rqd.error) {
748                 if (dir == PBLK_WRITE)
749                         pblk_log_write_err(pblk, &rqd);
750                 else
751                         pblk_log_read_err(pblk, &rqd);
752         }
753
754         emeta_buf += rq_len;
755         left_ppas -= rq_ppas;
756         if (left_ppas)
757                 goto next_rq;
758 free_rqd_dma:
759         nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
760         return ret;
761 }
762
763 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
764 {
765         struct nvm_tgt_dev *dev = pblk->dev;
766         struct nvm_geo *geo = &dev->geo;
767         struct pblk_line_meta *lm = &pblk->lm;
768         int bit;
769
770         /* This usually only happens on bad lines */
771         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
772         if (bit >= lm->blk_per_line)
773                 return -1;
774
775         return bit * geo->ws_opt;
776 }
777
778 static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
779                                      u64 paddr, int dir)
780 {
781         struct nvm_tgt_dev *dev = pblk->dev;
782         struct pblk_line_meta *lm = &pblk->lm;
783         struct bio *bio;
784         struct nvm_rq rqd;
785         __le64 *lba_list = NULL;
786         int i, ret;
787         int cmd_op, bio_op;
788
789         if (dir == PBLK_WRITE) {
790                 bio_op = REQ_OP_WRITE;
791                 cmd_op = NVM_OP_PWRITE;
792                 lba_list = emeta_to_lbas(pblk, line->emeta->buf);
793         } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
794                 bio_op = REQ_OP_READ;
795                 cmd_op = NVM_OP_PREAD;
796         } else
797                 return -EINVAL;
798
799         memset(&rqd, 0, sizeof(struct nvm_rq));
800
801         rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
802                                                         &rqd.dma_meta_list);
803         if (!rqd.meta_list)
804                 return -ENOMEM;
805
806         rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
807         rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
808
809         bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
810         if (IS_ERR(bio)) {
811                 ret = PTR_ERR(bio);
812                 goto free_ppa_list;
813         }
814
815         bio->bi_iter.bi_sector = 0; /* internal bio */
816         bio_set_op_attrs(bio, bio_op, 0);
817
818         rqd.bio = bio;
819         rqd.opcode = cmd_op;
820         rqd.is_seq = 1;
821         rqd.nr_ppas = lm->smeta_sec;
822
823         for (i = 0; i < lm->smeta_sec; i++, paddr++) {
824                 struct pblk_sec_meta *meta_list = rqd.meta_list;
825
826                 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
827
828                 if (dir == PBLK_WRITE) {
829                         __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
830
831                         meta_list[i].lba = lba_list[paddr] = addr_empty;
832                 }
833         }
834
835         /*
836          * This I/O is sent by the write thread when a line is replaced. Since
837          * the write thread is the only one sending write and erase commands,
838          * there is no need to take the LUN semaphore.
839          */
840         ret = pblk_submit_io_sync(pblk, &rqd);
841         if (ret) {
842                 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
843                 bio_put(bio);
844                 goto free_ppa_list;
845         }
846
847         atomic_dec(&pblk->inflight_io);
848
849         if (rqd.error) {
850                 if (dir == PBLK_WRITE) {
851                         pblk_log_write_err(pblk, &rqd);
852                         ret = 1;
853                 } else if (dir == PBLK_READ)
854                         pblk_log_read_err(pblk, &rqd);
855         }
856
857 free_ppa_list:
858         nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
859
860         return ret;
861 }
862
863 int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
864 {
865         u64 bpaddr = pblk_line_smeta_start(pblk, line);
866
867         return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
868 }
869
870 int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
871                          void *emeta_buf)
872 {
873         return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
874                                                 line->emeta_ssec, PBLK_READ);
875 }
876
877 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
878                             struct ppa_addr ppa)
879 {
880         rqd->opcode = NVM_OP_ERASE;
881         rqd->ppa_addr = ppa;
882         rqd->nr_ppas = 1;
883         rqd->is_seq = 1;
884         rqd->bio = NULL;
885 }
886
887 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
888 {
889         struct nvm_rq rqd = {NULL};
890         int ret;
891
892         pblk_setup_e_rq(pblk, &rqd, ppa);
893
894         /* The write thread schedules erases so that it minimizes disturbances
895          * with writes. Thus, there is no need to take the LUN semaphore.
896          */
897         ret = pblk_submit_io_sync(pblk, &rqd);
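            /* Complete the erase inline so that chunk state and bad-block
             * accounting are updated according to rqd.error.
             */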
898         rqd.private = pblk;
899         __pblk_end_io_erase(pblk, &rqd);
900
901         return ret;
902 }
903
904 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
905 {
906         struct pblk_line_meta *lm = &pblk->lm;
907         struct ppa_addr ppa;
908         int ret, bit = -1;
909
910         /* Erase only good blocks, one at a time */
911         do {
912                 spin_lock(&line->lock);
913                 bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
914                                                                 bit + 1);
915                 if (bit >= lm->blk_per_line) {
916                         spin_unlock(&line->lock);
917                         break;
918                 }
919
920                 ppa = pblk->luns[bit].bppa; /* set ch and lun */
921                 ppa.a.blk = line->id;
922
923                 atomic_dec(&line->left_eblks);
924                 WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
925                 spin_unlock(&line->lock);
926
927                 ret = pblk_blk_erase_sync(pblk, ppa);
928                 if (ret) {
929                         pblk_err(pblk, "failed to erase line %d\n", line->id);
930                         return ret;
931                 }
932         } while (1);
933
934         return 0;
935 }
936
937 static void pblk_line_setup_metadata(struct pblk_line *line,
938                                      struct pblk_line_mgmt *l_mg,
939                                      struct pblk_line_meta *lm)
940 {
941         int meta_line;
942
943         lockdep_assert_held(&l_mg->free_lock);
944
945 retry_meta:
946         meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
947         if (meta_line == PBLK_DATA_LINES) {
948                 spin_unlock(&l_mg->free_lock);
949                 io_schedule();
950                 spin_lock(&l_mg->free_lock);
951                 goto retry_meta;
952         }
953
954         set_bit(meta_line, &l_mg->meta_bitmap);
955         line->meta_line = meta_line;
956
957         line->smeta = l_mg->sline_meta[meta_line];
958         line->emeta = l_mg->eline_meta[meta_line];
959
960         memset(line->smeta, 0, lm->smeta_len);
961         memset(line->emeta->buf, 0, lm->emeta_len[0]);
962
963         line->emeta->mem = 0;
964         atomic_set(&line->emeta->sync, 0);
965 }
966
967 /* For now, lines are always assumed to be full lines. Thus, the smeta former
968  * and current LUN bitmaps are omitted.
969  */
970 static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
971                                   struct pblk_line *cur)
972 {
973         struct nvm_tgt_dev *dev = pblk->dev;
974         struct nvm_geo *geo = &dev->geo;
975         struct pblk_line_meta *lm = &pblk->lm;
976         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
977         struct pblk_emeta *emeta = line->emeta;
978         struct line_emeta *emeta_buf = emeta->buf;
979         struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
980         int nr_blk_line;
981
982         /* After erasing the line, new bad blocks might appear and we risk
983          * having an invalid line
984          */
985         nr_blk_line = lm->blk_per_line -
986                         bitmap_weight(line->blk_bitmap, lm->blk_per_line);
987         if (nr_blk_line < lm->min_blk_line) {
988                 spin_lock(&l_mg->free_lock);
989                 spin_lock(&line->lock);
990                 line->state = PBLK_LINESTATE_BAD;
991                 spin_unlock(&line->lock);
992
993                 list_add_tail(&line->list, &l_mg->bad_list);
994                 spin_unlock(&l_mg->free_lock);
995
996                 pblk_debug(pblk, "line %d is bad\n", line->id);
997
998                 return 0;
999         }
1000
1001         /* Run-time metadata */
1002         line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
1003
1004         /* Mark LUNs allocated in this line (all for now) */
1005         bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
1006
1007         smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1008         memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
1009         smeta_buf->header.id = cpu_to_le32(line->id);
1010         smeta_buf->header.type = cpu_to_le16(line->type);
1011         smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
1012         smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
1013
1014         /* Start metadata */
1015         smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1016         smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
1017
1018         /* Fill metadata among lines */
1019         if (cur) {
1020                 memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
1021                 smeta_buf->prev_id = cpu_to_le32(cur->id);
1022                 cur->emeta->buf->next_id = cpu_to_le32(line->id);
1023         } else {
1024                 smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
1025         }
1026
1027         /* All smeta must be set at this point */
1028         smeta_buf->header.crc = cpu_to_le32(
1029                         pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
1030         smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
1031
1032         /* End metadata */
1033         memcpy(&emeta_buf->header, &smeta_buf->header,
1034                                                 sizeof(struct line_header));
1035
1036         emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1037         emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1038         emeta_buf->header.crc = cpu_to_le32(
1039                         pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1040
1041         emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1042         emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
1043         emeta_buf->nr_valid_lbas = cpu_to_le64(0);
1044         emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1045         emeta_buf->crc = cpu_to_le32(0);
1046         emeta_buf->prev_id = smeta_buf->prev_id;
1047
1048         return 1;
1049 }
1050
1051 static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
1052 {
1053         struct pblk_line_meta *lm = &pblk->lm;
1054
1055         line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
1056         if (!line->map_bitmap)
1057                 return -ENOMEM;
1058
1059         /* will be initialized using bb info from map_bitmap */
1060         line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
1061         if (!line->invalid_bitmap) {
1062                 kfree(line->map_bitmap);
1063                 line->map_bitmap = NULL;
1064                 return -ENOMEM;
1065         }
1066
1067         return 0;
1068 }
1069
1070 /* For now, lines are always assumed to be full lines. Thus, the smeta former
1071  * and current LUN bitmaps are omitted.
1072  */
1073 static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1074                              int init)
1075 {
1076         struct nvm_tgt_dev *dev = pblk->dev;
1077         struct nvm_geo *geo = &dev->geo;
1078         struct pblk_line_meta *lm = &pblk->lm;
1079         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1080         u64 off;
1081         int bit = -1;
1082         int emeta_secs;
1083
1084         line->sec_in_line = lm->sec_per_line;
1085
1086         /* Capture bad block information on line mapping bitmaps */
1087         while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1088                                         bit + 1)) < lm->blk_per_line) {
1089                 off = bit * geo->ws_opt;
1090                 bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1091                                                         lm->sec_per_line);
1092                 bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1093                                                         lm->sec_per_line);
1094                 line->sec_in_line -= geo->clba;
1095         }
1096
1097         /* Mark smeta metadata sectors as bad sectors */
1098         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1099         off = bit * geo->ws_opt;
1100         bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1101         line->sec_in_line -= lm->smeta_sec;
1102         line->smeta_ssec = off;
1103         line->cur_sec = off + lm->smeta_sec;
1104
1105         if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
1106                 pblk_debug(pblk, "line smeta I/O failed. Retry\n");
1107                 return 0;
1108         }
1109
1110         bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1111
1112         /* Mark emeta metadata sectors as bad sectors. We need to consider bad
1113          * blocks to make sure that there are enough sectors to store emeta
1114          */
1115         emeta_secs = lm->emeta_sec[0];
1116         off = lm->sec_per_line;
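            /* Walk backwards from the end of the line in ws_opt steps, skipping
             * regions already marked invalid (bad blocks).
             */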
1117         while (emeta_secs) {
1118                 off -= geo->ws_opt;
1119                 if (!test_bit(off, line->invalid_bitmap)) {
1120                         bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
1121                         emeta_secs -= geo->ws_opt;
1122                 }
1123         }
1124
1125         line->emeta_ssec = off;
1126         line->sec_in_line -= lm->emeta_sec[0];
1127         line->nr_valid_lbas = 0;
1128         line->left_msecs = line->sec_in_line;
1129         *line->vsc = cpu_to_le32(line->sec_in_line);
1130
1131         if (lm->sec_per_line - line->sec_in_line !=
1132                 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1133                 spin_lock(&line->lock);
1134                 line->state = PBLK_LINESTATE_BAD;
1135                 spin_unlock(&line->lock);
1136
1137                 list_add_tail(&line->list, &l_mg->bad_list);
1138                 pblk_err(pblk, "unexpected line %d is bad\n", line->id);
1139
1140                 return 0;
1141         }
1142
1143         return 1;
1144 }
1145
1146 static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1147 {
1148         struct pblk_line_meta *lm = &pblk->lm;
1149         struct nvm_tgt_dev *dev = pblk->dev;
1150         struct nvm_geo *geo = &dev->geo;
1151         int blk_to_erase = atomic_read(&line->blk_in_line);
1152         int i;
1153
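            /* Chunks already in the free state do not need an erase; account for
             * them here so only the remaining chunks are erased.
             */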
1154         for (i = 0; i < lm->blk_per_line; i++) {
1155                 struct pblk_lun *rlun = &pblk->luns[i];
1156                 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1157                 int state = line->chks[pos].state;
1158
1159                 /* Free chunks should not be erased */
1160                 if (state & NVM_CHK_ST_FREE) {
1161                         set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
1162                                                         line->erase_bitmap);
1163                         blk_to_erase--;
1164                 }
1165         }
1166
1167         return blk_to_erase;
1168 }
1169
1170 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1171 {
1172         struct pblk_line_meta *lm = &pblk->lm;
1173         int blk_in_line = atomic_read(&line->blk_in_line);
1174         int blk_to_erase;
1175
1176         /* Bad blocks do not need to be erased */
1177         bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1178
1179         spin_lock(&line->lock);
1180
1181         /* If we have not written to this line, we need to mark free chunks
1182          * as already erased
1183          */
1184         if (line->state == PBLK_LINESTATE_NEW) {
1185                 blk_to_erase = pblk_prepare_new_line(pblk, line);
1186                 line->state = PBLK_LINESTATE_FREE;
1187         } else {
1188                 blk_to_erase = blk_in_line;
1189         }
1190
1191         if (blk_in_line < lm->min_blk_line) {
1192                 spin_unlock(&line->lock);
1193                 return -EAGAIN;
1194         }
1195
1196         if (line->state != PBLK_LINESTATE_FREE) {
1197                 WARN(1, "pblk: corrupted line %d, state %d\n",
1198                                                         line->id, line->state);
1199                 spin_unlock(&line->lock);
1200                 return -EINTR;
1201         }
1202
1203         line->state = PBLK_LINESTATE_OPEN;
1204
1205         atomic_set(&line->left_eblks, blk_to_erase);
1206         atomic_set(&line->left_seblks, blk_to_erase);
1207
1208         line->meta_distance = lm->meta_distance;
1209         spin_unlock(&line->lock);
1210
1211         kref_init(&line->ref);
1212
1213         return 0;
1214 }
1215
1216 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1217 {
1218         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1219         int ret;
1220
1221         spin_lock(&l_mg->free_lock);
1222         l_mg->data_line = line;
1223         list_del(&line->list);
1224
1225         ret = pblk_line_prepare(pblk, line);
1226         if (ret) {
1227                 list_add(&line->list, &l_mg->free_list);
1228                 spin_unlock(&l_mg->free_lock);
1229                 return ret;
1230         }
1231         spin_unlock(&l_mg->free_lock);
1232
1233         ret = pblk_line_alloc_bitmaps(pblk, line);
1234         if (ret)
1235                 return ret;
1236
1237         if (!pblk_line_init_bb(pblk, line, 0)) {
1238                 list_add(&line->list, &l_mg->free_list);
1239                 return -EINTR;
1240         }
1241
1242         pblk_rl_free_lines_dec(&pblk->rl, line, true);
1243         return 0;
1244 }
1245
1246 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1247 {
1248         kfree(line->map_bitmap);
1249         line->map_bitmap = NULL;
1250         line->smeta = NULL;
1251         line->emeta = NULL;
1252 }
1253
1254 static void pblk_line_reinit(struct pblk_line *line)
1255 {
1256         *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1257
1258         line->map_bitmap = NULL;
1259         line->invalid_bitmap = NULL;
1260         line->smeta = NULL;
1261         line->emeta = NULL;
1262 }
1263
1264 void pblk_line_free(struct pblk_line *line)
1265 {
1266         kfree(line->map_bitmap);
1267         kfree(line->invalid_bitmap);
1268
1269         pblk_line_reinit(line);
1270 }
1271
1272 struct pblk_line *pblk_line_get(struct pblk *pblk)
1273 {
1274         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1275         struct pblk_line_meta *lm = &pblk->lm;
1276         struct pblk_line *line;
1277         int ret, bit;
1278
1279         lockdep_assert_held(&l_mg->free_lock);
1280
1281 retry:
1282         if (list_empty(&l_mg->free_list)) {
1283                 pblk_err(pblk, "no free lines\n");
1284                 return NULL;
1285         }
1286
1287         line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1288         list_del(&line->list);
1289         l_mg->nr_free_lines--;
1290
1291         bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1292         if (unlikely(bit >= lm->blk_per_line)) {
1293                 spin_lock(&line->lock);
1294                 line->state = PBLK_LINESTATE_BAD;
1295                 spin_unlock(&line->lock);
1296
1297                 list_add_tail(&line->list, &l_mg->bad_list);
1298
1299                 pblk_debug(pblk, "line %d is bad\n", line->id);
1300                 goto retry;
1301         }
1302
1303         ret = pblk_line_prepare(pblk, line);
1304         if (ret) {
1305                 switch (ret) {
1306                 case -EAGAIN:
1307                         list_add(&line->list, &l_mg->bad_list);
1308                         goto retry;
1309                 case -EINTR:
1310                         list_add(&line->list, &l_mg->corrupt_list);
1311                         goto retry;
1312                 default:
1313                         pblk_err(pblk, "failed to prepare line %d\n", line->id);
1314                         list_add(&line->list, &l_mg->free_list);
1315                         l_mg->nr_free_lines++;
1316                         return NULL;
1317                 }
1318         }
1319
1320         return line;
1321 }
1322
1323 static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1324                                          struct pblk_line *line)
1325 {
1326         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1327         struct pblk_line *retry_line;
1328
1329 retry:
1330         spin_lock(&l_mg->free_lock);
1331         retry_line = pblk_line_get(pblk);
1332         if (!retry_line) {
1333                 l_mg->data_line = NULL;
1334                 spin_unlock(&l_mg->free_lock);
1335                 return NULL;
1336         }
1337
1338         retry_line->map_bitmap = line->map_bitmap;
1339         retry_line->invalid_bitmap = line->invalid_bitmap;
1340         retry_line->smeta = line->smeta;
1341         retry_line->emeta = line->emeta;
1342         retry_line->meta_line = line->meta_line;
1343
1344         pblk_line_reinit(line);
1345
1346         l_mg->data_line = retry_line;
1347         spin_unlock(&l_mg->free_lock);
1348
1349         pblk_rl_free_lines_dec(&pblk->rl, line, false);
1350
1351         if (pblk_line_erase(pblk, retry_line))
1352                 goto retry;
1353
1354         return retry_line;
1355 }
1356
1357 static void pblk_set_space_limit(struct pblk *pblk)
1358 {
1359         struct pblk_rl *rl = &pblk->rl;
1360
1361         atomic_set(&rl->rb_space, 0);
1362 }
1363
1364 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1365 {
1366         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1367         struct pblk_line *line;
1368
1369         spin_lock(&l_mg->free_lock);
1370         line = pblk_line_get(pblk);
1371         if (!line) {
1372                 spin_unlock(&l_mg->free_lock);
1373                 return NULL;
1374         }
1375
1376         line->seq_nr = l_mg->d_seq_nr++;
1377         line->type = PBLK_LINETYPE_DATA;
1378         l_mg->data_line = line;
1379
1380         pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1381
1382         /* Allocate next line for preparation */
1383         l_mg->data_next = pblk_line_get(pblk);
1384         if (!l_mg->data_next) {
1385                 /* If we cannot get a new line, we need to stop the pipeline.
1386                  * Only allow as many writes in as we can store safely and then
1387                  * fail gracefully
1388                  */
1389                 pblk_set_space_limit(pblk);
1390
1391                 l_mg->data_next = NULL;
1392         } else {
1393                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1394                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1395         }
1396         spin_unlock(&l_mg->free_lock);
1397
1398         if (pblk_line_alloc_bitmaps(pblk, line))
1399                 return NULL;
1400
1401         if (pblk_line_erase(pblk, line)) {
1402                 line = pblk_line_retry(pblk, line);
1403                 if (!line)
1404                         return NULL;
1405         }
1406
1407 retry_setup:
1408         if (!pblk_line_init_metadata(pblk, line, NULL)) {
1409                 line = pblk_line_retry(pblk, line);
1410                 if (!line)
1411                         return NULL;
1412
1413                 goto retry_setup;
1414         }
1415
1416         if (!pblk_line_init_bb(pblk, line, 1)) {
1417                 line = pblk_line_retry(pblk, line);
1418                 if (!line)
1419                         return NULL;
1420
1421                 goto retry_setup;
1422         }
1423
1424         pblk_rl_free_lines_dec(&pblk->rl, line, true);
1425
1426         return line;
1427 }
1428
1429 static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1430 {
1431         lockdep_assert_held(&pblk->l_mg.free_lock);
1432
1433         pblk_set_space_limit(pblk);
1434         pblk->state = PBLK_STATE_STOPPING;
1435 }
1436
1437 static void pblk_line_close_meta_sync(struct pblk *pblk)
1438 {
1439         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1440         struct pblk_line_meta *lm = &pblk->lm;
1441         struct pblk_line *line, *tline;
1442         LIST_HEAD(list);
1443
1444         spin_lock(&l_mg->close_lock);
1445         if (list_empty(&l_mg->emeta_list)) {
1446                 spin_unlock(&l_mg->close_lock);
1447                 return;
1448         }
1449
1450         list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1451         spin_unlock(&l_mg->close_lock);
1452
1453         list_for_each_entry_safe(line, tline, &list, list) {
1454                 struct pblk_emeta *emeta = line->emeta;
1455
1456                 while (emeta->mem < lm->emeta_len[0]) {
1457                         int ret;
1458
1459                         ret = pblk_submit_meta_io(pblk, line);
1460                         if (ret) {
1461                                 pblk_err(pblk, "sync meta line %d failed (%d)\n",
1462                                                         line->id, ret);
1463                                 return;
1464                         }
1465                 }
1466         }
1467
1468         pblk_wait_for_meta(pblk);
1469         flush_workqueue(pblk->close_wq);
1470 }
1471
1472 void __pblk_pipeline_flush(struct pblk *pblk)
1473 {
1474         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1475         int ret;
1476
1477         spin_lock(&l_mg->free_lock);
1478         if (pblk->state == PBLK_STATE_RECOVERING ||
1479                                         pblk->state == PBLK_STATE_STOPPED) {
1480                 spin_unlock(&l_mg->free_lock);
1481                 return;
1482         }
1483         pblk->state = PBLK_STATE_RECOVERING;
1484         spin_unlock(&l_mg->free_lock);
1485
1486         pblk_flush_writer(pblk);
1487         pblk_wait_for_meta(pblk);
1488
1489         ret = pblk_recov_pad(pblk);
1490         if (ret) {
1491                 pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
1492                 return;
1493         }
1494
1495         flush_workqueue(pblk->bb_wq);
1496         pblk_line_close_meta_sync(pblk);
1497 }
1498
1499 void __pblk_pipeline_stop(struct pblk *pblk)
1500 {
1501         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1502
1503         spin_lock(&l_mg->free_lock);
1504         pblk->state = PBLK_STATE_STOPPED;
1505         l_mg->data_line = NULL;
1506         l_mg->data_next = NULL;
1507         spin_unlock(&l_mg->free_lock);
1508 }
1509
1510 void pblk_pipeline_stop(struct pblk *pblk)
1511 {
1512         __pblk_pipeline_flush(pblk);
1513         __pblk_pipeline_stop(pblk);
1514 }
1515
1516 struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1517 {
1518         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1519         struct pblk_line *cur, *new = NULL;
1520         unsigned int left_seblks;
1521
1522         cur = l_mg->data_line;
1523         new = l_mg->data_next;
1524         if (!new)
1525                 goto out;
1526         l_mg->data_line = new;
1527
1528         spin_lock(&l_mg->free_lock);
1529         pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1530         spin_unlock(&l_mg->free_lock);
1531
1532 retry_erase:
1533         left_seblks = atomic_read(&new->left_seblks);
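            /* left_seblks counts erases that have not yet completed; the new line
             * cannot be used until it drops to zero.
             */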
1534         if (left_seblks) {
1535                 /* If line is not fully erased, erase it */
1536                 if (atomic_read(&new->left_eblks)) {
1537                         if (pblk_line_erase(pblk, new))
1538                                 goto out;
1539                 } else {
1540                         io_schedule();
1541                 }
1542                 goto retry_erase;
1543         }
1544
1545         if (pblk_line_alloc_bitmaps(pblk, new))
1546                 return NULL;
1547
1548 retry_setup:
1549         if (!pblk_line_init_metadata(pblk, new, cur)) {
1550                 new = pblk_line_retry(pblk, new);
1551                 if (!new)
1552                         goto out;
1553
1554                 goto retry_setup;
1555         }
1556
1557         if (!pblk_line_init_bb(pblk, new, 1)) {
1558                 new = pblk_line_retry(pblk, new);
1559                 if (!new)
1560                         goto out;
1561
1562                 goto retry_setup;
1563         }
1564
1565         pblk_rl_free_lines_dec(&pblk->rl, new, true);
1566
1567         /* Allocate next line for preparation */
1568         spin_lock(&l_mg->free_lock);
1569         l_mg->data_next = pblk_line_get(pblk);
1570         if (!l_mg->data_next) {
1571                 /* If we cannot get a new line, we need to stop the pipeline.
1572                  * Only allow as many writes in as we can store safely and then
1573                  * fail gracefully
1574                  */
1575                 pblk_stop_writes(pblk, new);
1576                 l_mg->data_next = NULL;
1577         } else {
1578                 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1579                 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1580         }
1581         spin_unlock(&l_mg->free_lock);
1582
1583 out:
1584         return new;
1585 }
1586
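/*
 * Drop the last reference on a fully garbage-collected line: reset its state
 * to FREE, release its resources, put it back on the free list and credit
 * the rate limiter so the reclaimed capacity becomes available again.
 */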
1587 static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1588 {
1589         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1590         struct pblk_gc *gc = &pblk->gc;
1591
1592         spin_lock(&line->lock);
1593         WARN_ON(line->state != PBLK_LINESTATE_GC);
1594         line->state = PBLK_LINESTATE_FREE;
1595         line->gc_group = PBLK_LINEGC_NONE;
1596         pblk_line_free(line);
1597
1598         if (line->w_err_gc->has_write_err) {
1599                 pblk_rl_werr_line_out(&pblk->rl);
1600                 line->w_err_gc->has_write_err = 0;
1601         }
1602
1603         spin_unlock(&line->lock);
1604         atomic_dec(&gc->pipeline_gc);
1605
1606         spin_lock(&l_mg->free_lock);
1607         list_add_tail(&line->list, &l_mg->free_list);
1608         l_mg->nr_free_lines++;
1609         spin_unlock(&l_mg->free_lock);
1610
1611         pblk_rl_free_lines_inc(&pblk->rl, line);
1612 }
1613
1614 static void pblk_line_put_ws(struct work_struct *work)
1615 {
1616         struct pblk_line_ws *line_put_ws = container_of(work,
1617                                                 struct pblk_line_ws, ws);
1618         struct pblk *pblk = line_put_ws->pblk;
1619         struct pblk_line *line = line_put_ws->line;
1620
1621         __pblk_line_put(pblk, line);
1622         mempool_free(line_put_ws, &pblk->gen_ws_pool);
1623 }
1624
1625 void pblk_line_put(struct kref *ref)
1626 {
1627         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1628         struct pblk *pblk = line->pblk;
1629
1630         __pblk_line_put(pblk, line);
1631 }
1632
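/*
 * Same as pblk_line_put(), but the final put is deferred to a workqueue.
 * This variant is intended for contexts that cannot sleep (e.g. I/O
 * completion), which is also why the work item is allocated with GFP_ATOMIC.
 */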
1633 void pblk_line_put_wq(struct kref *ref)
1634 {
1635         struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1636         struct pblk *pblk = line->pblk;
1637         struct pblk_line_ws *line_put_ws;
1638
1639         line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1640         if (!line_put_ws)
1641                 return;
1642
1643         line_put_ws->pblk = pblk;
1644         line_put_ws->line = line;
1645         line_put_ws->priv = NULL;
1646
1647         INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1648         queue_work(pblk->r_end_wq, &line_put_ws->ws);
1649 }
1650
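/*
 * Fire-and-forget chunk erase: the request completes asynchronously and the
 * outcome is handled by the pblk_end_io_erase() callback set up below.
 */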
1651 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1652 {
1653         struct nvm_rq *rqd;
1654         int err;
1655
1656         rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1657
1658         pblk_setup_e_rq(pblk, rqd, ppa);
1659
1660         rqd->end_io = pblk_end_io_erase;
1661         rqd->private = pblk;
1662
1663         /* The write thread schedules erases so that it minimizes disturbances
1664          * with writes. Thus, there is no need to take the LUN semaphore.
1665          */
1666         err = pblk_submit_io(pblk, rqd);
1667         if (err) {
1668                 struct nvm_tgt_dev *dev = pblk->dev;
1669                 struct nvm_geo *geo = &dev->geo;
1670
1671                 pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1672                                         pblk_ppa_to_line(ppa),
1673                                         pblk_ppa_to_pos(geo, ppa));
1674         }
1675
1676         return err;
1677 }
1678
1679 struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1680 {
1681         return pblk->l_mg.data_line;
1682 }
1683
1684 /* For now, always erase next line */
1685 struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1686 {
1687         return pblk->l_mg.data_next;
1688 }
1689
1690 int pblk_line_is_full(struct pblk_line *line)
1691 {
1692         return (line->left_msecs == 0);
1693 }
1694
1695 static void pblk_line_should_sync_meta(struct pblk *pblk)
1696 {
1697         if (pblk_rl_is_limit(&pblk->rl))
1698                 pblk_line_close_meta_sync(pblk);
1699 }
1700
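/*
 * Close a fully mapped line: move it to the garbage-collection list that
 * matches its amount of valid data, release the map bitmap (no longer needed
 * once every sector is mapped) and mark the line's chunks as closed.
 */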
1701 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1702 {
1703         struct nvm_tgt_dev *dev = pblk->dev;
1704         struct nvm_geo *geo = &dev->geo;
1705         struct pblk_line_meta *lm = &pblk->lm;
1706         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1707         struct list_head *move_list;
1708         int i;
1709
1710 #ifdef CONFIG_NVM_PBLK_DEBUG
1711         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1712                                 "pblk: corrupt closed line %d\n", line->id);
1713 #endif
1714
1715         spin_lock(&l_mg->free_lock);
1716         WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1717         spin_unlock(&l_mg->free_lock);
1718
1719         spin_lock(&l_mg->gc_lock);
1720         spin_lock(&line->lock);
1721         WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1722         line->state = PBLK_LINESTATE_CLOSED;
1723         move_list = pblk_line_gc_list(pblk, line);
1724
1725         list_add_tail(&line->list, move_list);
1726
1727         kfree(line->map_bitmap);
1728         line->map_bitmap = NULL;
1729         line->smeta = NULL;
1730         line->emeta = NULL;
1731
1732         for (i = 0; i < lm->blk_per_line; i++) {
1733                 struct pblk_lun *rlun = &pblk->luns[i];
1734                 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1735                 int state = line->chks[pos].state;
1736
1737                 if (!(state & NVM_CHK_ST_OFFLINE))
1738                         line->chks[pos].state = NVM_CHK_ST_CLOSED;
1739         }
1740
1741         spin_unlock(&line->lock);
1742         spin_unlock(&l_mg->gc_lock);
1743 }
1744
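/*
 * Fill in the line's end metadata (emeta): valid sector counts, bad-block
 * bitmap, write-amplification counters and CRC. The line is then queued on
 * l_mg->emeta_list so that the emeta can later be persisted to the media.
 */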
1745 void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1746 {
1747         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1748         struct pblk_line_meta *lm = &pblk->lm;
1749         struct pblk_emeta *emeta = line->emeta;
1750         struct line_emeta *emeta_buf = emeta->buf;
1751         struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1752
1753         /* No need for exact vsc value; avoid a big line lock and take approx. */
1754         memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1755         memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1756
1757         wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1758         wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1759         wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1760
1761         emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1762         emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1763
1764         spin_lock(&l_mg->close_lock);
1765         spin_lock(&line->lock);
1766
1767         /* Update the in-memory start address for emeta, in case it has
1768          * shifted due to write errors
1769          */
1770         if (line->emeta_ssec != line->cur_sec)
1771                 line->emeta_ssec = line->cur_sec;
1772
1773         list_add_tail(&line->list, &l_mg->emeta_list);
1774         spin_unlock(&line->lock);
1775         spin_unlock(&l_mg->close_lock);
1776
1777         pblk_line_should_sync_meta(pblk);
1780 }
1781
1782 static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1783 {
1784         struct pblk_line_meta *lm = &pblk->lm;
1785         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1786         unsigned int lba_list_size = lm->emeta_len[2];
1787         struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1788         struct pblk_emeta *emeta = line->emeta;
1789
1790         w_err_gc->lba_list = pblk_malloc(lba_list_size,
1791                                          l_mg->emeta_alloc_type, GFP_KERNEL);
1792         memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1793                                 lba_list_size);
1794 }
1795
1796 void pblk_line_close_ws(struct work_struct *work)
1797 {
1798         struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1799                                                                         ws);
1800         struct pblk *pblk = line_ws->pblk;
1801         struct pblk_line *line = line_ws->line;
1802         struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1803
1804         /* Write errors make the emeta start address stored in smeta invalid,
1805          * so keep a copy of the lba list until we've gc'd the line
1806          */
1807         if (w_err_gc->has_write_err)
1808                 pblk_save_lba_list(pblk, line);
1809
1810         pblk_line_close(pblk, line);
1811         mempool_free(line_ws, &pblk->gen_ws_pool);
1812 }
1813
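/*
 * Generic helper to run a function in workqueue context: allocate a
 * pblk_line_ws work item from the gen_ws_pool mempool, fill it in and queue
 * it on the given workqueue. pblk_line_close_ws() above, for instance, is
 * typically scheduled through this helper from the write completion path.
 */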
1814 void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1815                       void (*work)(struct work_struct *), gfp_t gfp_mask,
1816                       struct workqueue_struct *wq)
1817 {
1818         struct pblk_line_ws *line_ws;
1819
1820         line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
1821
1822         line_ws->pblk = pblk;
1823         line_ws->line = line;
1824         line_ws->priv = priv;
1825
1826         INIT_WORK(&line_ws->ws, work);
1827         queue_work(wq, &line_ws->ws);
1828 }
1829
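/*
 * Take the per-LUN write semaphore for this request. The semaphore keeps at
 * most one write in flight per LUN; the 30 second down_timeout() below
 * avoids blocking the writer indefinitely if the semaphore is never
 * released.
 */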
1830 static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
1831                              int nr_ppas, int pos)
1832 {
1833         struct pblk_lun *rlun = &pblk->luns[pos];
1834         int ret;
1835
1836         /*
1837          * Only send one inflight I/O per LUN. Since we map at a page
1838          * granularity, all ppas in the I/O will map to the same LUN
1839          */
1840 #ifdef CONFIG_NVM_PBLK_DEBUG
1841         int i;
1842
1843         for (i = 1; i < nr_ppas; i++)
1844                 WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1845                                 ppa_list[0].a.ch != ppa_list[i].a.ch);
1846 #endif
1847
1848         ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1849         if (ret == -ETIME || ret == -EINTR)
1850                 pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1851                                 -ret);
1852 }
1853
1854 void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1855 {
1856         struct nvm_tgt_dev *dev = pblk->dev;
1857         struct nvm_geo *geo = &dev->geo;
1858         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1859
1860         __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1861 }
1862
1863 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1864                   unsigned long *lun_bitmap)
1865 {
1866         struct nvm_tgt_dev *dev = pblk->dev;
1867         struct nvm_geo *geo = &dev->geo;
1868         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1869
1870         /* If the LUN has been locked for this same request, do not attempt to
1871          * lock it again
1872          */
1873         if (test_and_set_bit(pos, lun_bitmap))
1874                 return;
1875
1876         __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1877 }
1878
1879 void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1880 {
1881         struct nvm_tgt_dev *dev = pblk->dev;
1882         struct nvm_geo *geo = &dev->geo;
1883         struct pblk_lun *rlun;
1884         int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1885
1886 #ifdef CONFIG_NVM_PBLK_DEBUG
1887         int i;
1888
1889         for (i = 1; i < nr_ppas; i++)
1890                 WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1891                                 ppa_list[0].a.ch != ppa_list[i].a.ch);
1892 #endif
1893
1894         rlun = &pblk->luns[pos];
1895         up(&rlun->wr_sem);
1896 }
1897
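/*
 * Release the write semaphore of every LUN marked in lun_bitmap, i.e. all
 * LUNs that pblk_down_rq() took for this request.
 */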
1898 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1899                 unsigned long *lun_bitmap)
1900 {
1901         struct nvm_tgt_dev *dev = pblk->dev;
1902         struct nvm_geo *geo = &dev->geo;
1903         struct pblk_lun *rlun;
1904         int num_lun = geo->all_luns;
1905         int bit = -1;
1906
1907         while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
1908                 rlun = &pblk->luns[bit];
1909                 up(&rlun->wr_sem);
1910         }
1911 }
1912
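/*
 * Generic L2P (logical-to-physical) map update: under trans_lock, the old
 * entry is invalidated if it pointed to the device, and the new address is
 * stored. The _cache, _gc and _dev variants below add the extra checks
 * needed when garbage collection or the write path races with newer user
 * writes to the same lba.
 */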
1913 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1914 {
1915         struct ppa_addr ppa_l2p;
1916
1917         /* logic error: lba out-of-bounds. Ignore update */
1918         if (!(lba < pblk->rl.nr_secs)) {
1919                 WARN(1, "pblk: corrupted L2P map request\n");
1920                 return;
1921         }
1922
1923         spin_lock(&pblk->trans_lock);
1924         ppa_l2p = pblk_trans_map_get(pblk, lba);
1925
1926         if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
1927                 pblk_map_invalidate(pblk, ppa_l2p);
1928
1929         pblk_trans_map_set(pblk, lba, ppa);
1930         spin_unlock(&pblk->trans_lock);
1931 }
1932
1933 void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1934 {
1935
1936 #ifdef CONFIG_NVM_PBLK_DEBUG
1937         /* Callers must ensure that the ppa points to a cache address */
1938         BUG_ON(!pblk_addr_in_cache(ppa));
1939         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1940 #endif
1941
1942         pblk_update_map(pblk, lba, ppa);
1943 }
1944
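/*
 * L2P update from the garbage collector. The entry is only rewritten if it
 * still points to the sector being garbage collected (ppa_gc); if a user
 * write raced in and remapped the lba, the GC copy is stale and the update
 * is skipped (return 0).
 */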
1945 int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
1946                        struct pblk_line *gc_line, u64 paddr_gc)
1947 {
1948         struct ppa_addr ppa_l2p, ppa_gc;
1949         int ret = 1;
1950
1951 #ifdef CONFIG_NVM_PBLK_DEBUG
1952         /* Callers must ensure that the ppa points to a cache address */
1953         BUG_ON(!pblk_addr_in_cache(ppa_new));
1954         BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
1955 #endif
1956
1957         /* logic error: lba out-of-bounds. Ignore update */
1958         if (!(lba < pblk->rl.nr_secs)) {
1959                 WARN(1, "pblk: corrupted L2P map request\n");
1960                 return 0;
1961         }
1962
1963         spin_lock(&pblk->trans_lock);
1964         ppa_l2p = pblk_trans_map_get(pblk, lba);
1965         ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
1966
1967         if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
1968                 spin_lock(&gc_line->lock);
1969                 WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
1970                                                 "pblk: corrupted GC update");
1971                 spin_unlock(&gc_line->lock);
1972
1973                 ret = 0;
1974                 goto out;
1975         }
1976
1977         pblk_trans_map_set(pblk, lba, ppa_new);
1978 out:
1979         spin_unlock(&pblk->trans_lock);
1980         return ret;
1981 }
1982
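/*
 * L2P update when a write-buffer entry has been persisted to the device.
 * Padded entries are discarded, and the map is only switched from the cache
 * address to the device address if the cacheline was not updated by a newer
 * write in the meantime.
 */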
1983 void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
1984                          struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
1985 {
1986         struct ppa_addr ppa_l2p;
1987
1988 #ifdef CONFIG_NVM_PBLK_DEBUG
1989         /* Callers must ensure that the ppa points to a device address */
1990         BUG_ON(pblk_addr_in_cache(ppa_mapped));
1991 #endif
1992         /* Invalidate and discard padded entries */
1993         if (lba == ADDR_EMPTY) {
1994                 atomic64_inc(&pblk->pad_wa);
1995 #ifdef CONFIG_NVM_PBLK_DEBUG
1996                 atomic_long_inc(&pblk->padded_wb);
1997 #endif
1998                 if (!pblk_ppa_empty(ppa_mapped))
1999                         pblk_map_invalidate(pblk, ppa_mapped);
2000                 return;
2001         }
2002
2003         /* logic error: lba out-of-bounds. Ignore update */
2004         if (!(lba < pblk->rl.nr_secs)) {
2005                 WARN(1, "pblk: corrupted L2P map request\n");
2006                 return;
2007         }
2008
2009         spin_lock(&pblk->trans_lock);
2010         ppa_l2p = pblk_trans_map_get(pblk, lba);
2011
2012         /* Do not update L2P if the cacheline has been updated. In this case,
2013          * the mapped ppa must be invalidated
2014          */
2015         if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2016                 if (!pblk_ppa_empty(ppa_mapped))
2017                         pblk_map_invalidate(pblk, ppa_mapped);
2018                 goto out;
2019         }
2020
2021 #ifdef CONFIG_NVM_PBLK_DEBUG
2022         WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2023 #endif
2024
2025         pblk_trans_map_set(pblk, lba, ppa_mapped);
2026 out:
2027         spin_unlock(&pblk->trans_lock);
2028 }
2029
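/*
 * Sequential L2P lookup for the read path. For every entry that maps to a
 * line on the device, a reference is taken on that line so it cannot be
 * freed and reused while the read is in flight; the reader typically drops
 * it again on completion, e.g. via kref_put(&line->ref, pblk_line_put_wq).
 */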
2030 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2031                          sector_t blba, int nr_secs)
2032 {
2033         int i;
2034
2035         spin_lock(&pblk->trans_lock);
2036         for (i = 0; i < nr_secs; i++) {
2037                 struct ppa_addr ppa;
2038
2039                 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2040
2041                 /* If the L2P entry maps to a line, the reference is valid */
2042                 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2043                         int line_id = pblk_ppa_to_line(ppa);
2044                         struct pblk_line *line = &pblk->lines[line_id];
2045
2046                         kref_get(&line->ref);
2047                 }
2048         }
2049         spin_unlock(&pblk->trans_lock);
2050 }
2051
2052 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2053                           u64 *lba_list, int nr_secs)
2054 {
2055         u64 lba;
2056         int i;
2057
2058         spin_lock(&pblk->trans_lock);
2059         for (i = 0; i < nr_secs; i++) {
2060                 lba = lba_list[i];
2061                 if (lba != ADDR_EMPTY) {
2062                         /* logic error: lba out-of-bounds. Ignore update */
2063                         if (!(lba < pblk->rl.nr_secs)) {
2064                                 WARN(1, "pblk: corrupted L2P map request\n");
2065                                 continue;
2066                         }
2067                         ppas[i] = pblk_trans_map_get(pblk, lba);
2068                 }
2069         }
2070         spin_unlock(&pblk->trans_lock);
2071 }