drivers/lightnvm/gennvm.c
/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"

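/*
 * Reserve a contiguous range of logical sectors on the device. The area
 * list is kept sorted by start sector; the loop below does a first-fit
 * scan and places the new area in the first gap large enough to hold
 * @len sectors, returning its start sector through @lba.
 */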
static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
        struct gen_nvm *gn = dev->mp;
        struct gennvm_area *area, *prev, *next;
        sector_t begin = 0;
        sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;

        if (len > max_sectors)
                return -EINVAL;

        area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
        if (!area)
                return -ENOMEM;

        prev = NULL;

        spin_lock(&dev->lock);
        list_for_each_entry(next, &gn->area_list, list) {
                if (begin + len > next->begin) {
                        begin = next->end;
                        prev = next;
                        continue;
                }
                break;
        }

        if ((begin + len) > max_sectors) {
                spin_unlock(&dev->lock);
                kfree(area);
                return -EINVAL;
        }

        area->begin = *lba = begin;
        area->end = begin + len;

        if (prev) /* insert into sorted order */
                list_add(&area->list, &prev->list);
        else
                list_add(&area->list, &gn->area_list);
        spin_unlock(&dev->lock);

        return 0;
}

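/* Release an area previously reserved with gennvm_get_area(), looked up
 * by its start sector.
 */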
static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
{
        struct gen_nvm *gn = dev->mp;
        struct gennvm_area *area;

        spin_lock(&dev->lock);
        list_for_each_entry(area, &gn->area_list, list) {
                if (area->begin != begin)
                        continue;

                list_del(&area->list);
                spin_unlock(&dev->lock);
                kfree(area);
                return;
        }
        spin_unlock(&dev->lock);
}

static void gennvm_blocks_free(struct nvm_dev *dev)
{
        struct gen_nvm *gn = dev->mp;
        struct gen_lun *lun;
        int i;

        gennvm_for_each_lun(gn, lun, i) {
                if (!lun->vlun.blocks)
                        break;
                vfree(lun->vlun.blocks);
        }
}

static void gennvm_luns_free(struct nvm_dev *dev)
{
        struct gen_nvm *gn = dev->mp;

        kfree(gn->luns);
}

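/*
 * Allocate the lun array and initialize each lun's lists and counters.
 * The flat lun index is decomposed into a per-channel lun id and a
 * channel id, and a couple of blocks per lun are held back for GC.
 */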
static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
        struct gen_lun *lun;
        int i;

        gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
        if (!gn->luns)
                return -ENOMEM;

        gennvm_for_each_lun(gn, lun, i) {
                spin_lock_init(&lun->vlun.lock);
                INIT_LIST_HEAD(&lun->free_list);
                INIT_LIST_HEAD(&lun->used_list);
                INIT_LIST_HEAD(&lun->bb_list);

                lun->reserved_blocks = 2; /* for GC only */
                lun->vlun.id = i;
                lun->vlun.lun_id = i % dev->luns_per_chnl;
                lun->vlun.chnl_id = i / dev->luns_per_chnl;
                lun->vlun.nr_free_blocks = dev->blks_per_lun;
                lun->vlun.nr_open_blocks = 0;
                lun->vlun.nr_closed_blocks = 0;
                lun->vlun.nr_bad_blocks = 0;
        }
        return 0;
}

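/*
 * Callback for the device bad-block table: every block flagged in @blks
 * is moved to the lun's bad-block list and taken out of the free count.
 */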
static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
                                                                void *private)
{
        struct gen_nvm *gn = private;
        struct nvm_dev *dev = gn->dev;
        struct gen_lun *lun;
        struct nvm_block *blk;
        int i;

        lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

        for (i = 0; i < nr_blocks; i++) {
                if (blks[i] == 0)
                        continue;

                blk = &lun->vlun.blocks[i];
                if (!blk) {
                        pr_err("gennvm: BB data is out of bounds.\n");
                        return -EINVAL;
                }

                list_move_tail(&blk->list, &lun->bb_list);
                lun->vlun.nr_bad_blocks++;
                lun->vlun.nr_free_blocks--;
        }

        return 0;
}

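/*
 * Callback for the device logical-to-physical table: any block that
 * holds at least one mapped sector is moved to the used list and marked
 * open, so the FTL above can rebuild its state from it.
 */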
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
        struct nvm_dev *dev = private;
        struct gen_nvm *gn = dev->mp;
        u64 elba = slba + nlb;
        struct gen_lun *lun;
        struct nvm_block *blk;
        u64 i;
        int lun_id;

        if (unlikely(elba > dev->total_secs)) {
                pr_err("gennvm: L2P data from device is out of bounds!\n");
                return -EINVAL;
        }

        for (i = 0; i < nlb; i++) {
                u64 pba = le64_to_cpu(entries[i]);

                if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
                        pr_err("gennvm: L2P data entry is out of bounds!\n");
                        return -EINVAL;
                }

                /* Address zero is a special one. The first page on a disk is
                 * protected. It often holds internal device boot
                 * information.
                 */
                if (!pba)
                        continue;

                /* resolve block from physical address */
                lun_id = div_u64(pba, dev->sec_per_lun);
                lun = &gn->luns[lun_id];

                /* Calculate block offset into lun */
                pba = pba - (dev->sec_per_lun * lun_id);
                blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

                if (!blk->state) {
                        /* at this point, we don't know anything about the
                         * block. It's up to the FTL on top to re-establish the
                         * block state. The block is assumed to be open.
                         */
                        list_move_tail(&blk->list, &lun->used_list);
                        blk->state = NVM_BLK_ST_OPEN;
                        lun->vlun.nr_free_blocks--;
                        lun->vlun.nr_open_blocks++;
                }
        }

        return 0;
}

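/*
 * Build the per-lun block arrays: every block starts on the free list
 * (except block 0 of lun 0, which is reserved for the device), then the
 * bad-block table and, if supported, the L2P table are folded in.
 */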
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
        struct gen_lun *lun;
        struct nvm_block *block;
        sector_t lun_iter, blk_iter, cur_block_id = 0;
        int ret;

        gennvm_for_each_lun(gn, lun, lun_iter) {
                lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
                                                        dev->blks_per_lun);
                if (!lun->vlun.blocks)
                        return -ENOMEM;

                for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
                        block = &lun->vlun.blocks[blk_iter];

                        INIT_LIST_HEAD(&block->list);

                        block->lun = &lun->vlun;
                        block->id = cur_block_id++;

                        /* First block is reserved for device */
                        if (unlikely(lun_iter == 0 && blk_iter == 0)) {
                                lun->vlun.nr_free_blocks--;
                                continue;
                        }

                        list_add_tail(&block->list, &lun->free_list);
                }

                if (dev->ops->get_bb_tbl) {
                        struct ppa_addr ppa;

                        ppa.ppa = 0;
                        ppa.g.ch = lun->vlun.chnl_id;
                        ppa.g.lun = lun->vlun.id;
                        ppa = generic_to_dev_addr(dev, ppa);

                        ret = dev->ops->get_bb_tbl(dev, ppa,
                                                dev->blks_per_lun,
                                                gennvm_block_bb, gn);
                        if (ret)
                                pr_err("gennvm: could not read BB table\n");
                }
        }

        if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
                ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
                                                        gennvm_block_map, dev);
                if (ret) {
                        pr_err("gennvm: could not read L2P table.\n");
                        pr_warn("gennvm: falling back to default block initialization\n");
                }
        }

        return 0;
}

static void gennvm_free(struct nvm_dev *dev)
{
        gennvm_blocks_free(dev);
        gennvm_luns_free(dev);
        kfree(dev->mp);
        dev->mp = NULL;
}

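/*
 * Media manager registration entry point: allocate the gen_nvm instance
 * and build the lun and block metadata. Returns 1 on success or a
 * negative errno on failure.
 */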
static int gennvm_register(struct nvm_dev *dev)
{
        struct gen_nvm *gn;
        int ret;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
        if (!gn) {
                module_put(THIS_MODULE);
                return -ENOMEM;
        }

        gn->dev = dev;
        gn->nr_luns = dev->nr_luns;
        INIT_LIST_HEAD(&gn->area_list);
        dev->mp = gn;

        ret = gennvm_luns_init(dev, gn);
        if (ret) {
                pr_err("gennvm: could not initialize luns\n");
                goto err;
        }

        ret = gennvm_blocks_init(dev, gn);
        if (ret) {
                pr_err("gennvm: could not initialize blocks\n");
                goto err;
        }

        return 1;
err:
        gennvm_free(dev);
        module_put(THIS_MODULE);
        return ret;
}

static void gennvm_unregister(struct nvm_dev *dev)
{
        gennvm_free(dev);
        module_put(THIS_MODULE);
}

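/*
 * Take the first block off the lun's free list and mark it open. Blocks
 * reserved for garbage collection are only handed out when the request
 * carries NVM_IOTYPE_GC. The caller must hold the lun lock.
 */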
static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
                                struct nvm_lun *vlun, unsigned long flags)
{
        struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
        struct nvm_block *blk = NULL;
        int is_gc = flags & NVM_IOTYPE_GC;

        assert_spin_locked(&vlun->lock);

        if (list_empty(&lun->free_list)) {
                pr_err_ratelimited("gennvm: lun %u has no free pages available\n",
                                                                lun->vlun.id);
                goto out;
        }

        if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
                goto out;

        blk = list_first_entry(&lun->free_list, struct nvm_block, list);
        list_move_tail(&blk->list, &lun->used_list);
        blk->state = NVM_BLK_ST_OPEN;

        lun->vlun.nr_free_blocks--;
        lun->vlun.nr_open_blocks++;

out:
        return blk;
}

static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
                                struct nvm_lun *vlun, unsigned long flags)
{
        struct nvm_block *blk;

        spin_lock(&vlun->lock);
        blk = gennvm_get_blk_unlocked(dev, vlun, flags);
        spin_unlock(&vlun->lock);
        return blk;
}

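/*
 * Return a block to its lun. Open and closed blocks go back on the free
 * list; bad blocks (or blocks in an unknown state) are parked on the
 * bad-block list. The caller must hold the lun lock.
 */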
static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
        struct nvm_lun *vlun = blk->lun;
        struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

        assert_spin_locked(&vlun->lock);

        if (blk->state & NVM_BLK_ST_OPEN) {
                list_move_tail(&blk->list, &lun->free_list);
                lun->vlun.nr_open_blocks--;
                lun->vlun.nr_free_blocks++;
                blk->state = NVM_BLK_ST_FREE;
        } else if (blk->state & NVM_BLK_ST_CLOSED) {
                list_move_tail(&blk->list, &lun->free_list);
                lun->vlun.nr_closed_blocks--;
                lun->vlun.nr_free_blocks++;
                blk->state = NVM_BLK_ST_FREE;
        } else if (blk->state & NVM_BLK_ST_BAD) {
                list_move_tail(&blk->list, &lun->bb_list);
                lun->vlun.nr_bad_blocks++;
                blk->state = NVM_BLK_ST_BAD;
        } else {
                WARN_ON_ONCE(1);
                pr_err("gennvm: erroneous block type (%lu -> %u)\n",
                                                        blk->id, blk->state);
                list_move_tail(&blk->list, &lun->bb_list);
                lun->vlun.nr_bad_blocks++;
                blk->state = NVM_BLK_ST_BAD;
        }
}

static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
        struct nvm_lun *vlun = blk->lun;

        spin_lock(&vlun->lock);
        gennvm_put_blk_unlocked(dev, blk);
        spin_unlock(&vlun->lock);
}

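/*
 * Record a new state for the block addressed by @ppa, after checking
 * that the channel, lun and block indices are within device bounds.
 */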
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
                                                                int type)
{
        struct gen_nvm *gn = dev->mp;
        struct gen_lun *lun;
        struct nvm_block *blk;

        if (unlikely(ppa->g.ch > dev->nr_chnls ||
                                        ppa->g.lun > dev->luns_per_chnl ||
                                        ppa->g.blk > dev->blks_per_lun)) {
                WARN_ON_ONCE(1);
                pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u)\n",
                                ppa->g.ch, dev->nr_chnls,
                                ppa->g.lun, dev->luns_per_chnl,
                                ppa->g.blk, dev->blks_per_lun);
                return;
        }

        lun = &gn->luns[(dev->luns_per_chnl * ppa->g.ch) + ppa->g.lun];
        blk = &lun->vlun.blocks[ppa->g.blk];

        /* will be moved to bb list on put_blk from target */
        blk->state = type;
}

/* Mark a block bad. It is expected that the target recovers from the error. */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        int i;

        if (!dev->ops->set_bb_tbl)
                return;

        if (dev->ops->set_bb_tbl(dev, rqd, 1))
                return;

        nvm_addr_to_generic_mode(dev, rqd);

        /* look up blocks and mark them as bad */
        if (rqd->nr_pages > 1)
                for (i = 0; i < rqd->nr_pages; i++)
                        gennvm_blk_set_type(dev, &rqd->ppa_list[i],
                                                NVM_BLK_ST_BAD);
        else
                gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
}

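/*
 * Completion hook installed on every request submitted through this
 * manager: failed writes mark the affected blocks bad before the
 * completion is forwarded to the owning target.
 */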
static void gennvm_end_io(struct nvm_rq *rqd)
{
        struct nvm_tgt_instance *ins = rqd->ins;

        switch (rqd->error) {
        case NVM_RSP_SUCCESS:
        case NVM_RSP_ERR_EMPTYPAGE:
                break;
        case NVM_RSP_ERR_FAILWRITE:
                gennvm_mark_blk_bad(rqd->dev, rqd);
        }

        ins->tt->end_io(rqd);
}

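/*
 * Translate the request from generic to device address format, hook up
 * the gennvm completion handler and hand the request to the driver.
 */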
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        if (!dev->ops->submit_io)
                return -ENODEV;

        /* Convert address space */
        nvm_generic_to_addr_mode(dev, rqd);

        rqd->dev = dev;
        rqd->end_io = gennvm_end_io;
        return dev->ops->submit_io(dev, rqd);
}

static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
                                                        unsigned long flags)
{
        struct ppa_addr addr = block_to_ppa(dev, blk);

        return nvm_erase_ppa(dev, &addr, 1);
}

static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
{
        return test_and_set_bit(lunid, dev->lun_map);
}

static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
{
        WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}

static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
        struct gen_nvm *gn = dev->mp;

        if (unlikely(lunid >= dev->nr_luns))
                return NULL;

        return &gn->luns[lunid].vlun;
}

static void gennvm_lun_info_print(struct nvm_dev *dev)
{
        struct gen_nvm *gn = dev->mp;
        struct gen_lun *lun;
        unsigned int i;

        gennvm_for_each_lun(gn, lun, i) {
                spin_lock(&lun->vlun.lock);

                pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
                                dev->name, i,
                                lun->vlun.nr_free_blocks,
                                lun->vlun.nr_open_blocks,
                                lun->vlun.nr_closed_blocks,
                                lun->vlun.nr_bad_blocks);

                spin_unlock(&lun->vlun.lock);
        }
}

static struct nvmm_type gennvm = {
        .name                   = "gennvm",
        .version                = {0, 1, 0},

        .register_mgr           = gennvm_register,
        .unregister_mgr         = gennvm_unregister,

        .get_blk_unlocked       = gennvm_get_blk_unlocked,
        .put_blk_unlocked       = gennvm_put_blk_unlocked,

        .get_blk                = gennvm_get_blk,
        .put_blk                = gennvm_put_blk,

        .submit_io              = gennvm_submit_io,
        .erase_blk              = gennvm_erase_blk,

        .get_lun                = gennvm_get_lun,
        .reserve_lun            = gennvm_reserve_lun,
        .release_lun            = gennvm_release_lun,
        .lun_info_print         = gennvm_lun_info_print,

        .get_area               = gennvm_get_area,
        .put_area               = gennvm_put_area,
};

static int __init gennvm_module_init(void)
{
        return nvm_register_mgr(&gennvm);
}

static void gennvm_module_exit(void)
{
        nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");