/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a general nvm manager for Open-Channel SSDs.
 */

#include "gen.h"

static struct nvm_target *gen_find_target(struct gen_dev *gn, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &gn->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static const struct block_device_operations gen_fops = {
	.owner		= THIS_MODULE,
};

static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct gen_dev *gn = dev->mp;
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	void *targetdata;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&gn->lock);
	t = gen_find_target(gn, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&gn->lock);
		return -EINVAL;
	}
	mutex_unlock(&gn->lock);

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue)
		goto err_t;
	blk_queue_make_request(tqueue, tt->make_rq);

	tdisk = alloc_disk(0);
	if (!tdisk)
		goto err_queue;

	sprintf(tdisk->disk_name, "%s", create->tgtname);
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &gen_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
	if (IS_ERR(targetdata))
		goto err_init;

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	t->type = tt;
	t->disk = tdisk;
	t->dev = dev;

	mutex_lock(&gn->lock);
	list_add_tail(&t->list, &gn->targets);
	mutex_unlock(&gn->lock);

	return 0;
err_init:
	put_disk(tdisk);
err_queue:
	blk_cleanup_queue(tqueue);
err_t:
	kfree(t);
	return -ENOMEM;
}

static void __gen_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}

/**
 * gen_remove_tgt - Removes a target from the media manager
 * @dev:	device from which the target is removed
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 */
static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct gen_dev *gn = dev->mp;
	struct nvm_target *t;

	if (!gn)
		return 1;

	mutex_lock(&gn->lock);
	t = gen_find_target(gn, remove->tgtname);
	if (!t) {
		mutex_unlock(&gn->lock);
		return 1;
	}
	__gen_remove_target(t);
	mutex_unlock(&gn->lock);

	return 0;
}

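/*
 * Reserve a contiguous range of device sectors for a target. The area list
 * is kept sorted by start sector; the first gap large enough to hold len
 * sectors is claimed and its start sector is returned through lba.
 */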
static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
	struct gen_dev *gn = dev->mp;
	struct gen_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct gen_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &gn->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &gn->area_list);
	spin_unlock(&dev->lock);

	return 0;
}

static void gen_put_area(struct nvm_dev *dev, sector_t begin)
{
	struct gen_dev *gn = dev->mp;
	struct gen_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &gn->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}

static void gen_blocks_free(struct nvm_dev *dev)
{
	struct gen_dev *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gen_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}

static void gen_luns_free(struct nvm_dev *dev)
{
	struct gen_dev *gn = dev->mp;

	kfree(gn->luns);
}

static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gen_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */

		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
	}

	return 0;
}

static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
			u8 *blks, int nr_blks)
{
	struct nvm_dev *dev = gn->dev;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == NVM_BLK_T_FREE)
			continue;

		blk = &lun->vlun.blocks[i];
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_free_blocks--;
	}

	return 0;
}

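/*
 * Callback used by gen_blocks_init() for the device's get_l2p_tbl scan.
 * Every valid L2P entry is resolved to its lun and block, and the block is
 * moved to the lun's used list so the FTL on top can recover its state.
 */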
static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_dev *gn = dev->mp;
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("gen: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("gen: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->state = NVM_BLK_ST_TGT;
			lun->vlun.nr_free_blocks--;
		}
	}

	return 0;
}

static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret, nr_blks;
	u8 *blks;

	nr_blks = dev->blks_per_lun * dev->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	gen_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks) {
			kfree(blks);
			return -ENOMEM;
		}

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			ppa.g.lun = lun->vlun.lun_id;

			ret = nvm_get_bb_tbl(dev, ppa, blks);
			if (ret)
				pr_err("gen: could not get BB table\n");

			ret = gen_block_bb(gn, ppa, blks, nr_blks);
			if (ret)
				pr_err("gen: BB table map failed\n");
		}
	}

	if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
							gen_block_map, dev);
		if (ret) {
			pr_err("gen: could not read L2P table.\n");
			pr_warn("gen: default block initialization\n");
		}
	}

	kfree(blks);
	return 0;
}

static void gen_free(struct nvm_dev *dev)
{
	gen_blocks_free(dev);
	gen_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}

static int gen_register(struct nvm_dev *dev)
{
	struct gen_dev *gn;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
	if (!gn) {
		module_put(THIS_MODULE);
		return -ENOMEM;
	}

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	INIT_LIST_HEAD(&gn->area_list);
	mutex_init(&gn->lock);
	INIT_LIST_HEAD(&gn->targets);
	dev->mp = gn;

	ret = gen_luns_init(dev, gn);
	if (ret) {
		pr_err("gen: could not initialize luns\n");
		goto err;
	}

	ret = gen_blocks_init(dev, gn);
	if (ret) {
		pr_err("gen: could not initialize blocks\n");
		goto err;
	}

	return 1;
err:
	gen_free(dev);
	module_put(THIS_MODULE);
	return ret;
}

static void gen_unregister(struct nvm_dev *dev)
{
	struct gen_dev *gn = dev->mp;
	struct nvm_target *t, *tmp;

	mutex_lock(&gn->lock);
	list_for_each_entry_safe(t, tmp, &gn->targets, list) {
		if (t->dev != dev)
			continue;
		__gen_remove_target(t);
	}
	mutex_unlock(&gn->lock);

	gen_free(dev);
	module_put(THIS_MODULE);
}

static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&vlun->lock);
	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gen: lun %u has no free pages available\n",
							lun->vlun.id);
		goto out;
	}

	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
		goto out;

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);

	list_move_tail(&blk->list, &lun->used_list);
	blk->state = NVM_BLK_ST_TGT;
	lun->vlun.nr_free_blocks--;
out:
	spin_unlock(&vlun->lock);
	return blk;
}

static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	spin_lock(&vlun->lock);
	if (blk->state & NVM_BLK_ST_TGT) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&blk->list, &lun->bb_list);
		blk->state = NVM_BLK_ST_BAD;
	} else {
		WARN_ON_ONCE(1);
		pr_err("gen: erroneous block type (%lu -> %u)\n",
							blk->id, blk->state);
		list_move_tail(&blk->list, &lun->bb_list);
	}
	spin_unlock(&vlun->lock);
}

static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	struct gen_dev *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
			ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);

	if (unlikely(ppa.g.ch > dev->nr_chnls ||
			ppa.g.lun > dev->luns_per_chnl ||
			ppa.g.blk > dev->blks_per_lun)) {
		WARN_ON_ONCE(1);
		pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u\n",
				ppa.g.ch, dev->nr_chnls,
				ppa.g.lun, dev->luns_per_chnl,
				ppa.g.blk, dev->blks_per_lun);
		return;
	}

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
	blk = &lun->vlun.blocks[ppa.g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->state = type;
}

static void gen_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_instance *ins = rqd->ins;

	ins->tt->end_io(rqd);
}

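/*
 * I/O submission path. Requests arrive in the generic address format and
 * are converted to the device-specific format before being handed to the
 * underlying driver; completions are routed back through gen_end_io().
 */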
static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->end_io = gen_end_io;
	return dev->ops->submit_io(dev, rqd);
}

static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, int flags)
{
	struct ppa_addr addr = block_to_ppa(dev, blk);

	return nvm_erase_ppa(dev, &addr, 1, flags);
}

static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
{
	return test_and_set_bit(lunid, dev->lun_map);
}

static void gen_release_lun(struct nvm_dev *dev, int lunid)
{
	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}

static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_dev *gn = dev->mp;

	if (unlikely(lunid >= dev->nr_luns))
		return NULL;

	return &gn->luns[lunid].vlun;
}

static void gen_lun_info_print(struct nvm_dev *dev)
{
	struct gen_dev *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gen_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\n", dev->name, i,
					lun->vlun.nr_free_blocks);

		spin_unlock(&lun->vlun.lock);
	}
}

static struct nvmm_type gen = {
	.name			= "gennvm",
	.version		= {0, 1, 0},

	.register_mgr		= gen_register,
	.unregister_mgr		= gen_unregister,

	.create_tgt		= gen_create_tgt,
	.remove_tgt		= gen_remove_tgt,

	.get_blk		= gen_get_blk,
	.put_blk		= gen_put_blk,

	.submit_io		= gen_submit_io,
	.erase_blk		= gen_erase_blk,

	.mark_blk		= gen_mark_blk,

	.get_lun		= gen_get_lun,
	.reserve_lun		= gen_reserve_lun,
	.release_lun		= gen_release_lun,
	.lun_info_print		= gen_lun_info_print,

	.get_area		= gen_get_area,
	.put_area		= gen_put_area,
};

static int __init gen_module_init(void)
{
	return nvm_register_mgr(&gen);
}

static void gen_module_exit(void)
{
	nvm_unregister_mgr(&gen);
}

module_init(gen_module_init);
module_exit(gen_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("General media manager for Open-Channel SSDs");