/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_lock);
	list_del(&tt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
						dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}
static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
							ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}
int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register the media mgr on any device that has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);
void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}
struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);
/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);
void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	return dev->mt->mark_blk(dev, ppa, type);
}
EXPORT_SYMBOL(nvm_mark_blk);
int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	return dev->mt->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);
int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, int flags)
{
	return dev->mt->erase_blk(dev, blk, flags);
}
EXPORT_SYMBOL(nvm_erase_blk);
void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);
void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
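
/*
 * Note (added for clarity, not in the original source): the "generic" form
 * keeps channel/lun/plane/block/page/sector in the ppa_addr.g bitfields,
 * while the "device" form packs the same fields into one 64-bit value using
 * the per-device bit offsets advertised in dev->ppaf; the conversion helpers
 * dev_to_generic_addr()/generic_to_dev_addr() in <linux/lightnvm.h> do the
 * shifting in both directions.
 */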
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = dev->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);
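
/*
 * Illustration (added, not in the original source): for a virtual-block
 * request with nr_ppas = 2 on a dual-plane device (plane_cnt = 2) the
 * resulting ppa_list is laid out plane-major:
 *
 *	ppa_list[0] = ppas[0] (pl 0), ppa_list[1] = ppas[1] (pl 0),
 *	ppa_list[2] = ppas[0] (pl 1), ppa_list[3] = ppas[1] (pl 1)
 *
 * and rqd->nr_ppas becomes 4.
 */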
void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
								int flags)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	rqd.flags = flags;

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);
void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}
static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
							hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}
/*
 * nvm_submit_ppa_list - submit user-defined ppa list to device. The caller
 * is responsible for freeing the ppa list if necessary.
 * @dev:	device
 * @ppa_list:	user created ppa_list
 * @nr_ppas:	length of ppa_list
 * @opcode:	device opcode
 * @flags:	device flags
 * @buf:	data buffer
 * @len:	data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
			int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);
/*
 * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
 * as single, dual, quad plane PPAs depending on device type.
 * @dev:	device
 * @ppa:	user created ppa_list
 * @nr_ppas:	length of ppa_list
 * @opcode:	device opcode
 * @flags:	device flags
 * @buf:	data buffer
 * @len:	data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);
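
/*
 * Usage sketch (added for illustration; blk, pg, buf, len and the zero flags
 * value are assumptions, not taken from this file): a caller could read a
 * single flash page synchronously with
 *
 *	struct ppa_addr ppa;
 *
 *	ppa.ppa = 0;
 *	ppa.g.blk = blk;
 *	ppa.g.pg = pg;
 *	ret = nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, 0, buf, len);
 *
 * where buf/len must cover the data returned for the (possibly plane-
 * unfolded) request; on a dual- or quad-plane device the single PPA is
 * expanded by nvm_set_rqd_ppalist() before submission.
 */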
/*
 * folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size is
 * returned.
 *
 * If any of the plane states is a bad or grown-bad block, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	int blk, offset, pl, blktype;

	if (nr_blks != dev->blks_per_lun * dev->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < dev->blks_per_lun; blk++) {
		offset = blk * dev->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < dev->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return dev->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
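
/*
 * Worked example (added for illustration): with plane_mode = 2 and the
 * per-plane table blks = { FREE, BAD, FREE, FREE }, block 0 folds to BAD
 * (its second plane is factory bad) and block 1 folds to FREE, so the array
 * becomes { BAD, FREE, ... } and blks_per_lun (here 2) is returned.
 */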
int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i;

	dev->lps_per_blk = dev->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}
static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous incrementation value.
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
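
/*
 * Decode example (hypothetical pair bytes, added for illustration): with
 * num_pairs = 4 and pairs = { 0x21, 0x13 } the table becomes
 *
 *	lptbl[0] = 0x21 & 0xF                = 1
 *	lptbl[1] = 1 + ((0x21 & 0xF0) >> 4)  = 3
 *	lptbl[2] = 3 + (0x13 & 0xF)          = 6
 *	lptbl[3] = 6 + ((0x13 & 0xF0) >> 4)  = 7
 */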
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	int ret;

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->fpg_size = grp->fpg_sz;
	dev->pfpg_size = grp->fpg_sz * grp->num_pln;
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	dev->mccap = grp->mccap;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;

	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	blk_queue_logical_block_size(dev->q, dev->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}
static void nvm_free_mgr(struct nvm_dev *dev)
{
	if (!dev->mt)
		return;

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}
void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
{
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
			dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel\n");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, dev->sec_per_pg, dev->nr_planes,
			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
			dev->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);
int nvm_register(struct nvm_dev *dev)
{
	int ret;

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev->lun_map);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(struct nvm_dev *dev)
{
	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->nr_luns);
		return -EINVAL;
	}

	return dev->mt->create_tgt(dev, create);
}
#ifdef CONFIG_NVM_DEBUG
static int nvm_configure_show(const char *val)
{
	struct nvm_dev *dev;
	char opcode, devname[DISK_NAME_LEN];
	int ret;

	ret = sscanf(val, "%c %32s", &opcode, devname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
		return -EINVAL;
	}

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(devname);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt)
		return 0;

	dev->mt->lun_info_print(dev);

	return 0;
}
static int nvm_configure_remove(const char *val)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	char opcode;
	int ret = 0;

	ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"d targetname\".\n");
		return -EINVAL;
	}

	remove.flags = 0;

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
static int nvm_configure_create(const char *val)
{
	struct nvm_ioctl_create create;
	char opcode;
	int lun_begin, lun_end, ret;

	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
						create.tgtname, create.tgttype,
						&lun_begin, &lun_end);
	if (ret != 6) {
		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
		return -EINVAL;
	}

	create.flags = 0;
	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
	create.conf.s.lun_begin = lun_begin;
	create.conf.s.lun_end = lun_end;

	return __nvm_configure_create(&create);
}
/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
static int nvm_configure_by_str_event(const char *val,
					const struct kernel_param *kp)
{
	char opcode;
	int ret;

	ret = sscanf(val, "%c", &opcode);
	if (ret != 1) {
		pr_err("nvm: string must have the format of \"cmd ...\"\n");
		return -EINVAL;
	}

	switch (opcode) {
	case 'a':
		return nvm_configure_create(val);
	case 'd':
		return nvm_configure_remove(val);
	case 's':
		return nvm_configure_show(val);
	default:
		pr_err("nvm: invalid command\n");
		return -EINVAL;
	}

	return 0;
}
static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
	int sz;
	struct nvm_dev *dev;

	sz = sprintf(buf, "available devices:\n");
	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (sz > 4095 - DISK_NAME_LEN - 2)
			break;
		sz += sprintf(buf + sz, " %32s\n", dev->name);
	}
	up_write(&nvm_lock);

	return sz;
}
static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set	= nvm_configure_by_str_event,
	.get	= nvm_configure_get,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
									0644);
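
/*
 * Usage sketch (added for illustration; the device, target name and lun
 * range below are made up): with CONFIG_NVM_DEBUG enabled, a target can be
 * created, shown and removed from the shell through the module parameter:
 *
 *	echo "a nvme0n1 tgt0 rrpc 0:3" > /sys/module/lnvm/parameters/configure_debug
 *	echo "s nvme0n1" > /sys/module/lnvm/parameters/configure_debug
 *	echo "d tgt0" > /sys/module/lnvm/parameters/configure_debug
 *	cat /sys/module/lnvm/parameters/configure_debug
 */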
#endif /* CONFIG_NVM_DEBUG */
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}
static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_free_mgr(dev);

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};
static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
module_misc_device(_nvm_misc);
MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);

MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");