1 /*
2  * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3  * Initial release: Matias Bjorling <m@bjorling.me>
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version
7  * 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; see the file COPYING.  If not, write to
16  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17  * USA.
18  *
19  */
20
21 #include <linux/list.h>
22 #include <linux/types.h>
23 #include <linux/sem.h>
24 #include <linux/bitmap.h>
25 #include <linux/module.h>
26 #include <linux/miscdevice.h>
27 #include <linux/lightnvm.h>
28 #include <linux/sched/sysctl.h>
29
30 static LIST_HEAD(nvm_tgt_types);
31 static DECLARE_RWSEM(nvm_tgtt_lock);
32 static LIST_HEAD(nvm_mgrs);
33 static LIST_HEAD(nvm_devices);
34 static DECLARE_RWSEM(nvm_lock);
35
36 struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
37 {
38         struct nvm_tgt_type *tmp, *tt = NULL;
39
40         if (lock)
41                 down_write(&nvm_tgtt_lock);
42
43         list_for_each_entry(tmp, &nvm_tgt_types, list)
44                 if (!strcmp(name, tmp->name)) {
45                         tt = tmp;
46                         break;
47                 }
48
49         if (lock)
50                 up_write(&nvm_tgtt_lock);
51         return tt;
52 }
53 EXPORT_SYMBOL(nvm_find_target_type);
54
55 int nvm_register_tgt_type(struct nvm_tgt_type *tt)
56 {
57         int ret = 0;
58
59         down_write(&nvm_tgtt_lock);
60         if (nvm_find_target_type(tt->name, 0))
61                 ret = -EEXIST;
62         else
63                 list_add(&tt->list, &nvm_tgt_types);
64         up_write(&nvm_tgtt_lock);
65
66         return ret;
67 }
68 EXPORT_SYMBOL(nvm_register_tgt_type);
69
70 void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
71 {
72         if (!tt)
73                 return;
74
75         down_write(&nvm_tgtt_lock);
76         list_del(&tt->list);
77         up_write(&nvm_tgtt_lock);
78 }
79 EXPORT_SYMBOL(nvm_unregister_tgt_type);
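/*
 * Example (illustrative sketch, not part of the driver): a target module is
 * expected to pair the two calls above in its module init/exit paths. The
 * "my_tgt" type below is hypothetical; a real target also fills in its
 * make_rq/capacity/init/exit entry points.
 *
 *	static struct nvm_tgt_type tt_my_tgt = {
 *		.name		= "my_tgt",
 *		.version	= {1, 0, 0},
 *	};
 *
 *	static int __init my_tgt_init(void)
 *	{
 *		return nvm_register_tgt_type(&tt_my_tgt);
 *	}
 *
 *	static void __exit my_tgt_exit(void)
 *	{
 *		nvm_unregister_tgt_type(&tt_my_tgt);
 *	}
 */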
80
81 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
82                                                         dma_addr_t *dma_handler)
83 {
84         return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
85                                                                 dma_handler);
86 }
87 EXPORT_SYMBOL(nvm_dev_dma_alloc);
88
89 void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
90                                                         dma_addr_t dma_handler)
91 {
92         dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
93 }
94 EXPORT_SYMBOL(nvm_dev_dma_free);
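/*
 * Example (illustrative sketch): the two helpers above are meant to be used
 * as a pair; the caller keeps the DMA handle returned by the allocation and
 * passes it back on free. Error handling and buffer use are elided.
 *
 *	dma_addr_t dma_handle;
 *	void *ppa_buf;
 *
 *	ppa_buf = nvm_dev_dma_alloc(dev, GFP_KERNEL, &dma_handle);
 *	if (!ppa_buf)
 *		return -ENOMEM;
 *	...
 *	nvm_dev_dma_free(dev, ppa_buf, dma_handle);
 */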
95
96 static struct nvmm_type *nvm_find_mgr_type(const char *name)
97 {
98         struct nvmm_type *mt;
99
100         list_for_each_entry(mt, &nvm_mgrs, list)
101                 if (!strcmp(name, mt->name))
102                         return mt;
103
104         return NULL;
105 }
106
107 static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
108 {
109         struct nvmm_type *mt;
110         int ret;
111
112         lockdep_assert_held(&nvm_lock);
113
114         list_for_each_entry(mt, &nvm_mgrs, list) {
115                 if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
116                         continue;
117
118                 ret = mt->register_mgr(dev);
119                 if (ret < 0) {
120                         pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
121                                                                 ret, dev->name);
122                         return NULL; /* initialization failed */
123                 } else if (ret > 0)
124                         return mt;
125         }
126
127         return NULL;
128 }
129
130 int nvm_register_mgr(struct nvmm_type *mt)
131 {
132         struct nvm_dev *dev;
133         int ret = 0;
134
135         down_write(&nvm_lock);
136         if (nvm_find_mgr_type(mt->name)) {
137                 ret = -EEXIST;
138                 goto finish;
139         } else {
140                 list_add(&mt->list, &nvm_mgrs);
141         }
142
143         /* try to register the media mgr on any device that has none configured */
144         list_for_each_entry(dev, &nvm_devices, devices) {
145                 if (dev->mt)
146                         continue;
147
148                 dev->mt = nvm_init_mgr(dev);
149         }
150 finish:
151         up_write(&nvm_lock);
152
153         return ret;
154 }
155 EXPORT_SYMBOL(nvm_register_mgr);
156
157 void nvm_unregister_mgr(struct nvmm_type *mt)
158 {
159         if (!mt)
160                 return;
161
162         down_write(&nvm_lock);
163         list_del(&mt->list);
164         up_write(&nvm_lock);
165 }
166 EXPORT_SYMBOL(nvm_unregister_mgr);
167
168 static struct nvm_dev *nvm_find_nvm_dev(const char *name)
169 {
170         struct nvm_dev *dev;
171
172         list_for_each_entry(dev, &nvm_devices, devices)
173                 if (!strcmp(name, dev->name))
174                         return dev;
175
176         return NULL;
177 }
178
179 int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
180                                                                 int type)
181 {
182         struct nvm_rq rqd;
183         int ret;
184
185         if (nr_ppas > dev->ops->max_phys_sect) {
186                 pr_err("nvm: unable to update all bad blocks atomically\n");
187                 return -EINVAL;
188         }
189
190         memset(&rqd, 0, sizeof(struct nvm_rq));
191
192         ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
            if (ret)
                    return ret;
193         nvm_generic_to_addr_mode(dev, &rqd);
194
195         ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
196         nvm_free_rqd_ppalist(dev, &rqd);
197         if (ret) {
198                 pr_err("nvm: sysblk failed bb mark\n");
199                 return -EINVAL;
200         }
201
202         return 0;
203 }
204 EXPORT_SYMBOL(nvm_set_bb_tbl);
205
206 int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
207 {
208         struct nvm_dev *dev = tgt_dev->parent;
209
210         return dev->ops->max_phys_sect;
211 }
212 EXPORT_SYMBOL(nvm_max_phys_sects);
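/*
 * Example (illustrative sketch): a target can use the helper above to cap
 * the number of physical sectors it packs into a single request; "tgt_dev"
 * and "nr_secs" are hypothetical locals on the target side.
 *
 *	int max = nvm_max_phys_sects(tgt_dev);
 *
 *	if (nr_secs > max)
 *		nr_secs = max;
 */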
213
214 int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
215 {
216         struct nvm_dev *dev = tgt_dev->parent;
217
218         return dev->mt->submit_io(tgt_dev, rqd);
219 }
220 EXPORT_SYMBOL(nvm_submit_io);
221
222 int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
223 {
224         struct nvm_dev *dev = tgt_dev->parent;
225
226         return dev->mt->erase_blk(tgt_dev, p, flags);
227 }
228 EXPORT_SYMBOL(nvm_erase_blk);
229
230 int nvm_get_l2p_tbl(struct nvm_dev *dev, u64 slba, u32 nlb,
231                     nvm_l2p_update_fn *update_l2p, void *priv)
232 {
233         if (!dev->ops->get_l2p_tbl)
234                 return 0;
235
236         return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
237 }
238 EXPORT_SYMBOL(nvm_get_l2p_tbl);
239
240 int nvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
241 {
242         return dev->mt->get_area(dev, lba, len);
243 }
244 EXPORT_SYMBOL(nvm_get_area);
245
246 void nvm_put_area(struct nvm_dev *dev, sector_t lba)
247 {
248         dev->mt->put_area(dev, lba);
249 }
250 EXPORT_SYMBOL(nvm_put_area);
251
252 void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
253 {
254         int i;
255
256         if (rqd->nr_ppas > 1) {
257                 for (i = 0; i < rqd->nr_ppas; i++)
258                         rqd->ppa_list[i] = dev_to_generic_addr(dev,
259                                                         rqd->ppa_list[i]);
260         } else {
261                 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
262         }
263 }
264 EXPORT_SYMBOL(nvm_addr_to_generic_mode);
265
266 void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
267 {
268         int i;
269
270         if (rqd->nr_ppas > 1) {
271                 for (i = 0; i < rqd->nr_ppas; i++)
272                         rqd->ppa_list[i] = generic_to_dev_addr(dev,
273                                                         rqd->ppa_list[i]);
274         } else {
275                 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
276         }
277 }
278 EXPORT_SYMBOL(nvm_generic_to_addr_mode);
279
280 int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
281                         const struct ppa_addr *ppas, int nr_ppas, int vblk)
282 {
283         struct nvm_geo *geo = &dev->geo;
284         int i, plane_cnt, pl_idx;
285         struct ppa_addr ppa;
286
287         if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
288                 rqd->nr_ppas = nr_ppas;
289                 rqd->ppa_addr = ppas[0];
290
291                 return 0;
292         }
293
294         rqd->nr_ppas = nr_ppas;
295         rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
296         if (!rqd->ppa_list) {
297                 pr_err("nvm: failed to allocate dma memory\n");
298                 return -ENOMEM;
299         }
300
301         if (!vblk) {
302                 for (i = 0; i < nr_ppas; i++)
303                         rqd->ppa_list[i] = ppas[i];
304         } else {
305                 plane_cnt = geo->plane_mode;
306                 rqd->nr_ppas *= plane_cnt;
307
308                 for (i = 0; i < nr_ppas; i++) {
309                         for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
310                                 ppa = ppas[i];
311                                 ppa.g.pl = pl_idx;
312                                 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
313                         }
314                 }
315         }
316
317         return 0;
318 }
319 EXPORT_SYMBOL(nvm_set_rqd_ppalist);
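/*
 * Worked example for the vblk path above: with plane_mode == NVM_PLANE_DOUBLE
 * and nr_ppas == 2, the caller's blocks { A, B } are unfolded plane-major
 * into four entries:
 *
 *	ppa_list[0] = A (plane 0)	ppa_list[2] = A (plane 1)
 *	ppa_list[1] = B (plane 0)	ppa_list[3] = B (plane 1)
 *
 * and rqd->nr_ppas becomes nr_ppas * plane_cnt == 4.
 */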
320
321 void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
322 {
323         if (!rqd->ppa_list)
324                 return;
325
326         nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
327 }
328 EXPORT_SYMBOL(nvm_free_rqd_ppalist);
329
330 int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
331                                                                 int flags)
332 {
333         struct nvm_rq rqd;
334         int ret;
335
336         if (!dev->ops->erase_block)
337                 return 0;
338
339         memset(&rqd, 0, sizeof(struct nvm_rq));
340
341         ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
342         if (ret)
343                 return ret;
344
345         nvm_generic_to_addr_mode(dev, &rqd);
346
347         rqd.flags = flags;
348
349         ret = dev->ops->erase_block(dev, &rqd);
350
351         nvm_free_rqd_ppalist(dev, &rqd);
352
353         return ret;
354 }
355 EXPORT_SYMBOL(nvm_erase_ppa);
356
357 void nvm_end_io(struct nvm_rq *rqd, int error)
358 {
359         rqd->error = error;
360         rqd->end_io(rqd);
361 }
362 EXPORT_SYMBOL(nvm_end_io);
363
364 static void nvm_end_io_sync(struct nvm_rq *rqd)
365 {
366         struct completion *waiting = rqd->wait;
367
368         rqd->wait = NULL;
369
370         complete(waiting);
371 }
372
373 static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
374                                                 int flags, void *buf, int len)
375 {
376         DECLARE_COMPLETION_ONSTACK(wait);
377         struct bio *bio;
378         int ret;
379         unsigned long hang_check;
380
381         bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
382         if (IS_ERR_OR_NULL(bio))
383                 return -ENOMEM;
384
385         nvm_generic_to_addr_mode(dev, rqd);
386
387         rqd->dev = NULL;
388         rqd->opcode = opcode;
389         rqd->flags = flags;
390         rqd->bio = bio;
391         rqd->wait = &wait;
392         rqd->end_io = nvm_end_io_sync;
393
394         ret = dev->ops->submit_io(dev, rqd);
395         if (ret) {
396                 bio_put(bio);
397                 return ret;
398         }
399
400         /* Prevent hang_check timer from firing at us during very long I/O */
401         hang_check = sysctl_hung_task_timeout_secs;
402         if (hang_check)
403                 while (!wait_for_completion_io_timeout(&wait,
404                                                         hang_check * (HZ/2)))
405                         ;
406         else
407                 wait_for_completion_io(&wait);
408
409         return rqd->error;
410 }
411
412 /**
413  * nvm_submit_ppa_list - submit user-defined ppa list to device. The user
414  *                       must take care to free the ppa list if necessary.
415  * @dev:        device
416  * @ppa_list:   user created ppa_list
417  * @nr_ppas:    length of ppa_list
418  * @opcode:     device opcode
419  * @flags:      device flags
420  * @buf:        data buffer
421  * @len:        data buffer length
422  */
423 int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
424                         int nr_ppas, int opcode, int flags, void *buf, int len)
425 {
426         struct nvm_rq rqd;
427
428         if (dev->ops->max_phys_sect < nr_ppas)
429                 return -EINVAL;
430
431         memset(&rqd, 0, sizeof(struct nvm_rq));
432
433         rqd.nr_ppas = nr_ppas;
434         if (nr_ppas > 1)
435                 rqd.ppa_list = ppa_list;
436         else
437                 rqd.ppa_addr = ppa_list[0];
438
439         return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
440 }
441 EXPORT_SYMBOL(nvm_submit_ppa_list);
442
443 /**
444  * nvm_submit_ppa - submit PPAs to device. PPAs are automatically unfolded
445  *                  into single, dual or quad plane PPAs depending on the device type.
446  * @dev:        device
447  * @ppa:        user created ppa_list
448  * @nr_ppas:    length of ppa_list
449  * @opcode:     device opcode
450  * @flags:      device flags
451  * @buf:        data buffer
452  * @len:        data buffer length
453  */
454 int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
455                                 int opcode, int flags, void *buf, int len)
456 {
457         struct nvm_rq rqd;
458         int ret;
459
460         memset(&rqd, 0, sizeof(struct nvm_rq));
461         ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
462         if (ret)
463                 return ret;
464
465         ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
466
467         nvm_free_rqd_ppalist(dev, &rqd);
468
469         return ret;
470 }
471 EXPORT_SYMBOL(nvm_submit_ppa);
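/*
 * Example (illustrative sketch): a synchronous single-sector read through
 * nvm_submit_ppa(). NVM_OP_PREAD comes from <linux/lightnvm.h>; "ppa" and
 * "buf" are hypothetical, and buf must hold at least dev->geo.sec_size
 * bytes.
 *
 *	ret = nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, 0, buf,
 *							dev->geo.sec_size);
 *	if (ret)
 *		pr_err("nvm: read failed (%d)\n", ret);
 */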
472
473 /*
474  * Folds a bad block list from its per-plane representation to its virtual
475  * block representation. The fold is done in place and the reduced size is
476  * returned.
477  *
478  * If any of the planes is marked bad or grown bad, the virtual block is
479  * marked bad. Otherwise, the first plane's state acts as the block state.
480  */
481 int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
482 {
483         struct nvm_geo *geo = &dev->geo;
484         int blk, offset, pl, blktype;
485
486         if (nr_blks != geo->blks_per_lun * geo->plane_mode)
487                 return -EINVAL;
488
489         for (blk = 0; blk < geo->blks_per_lun; blk++) {
490                 offset = blk * geo->plane_mode;
491                 blktype = blks[offset];
492
493                 /* Bad blocks on any planes take precedence over other types */
494                 for (pl = 0; pl < geo->plane_mode; pl++) {
495                         if (blks[offset + pl] &
496                                         (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
497                                 blktype = blks[offset + pl];
498                                 break;
499                         }
500                 }
501
502                 blks[blk] = blktype;
503         }
504
505         return geo->blks_per_lun;
506 }
507 EXPORT_SYMBOL(nvm_bb_tbl_fold);
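/*
 * Worked example for nvm_bb_tbl_fold(): with plane_mode == 2, a block whose
 * planes report { NVM_BLK_T_FREE, NVM_BLK_T_BAD } folds to NVM_BLK_T_BAD,
 * while a block reporting { NVM_BLK_T_FREE, NVM_BLK_T_FREE } keeps the first
 * plane's state. The folded table occupies the first blks_per_lun entries of
 * blks[].
 */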
508
509 int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
510 {
511         ppa = generic_to_dev_addr(dev, ppa);
512
513         return dev->ops->get_bb_tbl(dev, ppa, blks);
514 }
515 EXPORT_SYMBOL(nvm_get_bb_tbl);
516
517 static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
518 {
519         struct nvm_geo *geo = &dev->geo;
520         int i;
521
522         dev->lps_per_blk = geo->pgs_per_blk;
523         dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
524         if (!dev->lptbl)
525                 return -ENOMEM;
526
527         /* Just a linear array */
528         for (i = 0; i < dev->lps_per_blk; i++)
529                 dev->lptbl[i] = i;
530
531         return 0;
532 }
533
534 static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
535 {
536         int i, p;
537         struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
538
539         if (!mlc->num_pairs)
540                 return 0;
541
542         dev->lps_per_blk = mlc->num_pairs;
543         dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
544         if (!dev->lptbl)
545                 return -ENOMEM;
546
547         /* The lower page table encoding is a list of bytes, each split into a
548          * lower and an upper nibble. The first nibble holds the initial lower
549          * page value and every following nibble is an offset added to the
550          * previously decoded value.
551          */
552         dev->lptbl[0] = mlc->pairs[0] & 0xF;
553         for (i = 1; i < dev->lps_per_blk; i++) {
554                 p = mlc->pairs[i >> 1];
555                 if (i & 0x1) /* upper */
556                         dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
557                 else /* lower */
558                         dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
559         }
560
561         return 0;
562 }
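/*
 * Worked example for the decoding above: with mlc->pairs[] = { 0x21, 0x13 }
 * and four entries, lptbl[0] = 0x1 = 1, lptbl[1] = 1 + 0x2 = 3,
 * lptbl[2] = 3 + 0x3 = 6 and lptbl[3] = 6 + 0x1 = 7, i.e. each nibble is a
 * delta added to the previously decoded lower-page number.
 */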
563
564 static int nvm_core_init(struct nvm_dev *dev)
565 {
566         struct nvm_id *id = &dev->identity;
567         struct nvm_id_group *grp = &id->groups[0];
568         struct nvm_geo *geo = &dev->geo;
569         int ret;
570
571         /* Whole device values */
572         geo->nr_chnls = grp->num_ch;
573         geo->luns_per_chnl = grp->num_lun;
574
575         /* Generic device values */
576         geo->pgs_per_blk = grp->num_pg;
577         geo->blks_per_lun = grp->num_blk;
578         geo->nr_planes = grp->num_pln;
579         geo->fpg_size = grp->fpg_sz;
580         geo->pfpg_size = grp->fpg_sz * grp->num_pln;
581         geo->sec_size = grp->csecs;
582         geo->oob_size = grp->sos;
583         geo->sec_per_pg = grp->fpg_sz / grp->csecs;
584         geo->mccap = grp->mccap;
585         memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
586
587         geo->plane_mode = NVM_PLANE_SINGLE;
588         geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
589
590         if (grp->mpos & 0x020202)
591                 geo->plane_mode = NVM_PLANE_DOUBLE;
592         if (grp->mpos & 0x040404)
593                 geo->plane_mode = NVM_PLANE_QUAD;
594
595         if (grp->mtype != 0) {
596                 pr_err("nvm: memory type not supported\n");
597                 return -EINVAL;
598         }
599
600         /* calculated values */
601         geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
602         geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
603         geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
604         geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
605
606         dev->total_secs = geo->nr_luns * geo->sec_per_lun;
607         dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
608                                         sizeof(unsigned long), GFP_KERNEL);
609         if (!dev->lun_map)
610                 return -ENOMEM;
611
612         switch (grp->fmtype) {
613         case NVM_ID_FMTYPE_SLC:
614                 if (nvm_init_slc_tbl(dev, grp)) {
615                         ret = -ENOMEM;
616                         goto err_fmtype;
617                 }
618                 break;
619         case NVM_ID_FMTYPE_MLC:
620                 if (nvm_init_mlc_tbl(dev, grp)) {
621                         ret = -ENOMEM;
622                         goto err_fmtype;
623                 }
624                 break;
625         default:
626                 pr_err("nvm: flash type not supported\n");
627                 ret = -EINVAL;
628                 goto err_fmtype;
629         }
630
631         mutex_init(&dev->mlock);
632         spin_lock_init(&dev->lock);
633
634         blk_queue_logical_block_size(dev->q, geo->sec_size);
635
636         return 0;
637 err_fmtype:
638         kfree(dev->lun_map);
639         return ret;
640 }
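/*
 * Worked example for the derived values above (hypothetical geometry): with
 * 4 channels, 8 LUNs per channel, 2 planes, 4 sectors per page and 256 pages
 * per block, sec_per_pl = 8, sec_per_blk = 2048, nr_luns = 32 and
 * dev->total_secs = 32 * blks_per_lun * 2048 sectors.
 */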
641
642 static void nvm_free_mgr(struct nvm_dev *dev)
643 {
644         if (!dev->mt)
645                 return;
646
647         dev->mt->unregister_mgr(dev);
648         dev->mt = NULL;
649 }
650
651 void nvm_free(struct nvm_dev *dev)
652 {
653         if (!dev)
654                 return;
655
656         nvm_free_mgr(dev);
657
658         if (dev->dma_pool)
659                 dev->ops->destroy_dma_pool(dev->dma_pool);
660
661         kfree(dev->lptbl);
662         kfree(dev->lun_map);
663         kfree(dev);
664 }
665
666 static int nvm_init(struct nvm_dev *dev)
667 {
668         struct nvm_geo *geo = &dev->geo;
669         int ret = -EINVAL;
670
671         if (!dev->q || !dev->ops)
672                 return ret;
673
674         if (dev->ops->identity(dev, &dev->identity)) {
675                 pr_err("nvm: device could not be identified\n");
676                 goto err;
677         }
678
679         pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
680                         dev->identity.ver_id, dev->identity.vmnt,
681                                                         dev->identity.cgrps);
682
683         if (dev->identity.ver_id != 1) {
684                 pr_err("nvm: device not supported by kernel.\n");
685                 goto err;
686         }
687
688         if (dev->identity.cgrps != 1) {
689                 pr_err("nvm: only one group configuration supported.\n");
690                 goto err;
691         }
692
693         ret = nvm_core_init(dev);
694         if (ret) {
695                 pr_err("nvm: could not initialize core structures.\n");
696                 goto err;
697         }
698
699         pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
700                         dev->name, geo->sec_per_pg, geo->nr_planes,
701                         geo->pgs_per_blk, geo->blks_per_lun,
702                         geo->nr_luns, geo->nr_chnls);
703         return 0;
704 err:
705         pr_err("nvm: failed to initialize nvm\n");
706         return ret;
707 }
708
709 struct nvm_dev *nvm_alloc_dev(int node)
710 {
711         return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
712 }
713 EXPORT_SYMBOL(nvm_alloc_dev);
714
715 int nvm_register(struct nvm_dev *dev)
716 {
717         int ret;
718
719         ret = nvm_init(dev);
720         if (ret)
721                 goto err_init;
722
723         if (dev->ops->max_phys_sect > 256) {
724                 pr_info("nvm: max sectors supported is 256.\n");
725                 ret = -EINVAL;
726                 goto err_init;
727         }
728
729         if (dev->ops->max_phys_sect > 1) {
730                 dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
731                 if (!dev->dma_pool) {
732                         pr_err("nvm: could not create dma pool\n");
733                         ret = -ENOMEM;
734                         goto err_init;
735                 }
736         }
737
738         if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
739                 ret = nvm_get_sysblock(dev, &dev->sb);
740                 if (!ret)
741                         pr_err("nvm: device not initialized.\n");
742                 else if (ret < 0)
743                         pr_err("nvm: err (%d) on device initialization\n", ret);
744         }
745
746         /* register device with a supported media manager */
747         down_write(&nvm_lock);
748         if (ret > 0)
749                 dev->mt = nvm_init_mgr(dev);
750         list_add(&dev->devices, &nvm_devices);
751         up_write(&nvm_lock);
752
753         return 0;
754 err_init:
755         kfree(dev->lun_map);
756         return ret;
757 }
758 EXPORT_SYMBOL(nvm_register);
759
760 void nvm_unregister(struct nvm_dev *dev)
761 {
762         down_write(&nvm_lock);
763         list_del(&dev->devices);
764         up_write(&nvm_lock);
765
766         nvm_free(dev);
767 }
768 EXPORT_SYMBOL(nvm_unregister);
769
770 static int __nvm_configure_create(struct nvm_ioctl_create *create)
771 {
772         struct nvm_dev *dev;
773         struct nvm_ioctl_create_simple *s;
774
775         down_write(&nvm_lock);
776         dev = nvm_find_nvm_dev(create->dev);
777         up_write(&nvm_lock);
778
779         if (!dev) {
780                 pr_err("nvm: device not found\n");
781                 return -EINVAL;
782         }
783
784         if (!dev->mt) {
785                 pr_info("nvm: device has no media manager registered.\n");
786                 return -ENODEV;
787         }
788
789         if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
790                 pr_err("nvm: config type not valid\n");
791                 return -EINVAL;
792         }
793         s = &create->conf.s;
794
795         if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
796                 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
797                         s->lun_begin, s->lun_end, dev->geo.nr_luns);
798                 return -EINVAL;
799         }
800
801         return dev->mt->create_tgt(dev, create);
802 }
803
804 static long nvm_ioctl_info(struct file *file, void __user *arg)
805 {
806         struct nvm_ioctl_info *info;
807         struct nvm_tgt_type *tt;
808         int tgt_iter = 0;
809
810         if (!capable(CAP_SYS_ADMIN))
811                 return -EPERM;
812
813         info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
814         if (IS_ERR(info))
815                 return -EFAULT;
816
817         info->version[0] = NVM_VERSION_MAJOR;
818         info->version[1] = NVM_VERSION_MINOR;
819         info->version[2] = NVM_VERSION_PATCH;
820
821         down_write(&nvm_lock);
822         list_for_each_entry(tt, &nvm_tgt_types, list) {
823                 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
824
825                 tgt->version[0] = tt->version[0];
826                 tgt->version[1] = tt->version[1];
827                 tgt->version[2] = tt->version[2];
828                 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
829
830                 tgt_iter++;
831         }
832
833         info->tgtsize = tgt_iter;
834         up_write(&nvm_lock);
835
836         if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
837                 kfree(info);
838                 return -EFAULT;
839         }
840
841         kfree(info);
842         return 0;
843 }
844
845 static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
846 {
847         struct nvm_ioctl_get_devices *devices;
848         struct nvm_dev *dev;
849         int i = 0;
850
851         if (!capable(CAP_SYS_ADMIN))
852                 return -EPERM;
853
854         devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
855         if (!devices)
856                 return -ENOMEM;
857
858         down_write(&nvm_lock);
859         list_for_each_entry(dev, &nvm_devices, devices) {
860                 struct nvm_ioctl_device_info *info = &devices->info[i];
861
862                 sprintf(info->devname, "%s", dev->name);
863                 if (dev->mt) {
864                         info->bmversion[0] = dev->mt->version[0];
865                         info->bmversion[1] = dev->mt->version[1];
866                         info->bmversion[2] = dev->mt->version[2];
867                         sprintf(info->bmname, "%s", dev->mt->name);
868                 } else {
869                         sprintf(info->bmname, "none");
870                 }
871
872                 i++;
873                 if (i > 31) {
874                         pr_err("nvm: max 31 devices can be reported.\n");
875                         break;
876                 }
877         }
878         up_write(&nvm_lock);
879
880         devices->nr_devices = i;
881
882         if (copy_to_user(arg, devices,
883                          sizeof(struct nvm_ioctl_get_devices))) {
884                 kfree(devices);
885                 return -EFAULT;
886         }
887
888         kfree(devices);
889         return 0;
890 }
891
892 static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
893 {
894         struct nvm_ioctl_create create;
895
896         if (!capable(CAP_SYS_ADMIN))
897                 return -EPERM;
898
899         if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
900                 return -EFAULT;
901
902         create.dev[DISK_NAME_LEN - 1] = '\0';
903         create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
904         create.tgtname[DISK_NAME_LEN - 1] = '\0';
905
906         if (create.flags != 0) {
907                 pr_err("nvm: no flags supported\n");
908                 return -EINVAL;
909         }
910
911         return __nvm_configure_create(&create);
912 }
913
914 static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
915 {
916         struct nvm_ioctl_remove remove;
917         struct nvm_dev *dev;
918         int ret = 0;
919
920         if (!capable(CAP_SYS_ADMIN))
921                 return -EPERM;
922
923         if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
924                 return -EFAULT;
925
926         remove.tgtname[DISK_NAME_LEN - 1] = '\0';
927
928         if (remove.flags != 0) {
929                 pr_err("nvm: no flags supported\n");
930                 return -EINVAL;
931         }
932
933         list_for_each_entry(dev, &nvm_devices, devices) {
934                 ret = dev->mt->remove_tgt(dev, &remove);
935                 if (!ret)
936                         break;
937         }
938
939         return ret;
940 }
941
942 static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
943 {
944         info->seqnr = 1;
945         info->erase_cnt = 0;
946         info->version = 1;
947 }
948
949 static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
950 {
951         struct nvm_dev *dev;
952         struct nvm_sb_info info;
953         int ret;
954
955         down_write(&nvm_lock);
956         dev = nvm_find_nvm_dev(init->dev);
957         up_write(&nvm_lock);
958         if (!dev) {
959                 pr_err("nvm: device not found\n");
960                 return -EINVAL;
961         }
962
963         nvm_setup_nvm_sb_info(&info);
964
965         strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
966         info.fs_ppa.ppa = -1;
967
968         if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
969                 ret = nvm_init_sysblock(dev, &info);
970                 if (ret)
971                         return ret;
972         }
973
974         memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
975
976         down_write(&nvm_lock);
977         dev->mt = nvm_init_mgr(dev);
978         up_write(&nvm_lock);
979
980         return 0;
981 }
982
983 static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
984 {
985         struct nvm_ioctl_dev_init init;
986
987         if (!capable(CAP_SYS_ADMIN))
988                 return -EPERM;
989
990         if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
991                 return -EFAULT;
992
993         if (init.flags != 0) {
994                 pr_err("nvm: no flags supported\n");
995                 return -EINVAL;
996         }
997
998         init.dev[DISK_NAME_LEN - 1] = '\0';
999
1000         return __nvm_ioctl_dev_init(&init);
1001 }
1002
1003 static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1004 {
1005         struct nvm_ioctl_dev_factory fact;
1006         struct nvm_dev *dev;
1007
1008         if (!capable(CAP_SYS_ADMIN))
1009                 return -EPERM;
1010
1011         if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1012                 return -EFAULT;
1013
1014         fact.dev[DISK_NAME_LEN - 1] = '\0';
1015
1016         if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1017                 return -EINVAL;
1018
1019         down_write(&nvm_lock);
1020         dev = nvm_find_nvm_dev(fact.dev);
1021         up_write(&nvm_lock);
1022         if (!dev) {
1023                 pr_err("nvm: device not found\n");
1024                 return -EINVAL;
1025         }
1026
1027         nvm_free_mgr(dev);
1028
1029         if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1030                 return nvm_dev_factory(dev, fact.flags);
1031
1032         return 0;
1033 }
1034
1035 static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1036 {
1037         void __user *argp = (void __user *)arg;
1038
1039         switch (cmd) {
1040         case NVM_INFO:
1041                 return nvm_ioctl_info(file, argp);
1042         case NVM_GET_DEVICES:
1043                 return nvm_ioctl_get_devices(file, argp);
1044         case NVM_DEV_CREATE:
1045                 return nvm_ioctl_dev_create(file, argp);
1046         case NVM_DEV_REMOVE:
1047                 return nvm_ioctl_dev_remove(file, argp);
1048         case NVM_DEV_INIT:
1049                 return nvm_ioctl_dev_init(file, argp);
1050         case NVM_DEV_FACTORY:
1051                 return nvm_ioctl_dev_factory(file, argp);
1052         }
1053         return 0;
1054 }
1055
1056 static const struct file_operations _ctl_fops = {
1057         .open = nonseekable_open,
1058         .unlocked_ioctl = nvm_ctl_ioctl,
1059         .owner = THIS_MODULE,
1060         .llseek  = noop_llseek,
1061 };
1062
1063 static struct miscdevice _nvm_misc = {
1064         .minor          = MISC_DYNAMIC_MINOR,
1065         .name           = "lightnvm",
1066         .nodename       = "lightnvm/control",
1067         .fops           = &_ctl_fops,
1068 };
1069 module_misc_device(_nvm_misc);
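/*
 * Example (illustrative userspace sketch, assuming <fcntl.h>, <sys/ioctl.h>,
 * <err.h> and the uapi definitions from <linux/lightnvm.h>): querying the
 * subsystem version through the control node registered above.
 *
 *	struct nvm_ioctl_info info = { 0 };
 *	int fd = open("/dev/lightnvm/control", O_RDWR);
 *
 *	if (fd < 0 || ioctl(fd, NVM_INFO, &info) < 0)
 *		err(1, "NVM_INFO");
 *	printf("lightnvm %u.%u.%u, %u target types\n", info.version[0],
 *		info.version[1], info.version[2], info.tgtsize);
 *	close(fd);
 */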
1070
1071 MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
1072
1073 MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
1074 MODULE_LICENSE("GPL v2");
1075 MODULE_VERSION("0.1");