/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

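/* ida used to hand out the unique ids that name each dimm device "nmem%d" */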
static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the bus and dimm handle and return whether this bus supports
 * get_config_data commands.
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (test_bit(NDD_ALIASING, &nvdimm->flags))
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        int cmd_rc = 0;

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
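        /*
         * rc reports transport / driver errors from ->ndctl(); cmd_rc
         * carries the command's own completion status.
         */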
        rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}

int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
                           size_t offset, size_t len)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        int rc = validate_dimm(ndd), cmd_rc = 0;
        struct nd_cmd_get_config_data_hdr *cmd;
        size_t max_cmd_size, buf_offset;

        if (rc)
                return rc;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
        cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

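        /*
         * Read the config data area in chunks no larger than the dimm's
         * advertised max_xfer, advancing buf_offset each pass.
         */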
        for (buf_offset = 0; len;
             len -= cmd->in_length, buf_offset += cmd->in_length) {
                size_t cmd_size;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);

                cmd_size = sizeof(*cmd) + cmd->in_length;

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
                if (rc < 0)
                        break;
                if (cmd_rc < 0) {
                        rc = cmd_rc;
                        break;
                }

                /* out_buf should be valid, copy it into our output buffer */
                memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
        }
        kvfree(cmd);

        return rc;
}

int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        int rc = validate_dimm(ndd), cmd_rc = 0;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
        cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

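        /*
         * Mirror the read loop: write in max_xfer-sized chunks, each
         * command carrying its payload plus a trailing u32 status.
         */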
        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is returned in the last 4 bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
                if (rc < 0)
                        break;
                if (cmd_rc < 0) {
                        rc = cmd_rc;
                        break;
                }
        }
        kvfree(cmd);

        return rc;
}

void nvdimm_set_aliasing(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
        /* pmem mapping properties are private to libnvdimm */
        return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "trace\n");
        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        return sprintf(buf, "%s%s\n",
                        test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
                        test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing; userspace should
         * quiesce probing if it wants a static answer.
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
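        /* the decrement below would underflow if no label slots are free */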
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%u\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_flags.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);
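
/*
 * Example: how a bus provider might register a dimm. This is an
 * illustrative sketch only; "my_bus" and "my_dimm_data" are made-up
 * names and the chosen cmd_mask bits are just one possibility.
 *
 *	unsigned long cmd_mask = 0, flags = 0;
 *	struct nvdimm *nvdimm;
 *
 *	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
 *	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
 *	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
 *	nvdimm = nvdimm_create(my_bus, my_dimm_data, NULL, flags,
 *			cmd_mask, 0, NULL);
 *	if (!nvdimm)
 *		return -ENOMEM;
 */
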
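/*
 * alias_dpa_busy() is a device_for_each_child() callback (see
 * nd_blk_available_dpa() below). For each PMEM region that shares a
 * dimm with info->nd_mapping it walks past aliased "pmem" allocations
 * and charges the consumed capacity against info->available.
 */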
int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        int i;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping  = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;

        /*
         * In the allocation case ->res is set to free space that we are
         * looking to validate against PMEM aliasing collision rules
         * (i.e. BLK is allocated after all aliased PMEM).
         */
        if (info->res) {
                if (info->res->start >= nd_mapping->start
                                && info->res->start < map_end)
                        /* pass */;
                else
                        return 0;
        }

 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping.
         */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "pmem", 4) != 0)
                        continue;
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
                        new = max(blk_start, min(map_end + 1, res->end + 1));
                        if (new != blk_start) {
                                blk_start = new;
                                goto retry;
                        }
                }
        }

        /* update the free space range with the probed blk_start */
        if (info->res && blk_start > info->res->start) {
                info->res->start = max(info->res->start, blk_start);
                if (info->res->start > info->res->end)
                        info->res->end = info->res->start - 1;
                return 1;
        }

        info->available -= blk_start - nd_mapping->start;

        return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of a BLK region
 * @nd_region: BLK region whose unused capacity is being accounted
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
                .res = NULL,
        };
        struct resource *res;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
                info.available -= resource_size(res);
        }

        return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *                         contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
                                           struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nvdimm_bus *nvdimm_bus;
        resource_size_t max = 0;
        struct resource *res;

        /* if a dimm is disabled the available capacity is zero */
        if (!ndd)
                return 0;

        nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
                return 0;
        for_each_dpa_resource(ndd, res) {
                if (strcmp(res->name, "pmem-reserve") != 0)
                        continue;
                if (resource_size(res) > max)
                        max = resource_size(res);
        }
        release_free_pmem(nvdimm_bus, nd_mapping);
        return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, res->start));
                        else if (res->end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += resource_size(res);
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += resource_size(res);
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}

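/*
 * Note: the kmemdup()'d copy of label_id becomes the resource name.  It
 * is owned by the resource from then on and freed in nvdimm_free_dpa().
 */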
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;

        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}