// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/libnvdimm.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fcntl.h>
#include <linux/async.h>
#include <linux/genhd.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
#include "pfn.h"

int nvdimm_major;
static int nvdimm_bus_major;
static struct class *nd_class;
static DEFINE_IDA(nd_ida);

static int to_nd_device_type(struct device *dev)
{
	if (is_nvdimm(dev))
		return ND_DEVICE_DIMM;
	else if (is_memory(dev))
		return ND_DEVICE_REGION_PMEM;
	else if (is_nd_blk(dev))
		return ND_DEVICE_REGION_BLK;
	else if (is_nd_dax(dev))
		return ND_DEVICE_DAX_PMEM;
	else if (is_nd_region(dev->parent))
		return nd_region_to_nstype(to_nd_region(dev->parent));

	return 0;
}

static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
			to_nd_device_type(dev));
}

static struct module *to_bus_provider(struct device *dev)
{
	/* pin bus providers while regions are enabled */
	if (is_nd_region(dev)) {
		struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

		return nvdimm_bus->nd_desc->module;
	}
	return NULL;
}

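/*
 * probe_active counts in-flight ->probe() calls on the bus.
 * wait_nvdimm_bus_probe_idle() sleeps on probe_wait until this count drops
 * to zero, which is how label updates are held off while the kernel is
 * still probing regions (see nd_cmd_clear_to_send()).
 */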
static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
	nvdimm_bus_lock(&nvdimm_bus->dev);
	nvdimm_bus->probe_active++;
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
	nvdimm_bus_lock(&nvdimm_bus->dev);
	if (--nvdimm_bus->probe_active == 0)
		wake_up(&nvdimm_bus->probe_wait);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static int nvdimm_bus_probe(struct device *dev)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
	struct module *provider = to_bus_provider(dev);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	int rc;

	if (!try_module_get(provider))
		return -ENXIO;

	dev_dbg(&nvdimm_bus->dev, "START: %s.probe(%s)\n",
			dev->driver->name, dev_name(dev));

	nvdimm_bus_probe_start(nvdimm_bus);
	rc = nd_drv->probe(dev);
	if (rc == 0)
		nd_region_probe_success(nvdimm_bus, dev);
	else
		nd_region_disable(nvdimm_bus, dev);
	nvdimm_bus_probe_end(nvdimm_bus);

	dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
			dev_name(dev), rc);

	if (rc != 0)
		module_put(provider);
	return rc;
}

static int nvdimm_bus_remove(struct device *dev)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
	struct module *provider = to_bus_provider(dev);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	int rc = 0;

	if (nd_drv->remove)
		rc = nd_drv->remove(dev);
	nd_region_disable(nvdimm_bus, dev);

	dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
			dev_name(dev), rc);
	module_put(provider);
	return rc;
}

static void nvdimm_bus_shutdown(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nd_device_driver *nd_drv = NULL;

	if (dev->driver)
		nd_drv = to_nd_device_driver(dev->driver);

	if (nd_drv && nd_drv->shutdown) {
		nd_drv->shutdown(dev);
		dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
				dev->driver->name, dev_name(dev));
	}
}

void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
	nd_device_lock(dev);
	if (dev->driver) {
		struct nd_device_driver *nd_drv;

		nd_drv = to_nd_device_driver(dev->driver);
		if (nd_drv->notify)
			nd_drv->notify(dev, event);
	}
	nd_device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);

void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

	if (!nvdimm_bus)
		return;

	/* caller is responsible for holding a reference on the device */
	nd_device_notify(&nd_region->dev, event);
}
EXPORT_SYMBOL_GPL(nvdimm_region_notify);

struct clear_badblocks_context {
	resource_size_t phys, cleared;
};

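/*
 * device_for_each_child() callback: for every pmem region that contains the
 * cleared physical range, drop the corresponding sectors from the region's
 * badblocks list and notify the region's sysfs badblocks state.
 */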
static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
{
	struct clear_badblocks_context *ctx = data;
	struct nd_region *nd_region;
	resource_size_t ndr_end;
	sector_t sector;

	/* make sure device is a region */
	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;

	/* make sure we are in the region */
	if (ctx->phys < nd_region->ndr_start
			|| (ctx->phys + ctx->cleared) > ndr_end)
		return 0;

	sector = (ctx->phys - nd_region->ndr_start) / 512;
	badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);

	if (nd_region->bb_state)
		sysfs_notify_dirent(nd_region->bb_state);

	return 0;
}

static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t phys, u64 cleared)
{
	struct clear_badblocks_context ctx = {
		.phys = phys,
		.cleared = cleared,
	};

	device_for_each_child(&nvdimm_bus->dev, &ctx,
			nvdimm_clear_badblocks_region);
}

static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t phys, u64 cleared)
{
	if (cleared > 0)
		badrange_forget(&nvdimm_bus->badrange, phys, cleared);

	if (cleared > 0 && cleared / 512)
		nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
}

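/*
 * Clear poison in [phys, phys + len): query ARS_CAP for the provider's
 * clear-error unit (the range must be aligned to it), issue CLEAR_ERROR,
 * then retire the cleared bytes from the bus badrange list and the region
 * badblocks.  Returns the number of bytes cleared or a negative errno.
 */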
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc;
	struct nd_cmd_clear_error clear_err;
	struct nd_cmd_ars_cap ars_cap;
	u32 clear_err_unit, mask;
	unsigned int noio_flag;
	int cmd_rc, rc;

	if (!nvdimm_bus)
		return -ENXIO;

	nd_desc = nvdimm_bus->nd_desc;
	/*
	 * if ndctl does not exist, it's PMEM_LEGACY and
	 * we want to just pretend everything is handled.
	 */
	if (!nd_desc->ndctl)
		return len;

	memset(&ars_cap, 0, sizeof(ars_cap));
	ars_cap.address = phys;
	ars_cap.length = len;
	noio_flag = memalloc_noio_save();
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap,
			sizeof(ars_cap), &cmd_rc);
	memalloc_noio_restore(noio_flag);
	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	clear_err_unit = ars_cap.clear_err_unit;
	if (!clear_err_unit || !is_power_of_2(clear_err_unit))
		return -ENXIO;

	mask = clear_err_unit - 1;
	if ((phys | len) & mask)
		return -ENXIO;
	memset(&clear_err, 0, sizeof(clear_err));
	clear_err.address = phys;
	clear_err.length = len;
	noio_flag = memalloc_noio_save();
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err,
			sizeof(clear_err), &cmd_rc);
	memalloc_noio_restore(noio_flag);
	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;

	nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);

	return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);

static struct bus_type nvdimm_bus_type = {
	.name = "nd",
	.uevent = nvdimm_bus_uevent,
	.match = nvdimm_bus_match,
	.probe = nvdimm_bus_probe,
	.remove = nvdimm_bus_remove,
	.shutdown = nvdimm_bus_shutdown,
};

static void nvdimm_bus_release(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	ida_simple_remove(&nd_ida, nvdimm_bus->id);
	kfree(nvdimm_bus);
}

static bool is_nvdimm_bus(struct device *dev)
{
	return dev->release == nvdimm_bus_release;
}

struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
{
	struct device *dev;

	for (dev = nd_dev; dev; dev = dev->parent)
		if (is_nvdimm_bus(dev))
			break;
	dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
	if (dev)
		return to_nvdimm_bus(dev);
	return NULL;
}

struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	WARN_ON(!is_nvdimm_bus(dev));
	return nvdimm_bus;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus);

struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
{
	return to_nvdimm_bus(nvdimm->dev.parent);
}
EXPORT_SYMBOL_GPL(nvdimm_to_bus);

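/*
 * nvdimm_bus_register() ties a provider-supplied descriptor to a new
 * "ndbus%d" device.  A minimal usage sketch (the provider-side names here
 * are illustrative, not part of this file):
 *
 *	static struct nvdimm_bus_descriptor my_nd_desc = {
 *		.ndctl = my_provider_ctl,
 *		.module = THIS_MODULE,
 *	};
 *
 *	nvdimm_bus = nvdimm_bus_register(parent, &my_nd_desc);
 *	if (!nvdimm_bus)
 *		return -ENOMEM;
 *	...
 *	nvdimm_bus_unregister(nvdimm_bus);
 *
 * Returns NULL on allocation or registration failure.
 */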
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct nvdimm_bus *nvdimm_bus;
	int rc;

	nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
	if (!nvdimm_bus)
		return NULL;
	INIT_LIST_HEAD(&nvdimm_bus->list);
	INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
	init_waitqueue_head(&nvdimm_bus->probe_wait);
	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
	if (nvdimm_bus->id < 0) {
		kfree(nvdimm_bus);
		return NULL;
	}
	mutex_init(&nvdimm_bus->reconfig_mutex);
	badrange_init(&nvdimm_bus->badrange);
	nvdimm_bus->nd_desc = nd_desc;
	nvdimm_bus->dev.parent = parent;
	nvdimm_bus->dev.release = nvdimm_bus_release;
	nvdimm_bus->dev.groups = nd_desc->attr_groups;
	nvdimm_bus->dev.bus = &nvdimm_bus_type;
	nvdimm_bus->dev.of_node = nd_desc->of_node;
	dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
	rc = device_register(&nvdimm_bus->dev);
	if (rc) {
		dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
		goto err;
	}

	return nvdimm_bus;
 err:
	put_device(&nvdimm_bus->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_register);

void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;
	device_unregister(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);

static int child_unregister(struct device *dev, void *data)
{
	/*
	 * the singular ndctl class device per bus needs to be
	 * "device_destroy"ed, so skip it here
	 *
	 * i.e. remove classless children
	 */
	if (dev->class)
		return 0;

	if (is_nvdimm(dev)) {
		struct nvdimm *nvdimm = to_nvdimm(dev);
		bool dev_put = false;

		/* We are shutting down. Make state frozen artificially. */
		nvdimm_bus_lock(dev);
		nvdimm->sec.state = NVDIMM_SECURITY_FROZEN;
		if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
			dev_put = true;
		nvdimm_bus_unlock(dev);
		cancel_delayed_work_sync(&nvdimm->dwork);
		if (dev_put)
			put_device(dev);
	}
	nd_device_unregister(dev, ND_SYNC);

	return 0;
}

static void free_badrange_list(struct list_head *badrange_list)
{
	struct badrange_entry *bre, *next;

	list_for_each_entry_safe(bre, next, badrange_list, list) {
		list_del(&bre->list);
		kfree(bre);
	}
	list_del_init(badrange_list);
}

static int nd_bus_remove(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	mutex_lock(&nvdimm_bus_list_mutex);
	list_del_init(&nvdimm_bus->list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	nd_synchronize();
	device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);

	spin_lock(&nvdimm_bus->badrange.lock);
	free_badrange_list(&nvdimm_bus->badrange.list);
	spin_unlock(&nvdimm_bus->badrange.lock);

	nvdimm_bus_destroy_ndctl(nvdimm_bus);

	return 0;
}

static int nd_bus_probe(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	int rc;

	rc = nvdimm_bus_create_ndctl(nvdimm_bus);
	if (rc)
		return rc;

	mutex_lock(&nvdimm_bus_list_mutex);
	list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	/* enable bus provider attributes to look up their local context */
	dev_set_drvdata(dev, nvdimm_bus->nd_desc);

	return 0;
}

static struct nd_device_driver nd_bus_driver = {
	.probe = nd_bus_probe,
	.remove = nd_bus_remove,
	.drv = {
		.name = "nd_bus",
		.suppress_bind_attrs = true,
		.bus = &nvdimm_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(drv);

	if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver)
		return true;

	return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
}

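/*
 * Device registration and unregistration are run out of line in the
 * exclusive nd_async_domain below; nd_synchronize() flushes that domain so
 * teardown paths can wait for any queued registration work to complete.
 */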
static ASYNC_DOMAIN_EXCLUSIVE(nd_async_domain);

void nd_synchronize(void)
{
	async_synchronize_full_domain(&nd_async_domain);
}
EXPORT_SYMBOL_GPL(nd_synchronize);

static void nd_async_device_register(void *d, async_cookie_t cookie)
{
	struct device *dev = d;

	if (device_add(dev) != 0) {
		dev_err(dev, "%s: failed\n", __func__);
		put_device(dev);
	}
	put_device(dev);
	if (dev->parent)
		put_device(dev->parent);
}

static void nd_async_device_unregister(void *d, async_cookie_t cookie)
{
	struct device *dev = d;

	/* flush bus operations before delete */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);

	device_unregister(dev);
	put_device(dev);
}

void __nd_device_register(struct device *dev)
{
	if (!dev)
		return;

	/*
	 * Ensure that region devices always have their NUMA node set as
	 * early as possible. This way we are able to make certain that
	 * any memory associated with the creation and the creation
	 * itself of the region is associated with the correct node.
	 */
	if (is_nd_region(dev))
		set_dev_node(dev, to_nd_region(dev)->numa_node);

	dev->bus = &nvdimm_bus_type;
	if (dev->parent) {
		get_device(dev->parent);
		if (dev_to_node(dev) == NUMA_NO_NODE)
			set_dev_node(dev, dev_to_node(dev->parent));
	}
	get_device(dev);

	async_schedule_dev_domain(nd_async_device_register, dev,
				  &nd_async_domain);
}

void nd_device_register(struct device *dev)
{
	device_initialize(dev);
	__nd_device_register(dev);
}
EXPORT_SYMBOL(nd_device_register);

void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
	bool killed;

	switch (mode) {
	case ND_ASYNC:
		/*
		 * In the async case this is being triggered with the
		 * device lock held and the unregistration work needs to
		 * be moved out of line iff this thread has won the
		 * race to schedule the deletion.
		 */
		if (!kill_device(dev))
			return;

		get_device(dev);
		async_schedule_domain(nd_async_device_unregister, dev,
				&nd_async_domain);
		break;
	case ND_SYNC:
		/*
		 * In the sync case the device is being unregistered due
		 * to a state change of the parent. Claim the kill state
		 * to synchronize against other unregistration requests,
		 * or otherwise let the async path handle it if the
		 * unregistration was already queued.
		 */
		nd_device_lock(dev);
		killed = kill_device(dev);
		nd_device_unlock(dev);

		if (!killed)
			return;

		nd_synchronize();
		device_unregister(dev);
		break;
	}
}
EXPORT_SYMBOL(nd_device_unregister);

/**
 * __nd_driver_register() - register a region or a namespace driver
 * @nd_drv: driver to register
 * @owner: automatically set by nd_driver_register() macro
 * @mod_name: automatically set by nd_driver_register() macro
 */
int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
		const char *mod_name)
{
	struct device_driver *drv = &nd_drv->drv;

	if (!nd_drv->type) {
		pr_debug("driver type bitmask not set (%ps)\n",
				__builtin_return_address(0));
		return -EINVAL;
	}

	if (!nd_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", mod_name);
		return -EINVAL;
	}

	drv->bus = &nvdimm_bus_type;
	drv->owner = owner;
	drv->mod_name = mod_name;

	return driver_register(drv);
}
EXPORT_SYMBOL(__nd_driver_register);

int nvdimm_revalidate_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk)->parent;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int disk_ro = get_disk_ro(disk);

	/*
	 * Upgrade to read-only if the region is read-only; preserve as
	 * read-only if the disk is already read-only.
	 */
	if (disk_ro || nd_region->ro == disk_ro)
		return 0;

	dev_info(dev, "%s read-only, marking %s read-only\n",
			dev_name(&nd_region->dev), disk->disk_name);
	set_disk_ro(disk, 1);

	return 0;
}
EXPORT_SYMBOL(nvdimm_revalidate_disk);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, ND_DEVICE_MODALIAS_FMT "\n",
			to_nd_device_type(dev));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *nd_device_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_devtype.attr,
	NULL,
};

/**
 * nd_device_attribute_group - generic attributes for all devices on an nd bus
 */
struct attribute_group nd_device_attribute_group = {
	.attrs = nd_device_attributes,
};
EXPORT_SYMBOL_GPL(nd_device_attribute_group);

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static struct attribute *nd_numa_attributes[] = {
	&dev_attr_numa_node.attr,
	NULL,
};

static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	if (!IS_ENABLED(CONFIG_NUMA))
		return 0;

	return a->mode;
}

/**
 * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
 */
struct attribute_group nd_numa_attribute_group = {
	.attrs = nd_numa_attributes,
	.is_visible = nd_numa_attr_visible,
};
EXPORT_SYMBOL_GPL(nd_numa_attribute_group);

int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
	dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
	struct device *dev;

	dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus,
			"ndctl%d", nvdimm_bus->id);

	if (IS_ERR(dev))
		dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n",
				nvdimm_bus->id, PTR_ERR(dev));
	return PTR_ERR_OR_ZERO(dev);
}

void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
{
	device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
}

static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
	[ND_CMD_IMPLEMENTED] = { },
	[ND_CMD_SMART] = {
		.out_num = 2,
		.out_sizes = { 4, 128, },
	},
	[ND_CMD_SMART_THRESHOLD] = {
		.out_num = 2,
		.out_sizes = { 4, 8, },
	},
	[ND_CMD_DIMM_FLAGS] = {
		.out_num = 2,
		.out_sizes = { 4, 4 },
	},
	[ND_CMD_GET_CONFIG_SIZE] = {
		.out_num = 3,
		.out_sizes = { 4, 4, 4, },
	},
	[ND_CMD_GET_CONFIG_DATA] = {
		.in_num = 2,
		.in_sizes = { 4, 4, },
		.out_num = 2,
		.out_sizes = { 4, UINT_MAX, },
	},
	[ND_CMD_SET_CONFIG_DATA] = {
		.in_num = 3,
		.in_sizes = { 4, 4, UINT_MAX, },
		.out_num = 1,
		.out_sizes = { 4, },
	},
	[ND_CMD_VENDOR] = {
		.in_num = 3,
		.in_sizes = { 4, 4, UINT_MAX, },
		.out_num = 3,
		.out_sizes = { 4, 4, UINT_MAX, },
	},
	[ND_CMD_CALL] = {
		.in_num = 2,
		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
		.out_num = 1,
		.out_sizes = { UINT_MAX, },
	},
};

const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd)
{
	if (cmd < ARRAY_SIZE(__nd_cmd_dimm_descs))
		return &__nd_cmd_dimm_descs[cmd];
	return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_dimm_desc);

static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
	[ND_CMD_IMPLEMENTED] = { },
	[ND_CMD_ARS_CAP] = {
		.in_num = 2,
		.in_sizes = { 8, 8, },
		.out_num = 4,
		.out_sizes = { 4, 4, 4, 4, },
	},
	[ND_CMD_ARS_START] = {
		.in_num = 5,
		.in_sizes = { 8, 8, 2, 1, 5, },
		.out_num = 2,
		.out_sizes = { 4, 4, },
	},
	[ND_CMD_ARS_STATUS] = {
		.out_num = 3,
		.out_sizes = { 4, 4, UINT_MAX, },
	},
	[ND_CMD_CLEAR_ERROR] = {
		.in_num = 2,
		.in_sizes = { 8, 8, },
		.out_num = 3,
		.out_sizes = { 4, 4, 8, },
	},
	[ND_CMD_CALL] = {
		.in_num = 2,
		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
		.out_num = 1,
		.out_sizes = { UINT_MAX, },
	},
};

const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
{
	if (cmd < ARRAY_SIZE(__nd_cmd_bus_descs))
		return &__nd_cmd_bus_descs[cmd];
	return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_bus_desc);

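/*
 * nd_cmd_in_size()/nd_cmd_out_size() resolve the size of field @idx in a
 * command payload.  Fixed sizes come straight from the descriptor tables
 * above; entries marked UINT_MAX are variable-length and are read out of
 * the command header already copied into the envelope (for example
 * nd_cmd_set_config_hdr.in_length or nd_cmd_pkg.nd_size_in/nd_size_out).
 */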
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, void *buf)
{
	if (idx >= desc->in_num)
		return UINT_MAX;

	if (desc->in_sizes[idx] < UINT_MAX)
		return desc->in_sizes[idx];

	if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA && idx == 2) {
		struct nd_cmd_set_config_hdr *hdr = buf;

		return hdr->in_length;
	} else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) {
		struct nd_cmd_vendor_hdr *hdr = buf;

		return hdr->in_length;
	} else if (cmd == ND_CMD_CALL) {
		struct nd_cmd_pkg *pkg = buf;

		return pkg->nd_size_in;
	}

	return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_in_size);

u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
		const u32 *out_field, unsigned long remainder)
{
	if (idx >= desc->out_num)
		return UINT_MAX;

	if (desc->out_sizes[idx] < UINT_MAX)
		return desc->out_sizes[idx];

	if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && idx == 1)
		return in_field[1];
	else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
		return out_field[1];
	else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) {
		/*
		 * Per table 9-276 ARS Data in ACPI 6.1, out_field[1] is
		 * "Size of Output Buffer in bytes, including this
		 * field."
		 */
		if (out_field[1] < 4)
			return 0;
		/*
		 * ACPI 6.1 is ambiguous if 'status' is included in the
		 * output size. If we encounter an output size that
		 * overshoots the remainder by 4 bytes, assume it was
		 * including 'status'.
		 */
		if (out_field[1] - 4 == remainder)
			return remainder;
		return out_field[1] - 8;
	} else if (cmd == ND_CMD_CALL) {
		struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;

		return pkg->nd_size_out;
	}

	return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_out_size);

void wait_nvdimm_bus_probe_idle(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	do {
		if (nvdimm_bus->probe_active == 0)
			break;
		nvdimm_bus_unlock(&nvdimm_bus->dev);
		wait_event(nvdimm_bus->probe_wait,
				nvdimm_bus->probe_active == 0);
		nvdimm_bus_lock(&nvdimm_bus->dev);
	} while (true);
}

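/*
 * Helpers for nd_cmd_clear_to_send(): a userspace CLEAR_ERROR is rejected
 * with -EBUSY when the range being cleared covers a namespace (including a
 * btt- or pfn-backed namespace) that currently has a driver attached, since
 * clear-error is expected to be routed through the pmem driver in that case.
 */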
static int nd_pmem_forget_poison_check(struct device *dev, void *data)
{
	struct nd_cmd_clear_error *clear_err =
		(struct nd_cmd_clear_error *)data;
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	struct nd_namespace_io *nsio;
	resource_size_t offset = 0, end_trunc = 0, start, end, pstart, pend;

	if (nd_dax || !dev->driver)
		return 0;

	start = clear_err->address;
	end = clear_err->address + clear_err->cleared - 1;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return 0;
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	pstart = nsio->res.start + offset;
	pend = nsio->res.end - end_trunc;

	if ((pstart >= start) && (pend <= end))
		return -EBUSY;

	return 0;
}

static int nd_ns_forget_poison_check(struct device *dev, void *data)
{
	return device_for_each_child(dev, data, nd_pmem_forget_poison_check);
}

/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
		struct nvdimm *nvdimm, unsigned int cmd, void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	/* ask the bus provider if it would like to block this request */
	if (nd_desc->clear_to_send) {
		int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd, data);

		if (rc)
			return rc;
	}

	/* require clear error to go through the pmem driver */
	if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
		return device_for_each_child(&nvdimm_bus->dev, data,
				nd_ns_forget_poison_check);

	if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
		return 0;

	/* prevent label manipulation while the kernel owns label updates */
	wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
	if (atomic_read(&nvdimm->busy))
		return -EBUSY;
	return 0;
}

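/*
 * Common ioctl worker for the bus ("ndctl") and dimm ("dimmctl") character
 * devices: envelope sizes are derived from the command descriptor tables,
 * the full payload is staged in a vmalloc()'d buffer, clear_to_send policy
 * is checked under the bus lock, and the provider ->ndctl() callback does
 * the actual work before results are copied back to userspace.
 */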
static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
		int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	const struct nd_cmd_desc *desc = NULL;
	unsigned int cmd = _IOC_NR(ioctl_cmd);
	struct device *dev = &nvdimm_bus->dev;
	void __user *p = (void __user *) arg;
	char *out_env = NULL, *in_env = NULL;
	const char *cmd_name, *dimm_name;
	u32 in_len = 0, out_len = 0;
	unsigned int func = cmd;
	unsigned long cmd_mask;
	struct nd_cmd_pkg pkg;
	int rc, i, cmd_rc;
	void *buf = NULL;
	u64 buf_len = 0;

	if (nvdimm) {
		desc = nd_cmd_dimm_desc(cmd);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm->cmd_mask;
		dimm_name = dev_name(&nvdimm->dev);
	} else {
		desc = nd_cmd_bus_desc(cmd);
		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dimm_name = "bus";
	}

	if (cmd == ND_CMD_CALL) {
		if (copy_from_user(&pkg, p, sizeof(pkg)))
			return -EFAULT;
	}

	if (!desc || (desc->out_num + desc->in_num == 0) ||
			!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	/* fail write commands (when read-only) */
	if (read_only)
		switch (cmd) {
		case ND_CMD_VENDOR:
		case ND_CMD_SET_CONFIG_DATA:
		case ND_CMD_ARS_START:
		case ND_CMD_CLEAR_ERROR:
		case ND_CMD_CALL:
			dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
					nvdimm ? nvdimm_cmd_name(cmd)
					: nvdimm_bus_cmd_name(cmd));
			return -EPERM;
		default:
			break;
		}

	/* process an input envelope */
	in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
	if (!in_env)
		return -ENOMEM;
	for (i = 0; i < desc->in_num; i++) {
		u32 in_size, copy;

		in_size = nd_cmd_in_size(nvdimm, cmd, desc, i, in_env);
		if (in_size == UINT_MAX) {
			dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		if (in_len < ND_CMD_MAX_ENVELOPE)
			copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
		else
			copy = 0;
		if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
			rc = -EFAULT;
			goto out;
		}
		in_len += in_size;
	}

	if (cmd == ND_CMD_CALL) {
		func = pkg.nd_command;
		dev_dbg(dev, "%s, idx: %llu, in: %u, out: %u, len %llu\n",
				dimm_name, pkg.nd_command,
				in_len, out_len, buf_len);
	}

	/* process an output envelope */
	out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
	if (!out_env) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
				(u32 *) in_env, (u32 *) out_env, 0);
		u32 copy;

		if (out_size == UINT_MAX) {
			dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -EFAULT;
			goto out;
		}
		if (out_len < ND_CMD_MAX_ENVELOPE)
			copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
		else
			copy = 0;
		if (copy && copy_from_user(&out_env[out_len],
					p + in_len + out_len, copy)) {
			rc = -EFAULT;
			goto out;
		}
		out_len += out_size;
	}

	buf_len = (u64) out_len + (u64) in_len;
	if (buf_len > ND_IOCTL_MAX_BUFLEN) {
		dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
				cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
		rc = -EINVAL;
		goto out;
	}

	buf = vmalloc(buf_len);
	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user(buf, p, buf_len)) {
		rc = -EFAULT;
		goto out;
	}

	nvdimm_bus_lock(&nvdimm_bus->dev);
	rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
	if (rc)
		goto out_unlock;

	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc);
	if (rc < 0)
		goto out_unlock;

	if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
		struct nd_cmd_clear_error *clear_err = buf;

		nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
				clear_err->cleared);
	}

	if (copy_to_user(p, buf, buf_len))
		rc = -EFAULT;

out_unlock:
	nvdimm_bus_unlock(&nvdimm_bus->dev);
out:
	kfree(in_env);
	kfree(out_env);
	vfree(buf);
	return rc;
}

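/*
 * nd_ioctl() resolves the target bus by the minor number stashed by
 * nd_open() and issues the command with a NULL nvdimm; nvdimm_ioctl()
 * below resolves the target DIMM via match_dimm() before calling
 * __nd_ioctl().
 */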
static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long id = (long) file->private_data;
	int rc = -ENXIO, ro;
	struct nvdimm_bus *nvdimm_bus;

	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
	mutex_lock(&nvdimm_bus_list_mutex);
	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
		if (nvdimm_bus->id == id) {
			rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
			break;
		}
	}
	mutex_unlock(&nvdimm_bus_list_mutex);

	return rc;
}

static int match_dimm(struct device *dev, void *data)
{
	long id = (long) data;

	if (is_nvdimm(dev)) {
		struct nvdimm *nvdimm = to_nvdimm(dev);

		return nvdimm->id == id;
	}

	return 0;
}

static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int rc = -ENXIO, ro;
	struct nvdimm_bus *nvdimm_bus;

	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
	mutex_lock(&nvdimm_bus_list_mutex);
	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
		struct device *dev = device_find_child(&nvdimm_bus->dev,
				file->private_data, match_dimm);
		struct nvdimm *nvdimm;

		if (!dev)
			continue;

		nvdimm = to_nvdimm(dev);
		rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
		put_device(dev);
		break;
	}
	mutex_unlock(&nvdimm_bus_list_mutex);

	return rc;
}

static int nd_open(struct inode *inode, struct file *file)
{
	long minor = iminor(inode);

	file->private_data = (void *) minor;
	return 0;
}

static const struct file_operations nvdimm_bus_fops = {
	.owner = THIS_MODULE,
	.open = nd_open,
	.unlocked_ioctl = nd_ioctl,
	.compat_ioctl = nd_ioctl,
	.llseek = noop_llseek,
};

static const struct file_operations nvdimm_fops = {
	.owner = THIS_MODULE,
	.open = nd_open,
	.unlocked_ioctl = nvdimm_ioctl,
	.compat_ioctl = nvdimm_ioctl,
	.llseek = noop_llseek,
};

int __init nvdimm_bus_init(void)
{
	int rc;

	rc = bus_register(&nvdimm_bus_type);
	if (rc)
		return rc;

	rc = register_chrdev(0, "ndctl", &nvdimm_bus_fops);
	if (rc < 0)
		goto err_bus_chrdev;
	nvdimm_bus_major = rc;

	rc = register_chrdev(0, "dimmctl", &nvdimm_fops);
	if (rc < 0)
		goto err_dimm_chrdev;
	nvdimm_major = rc;

	nd_class = class_create(THIS_MODULE, "nd");
	if (IS_ERR(nd_class)) {
		rc = PTR_ERR(nd_class);
		goto err_class;
	}

	rc = driver_register(&nd_bus_driver.drv);
	if (rc)
		goto err_nd_bus;

	return 0;

 err_nd_bus:
	class_destroy(nd_class);
 err_class:
	unregister_chrdev(nvdimm_major, "dimmctl");
 err_dimm_chrdev:
	unregister_chrdev(nvdimm_bus_major, "ndctl");
 err_bus_chrdev:
	bus_unregister(&nvdimm_bus_type);

	return rc;
}

void nvdimm_bus_exit(void)
{
	driver_unregister(&nd_bus_driver.drv);
	class_destroy(nd_class);
	unregister_chrdev(nvdimm_bus_major, "ndctl");
	unregister_chrdev(nvdimm_major, "dimmctl");
	bus_unregister(&nvdimm_bus_type);
	ida_destroy(&nd_ida);
}