// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
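
/*
 * Map the write-pmem-queue (WPQ) flush hint resources advertised by
 * @nvdimm into the region's flush table, one slot per hint for @dimm.
 * Hints that fall in an already-mapped page reuse that mapping.
 */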
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
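
/*
 * Allocate the region's flush-hint table (at least one NULL slot per
 * dimm) and map each dimm's hints via nvdimm_map_flush().  Duplicate
 * hint addresses across dimms are cleared afterwards so a region flush
 * writes each unique address only once.
 */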
int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates. This should prevent the
	 * extra flushes.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}
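
/*
 * Device-model release callback: drop the dimm references taken at
 * region creation, free the per-cpu lanes, and return the region id.
 */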
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);
struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for an nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
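
/*
 * For PMEM the "size" attribute reports the full interleave set; for a
 * single-mapping BLK region it reports that mapping's size, otherwise 0.
 */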
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
						nsindex));
		}
	}
	nvdimm_bus_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);
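
/*
 * Sum the unallocated DPA across the region's mappings.  For PMEM the
 * walk restarts whenever a larger BLK-aliased overlap is discovered so
 * every mapping is accounted against the same overlap.
 */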
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);
static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);
static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);
static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);
static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);
static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);
static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr) {
		if (is_memory(dev))
			return 0400;
		else
			return 0;
	}

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}
struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
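
/*
 * Interleave-set cookie selection: v1.1 namespace labels validate
 * against cookie1, later label specifications against cookie2.
 */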
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}
void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds. Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && is_nd_region(dev)) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_region(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}
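
/* Format a single mapping as "<dimm-name>,<start>,<size>,<position>" */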
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}

#define REGION_MAPPING(idx)						\
static ssize_t mapping##idx##_show(struct device *dev,			\
		struct device_attribute *attr, char *buf)		\
{									\
	return mappingN(dev, buf, idx);					\
}									\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}
static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};
struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
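
/*
 * Sketch of the expected calling convention (callers such as the BTT
 * bracket per-lane I/O with an acquire/release pair):
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... issue I/O or update BTT metadata for this lane ...
 *	nd_region_release_lane(nd_region, lane);
 */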
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;

		if (test_bit(NDD_NOBLK, &nvdimm->flags)
				&& dev_type == &nd_blk_device_type) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	nd_region->target_node = ndr_desc->target_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	if (ndr_desc->flush)
		nd_region->flush = ndr_desc->flush;
	else
		nd_region->flush = NULL;

	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}
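
/*
 * The public region constructors below only differ in the device_type
 * they select and in how many BTT/BLK lanes they allow.
 */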
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
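
/*
 * Dispatch a region flush: use the bus-provided flush callback when one
 * was registered at region creation, otherwise fall back to writing the
 * generic flush hints.
 */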
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
	int rc = 0;

	if (!nd_region->flush)
		rc = generic_nvdimm_flush(nd_region);
	else {
		if (nd_region->flush(nd_region, bio))
			rc = -EIO;
	}

	return rc;
}

/**
 * generic_nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache(). The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);
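
/* Context passed to region_conflict() when scanning sibling regions */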
struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};
static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}