/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"
static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}
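/*
 * Illustrative note (not in the original source): index sequence numbers
 * cycle 1 -> 2 -> 3 -> 1 (0 is invalid), so "newer" means one nd_inc_seq()
 * step ahead. For example, best_seq(3, 1) returns 1 because
 * nd_inc_seq(3) == 1, i.e. 1 is the more recently written index.
 */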
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}
static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}
static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}
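/*
 * Worked example (illustrative numbers, not from the original source):
 * with a hypothetical 128KiB label area and 256-byte labels, tmp_nslot is
 * 131072 / 256 = 512, the free bitmap needs DIV_ROUND_UP(512, 8) = 64
 * bytes, so one index block is ALIGN(sizeof(struct nd_namespace_index)
 * + 64, NSINDEX_ALIGN) = 256 bytes and n = 1. The usable slot count then
 * comes out to (131072 - 2 * 256) / 256 = 510 labels.
 */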
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels. The minimum index
	 * block size is 256 bytes. The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}
static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels. None of these structures are ever
	 * updated in place. A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}
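/*
 * Layout sketch (illustrative, assuming a 256-byte index block): the two
 * index blocks and the label array are laid out back to back, so a valid
 * index must report myoff = 0 or 256, otheroff = the opposite offset, and
 * labeloff = 512. Anything else indicates a stale or corrupt block, which
 * is exactly what the offset checks above reject.
 */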
static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}
static void nd_label_copy(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}
static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}
static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}
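/*
 * Example output (illustrative, with a made-up uuid): a pmem namespace
 * with uuid 73616d70-6c65-4330-3031-323334353637 yields the id
 * "pmem-73616d70-6c65-4330-3031-323334353637"; labels carrying
 * NSLABEL_FLAG_LOCAL get a "blk-" prefix instead.
 */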
static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}
static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}
static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
				slot, sum);
			return false;
		}
	}

	return true;
}
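/*
 * Note on the checksum convention (added commentary): the Fletcher64 sum
 * is always computed with the checksum field itself zeroed, so validation
 * saves the stored value, zeroes the field, recomputes, and then restores
 * it. Writers mirror this by zeroing the field before nd_fletcher64() and
 * storing the result, as in __pmem_label_update() below.
 */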
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		if (test_bit(NDD_NOBLK, &nvdimm->flags))
			flags &= ~NSLABEL_FLAG_LOCAL;
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}
int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;
	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory. To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}
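	/*
	 * Worked example of the trim above (illustrative numbers only):
	 * with config_size = 131072 and max_xfer = 5000, the last of the
	 * DIV_ROUND_UP(131072, 5000) = 27 reads would waste 3928 bytes.
	 * Spreading that waste across all 27 reads trims max_xfer by
	 * 3928 / 27 = 145 down to 4855, which still covers the area in
	 * 27 reads but leaves only 13 bytes unused in the final one.
	 */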
	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call. */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}
u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
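/*
 * Added commentary on the update protocol: an index is always written to
 * the "next" slot with an incremented sequence number, and only after the
 * write succeeds do ns_current and ns_next swap. A torn write therefore
 * leaves the old index, with its older but valid sequence number, intact,
 * which is what __nd_label_validate() falls back to on the next probe.
 */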
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}
enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}
static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
		guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *victim = NULL;
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) != 0)
			continue;
		victim = label_ent;
		list_move_tail(&victim->list, &nd_mapping->labels);
		break;
	}
	if (victim) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		slot = to_slot(ndd, victim->label);
		nd_label_free_slot(ndd, slot);
		victim->label = NULL;
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}
static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}
static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}
/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
					old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		bitmap_free(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */
	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "allocated: %d\n", slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}
	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

 out:
	kfree(old_res_list);
	bitmap_free(victim_map);
	return rc;

 abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}
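/*
 * Added note on the seed sequence numbers: the init loop above writes
 * index 0 with seq 3 and index 1 with seq 2 (the "3 - i" argument).
 * best_seq(3, 2) returns 3 because nd_inc_seq(3) == 1, not 2, so a
 * freshly initialized DIMM consistently validates with index 0 as
 * current and index 1 as next, matching the assignments made here.
 */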
static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}
int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}
int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}