]> asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/nvdimm/pfn_devs.c
Merge drm/drm-next into drm-misc-next
[linux.git] / drivers / nvdimm / pfn_devs.c
index 3e7b11cf1aaec67f69ed4faf7d3a7b3c48215d5e..60d81fae06eeae154c0a8aa87ca9e56f3345500c 100644 (file)
@@ -103,39 +103,42 @@ static ssize_t align_show(struct device *dev,
        return sprintf(buf, "%ld\n", nd_pfn->align);
 }
 
-static const unsigned long *nd_pfn_supported_alignments(void)
+static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
 {
-       /*
-        * This needs to be a non-static variable because the *_SIZE
-        * macros aren't always constants.
-        */
-       const unsigned long supported_alignments[] = {
-               PAGE_SIZE,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               HPAGE_PMD_SIZE,
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-               HPAGE_PUD_SIZE,
-#endif
-#endif
-               0,
-       };
-       static unsigned long data[ARRAY_SIZE(supported_alignments)];
 
-       memcpy(data, supported_alignments, sizeof(data));
+       alignments[0] = PAGE_SIZE;
+
+       if (has_transparent_hugepage()) {
+               alignments[1] = HPAGE_PMD_SIZE;
+               if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+                       alignments[2] = HPAGE_PUD_SIZE;
+       }
 
-       return data;
+       return alignments;
+}
+
+/*
+ * Use pmd mapping if supported as default alignment
+ */
+static unsigned long nd_pfn_default_alignment(void)
+{
+
+       if (has_transparent_hugepage())
+               return HPAGE_PMD_SIZE;
+       return PAGE_SIZE;
 }
 
 static ssize_t align_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
+       unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
        ssize_t rc;
 
        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_size_select_store(dev, buf, &nd_pfn->align,
-                       nd_pfn_supported_alignments());
+                       nd_pfn_supported_alignments(aligns));
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
@@ -259,7 +262,10 @@ static DEVICE_ATTR_RO(size);
 static ssize_t supported_alignments_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       return nd_size_select_show(0, nd_pfn_supported_alignments(), buf);
+       unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
+
+       return nd_size_select_show(0,
+                       nd_pfn_supported_alignments(aligns), buf);
 }
 static DEVICE_ATTR_RO(supported_alignments);
 
@@ -302,7 +308,7 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                return NULL;
 
        nd_pfn->mode = PFN_MODE_NONE;
-       nd_pfn->align = PFN_DEFAULT_ALIGNMENT;
+       nd_pfn->align = nd_pfn_default_alignment();
        dev = &nd_pfn->dev;
        device_initialize(&nd_pfn->dev);
        if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
@@ -412,6 +418,21 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
        return 0;
 }
 
+static bool nd_supported_alignment(unsigned long align)
+{
+       int i;
+       unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };
+
+       if (align == 0)
+               return false;
+
+       nd_pfn_supported_alignments(supported);
+       for (i = 0; supported[i]; i++)
+               if (align == supported[i])
+                       return true;
+       return false;
+}
+
 /**
  * nd_pfn_validate - read and validate info-block
  * @nd_pfn: fsdax namespace runtime state / properties
@@ -460,6 +481,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
        if (__le16_to_cpu(pfn_sb->version_minor) < 2)
                pfn_sb->align = 0;
 
+       if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
+               pfn_sb->page_struct_size = cpu_to_le16(64);
+               pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
+       }
+
        switch (le32_to_cpu(pfn_sb->mode)) {
        case PFN_MODE_RAM:
        case PFN_MODE_PMEM:
@@ -475,6 +501,34 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
                align = 1UL << ilog2(offset);
        mode = le32_to_cpu(pfn_sb->mode);
 
+       if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
+                       (mode == PFN_MODE_PMEM)) {
+               dev_err(&nd_pfn->dev,
+                               "init failed, page size mismatch %d\n",
+                               le32_to_cpu(pfn_sb->page_size));
+               return -EOPNOTSUPP;
+       }
+
+       if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
+                       (mode == PFN_MODE_PMEM)) {
+               dev_err(&nd_pfn->dev,
+                               "init failed, struct page size mismatch %d\n",
+                               le16_to_cpu(pfn_sb->page_struct_size));
+               return -EOPNOTSUPP;
+       }
+
+       /*
+        * Check whether we support the alignment. For Dax if the
+        * superblock alignment is not matching, we won't initialize
+        * the device.
+        */
+       if (!nd_supported_alignment(align) &&
+                       !memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
+               dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
+                               "%ld:%ld\n", nd_pfn->align, align);
+               return -EOPNOTSUPP;
+       }
+
        if (!nd_pfn->uuid) {
                /*
                 * When probing a namespace via nd_pfn_probe() the uuid
@@ -618,9 +672,11 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t base = nsio->res.start + start_pad;
+       resource_size_t end = nsio->res.end - end_trunc;
        struct vmem_altmap __altmap = {
                .base_pfn = init_altmap_base(base),
                .reserve = init_altmap_reserve(base),
+               .end_pfn = PHYS_PFN(end),
        };
 
        memcpy(res, &nsio->res, sizeof(*res));
@@ -655,6 +711,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        resource_size_t start, size;
        struct nd_region *nd_region;
        unsigned long npfns, align;
+       u32 end_trunc;
        struct nd_pfn_sb *pfn_sb;
        phys_addr_t offset;
        const char *sig;
@@ -696,13 +753,22 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        size = resource_size(&nsio->res);
        npfns = PHYS_PFN(size - SZ_8K);
        align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
+       end_trunc = start + size - ALIGN_DOWN(start + size, align);
        if (nd_pfn->mode == PFN_MODE_PMEM) {
                /*
                 * The altmap should be padded out to the block size used
                 * when populating the vmemmap. This *should* be equal to
                 * PMD_SIZE for most architectures.
+                *
+                * Also make sure size of struct page is less than 64. We
+                * want to make sure we use large enough size here so that
+                * we don't have a dynamic reserve space depending on
+                * struct page size. But we also want to make sure we notice
+                * when we end up adding new elements to struct page.
                 */
-               offset = ALIGN(start + SZ_8K + 64 * npfns, align) - start;
+               BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
+               offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
+                       - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K, align) - start;
        else
@@ -714,7 +780,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
                return -ENXIO;
        }
 
-       npfns = PHYS_PFN(size - offset);
+       npfns = PHYS_PFN(size - offset - end_trunc);
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
@@ -722,8 +788,11 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
        pfn_sb->version_major = cpu_to_le16(1);
-       pfn_sb->version_minor = cpu_to_le16(3);
+       pfn_sb->version_minor = cpu_to_le16(4);
+       pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
+       pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
+       pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);