// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

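/*
 * Undo the kernel's memory-failure bookkeeping for pages whose media errors
 * have just been repaired, so they are no longer treated as poisoned.
 */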
static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

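/*
 * Ask the bus provider to clear media errors behind the given range, then
 * bring the badblocks list, page poison state and CPU caches back in sync.
 */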
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

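/*
 * Copy from the bio page(s) into pmem with memcpy_flushcache() so the
 * stores are pushed out of the CPU cache as part of the copy.
 */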
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

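/*
 * Copy out of pmem with a machine-check-safe memcpy so consuming poison
 * results in an I/O error completion rather than a kernel crash.
 */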
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

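/*
 * Transfer one bio segment to or from pmem while honoring the badblocks
 * list: reads of known-poisoned ranges fail fast, writes try to clear the
 * poison in place.
 */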
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, unsigned int op,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!op_is_write(op)) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison. The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

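/*
 * Bio-based submission path: REQ_PREFLUSH is serviced before the data
 * transfer and REQ_FUA after it, both by calling nvdimm_flush() to drain
 * the memory controller's write-pending queues.
 */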
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_op(bio), iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
			  0, op, sector);

	/*
	 * The ->rw_page interface is subtle and tricky. The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case. Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

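/*
 * Translate a page offset into the namespace to a kernel virtual address
 * and pfn for DAX, and report how many contiguous pages are known good.
 */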
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_to_iter_mcsafe(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

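/*
 * The fsdax dev_pagemap's lifetime is tied to the request_queue: the kill
 * callback starts freezing the queue and the cleanup callback tears it down
 * once all references have drained.
 */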
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_cleanup_queue(q);
}

static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static void pmem_pagemap_page_free(struct page *page)
{
	wake_up_var(&page->_refcount);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.page_free = pmem_pagemap_page_free,
	.kill = pmem_pagemap_kill,
	.cleanup = pmem_pagemap_cleanup,
};

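/*
 * Map the namespace, set up the request queue, gendisk and dax_device, and
 * seed the badblocks list from the region's poison records.
 */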
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct resource bb_res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

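	/*
	 * Three mapping modes follow: a pfn namespace with its own struct
	 * page reservation, a raw namespace mapped with device pages, or a
	 * plain memremap() without page structs (no DAX in that case).
	 */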
	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			resource_size(&pmem->pgmap.res);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
		bb_res.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	} else {
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		memcpy(&bb_res, &nsio->res, sizeof(bb_res));
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

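	/*
	 * Advertise a write cache (the CPU cache, flushed via nvdimm_flush())
	 * plus FUA when the platform supports it, and lift the rotational
	 * and transfer-size limits that make no sense for pmem.
	 */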
	blk_queue_write_cache(q, true, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops		= &pmem_fops;
	disk->queue		= q;
	disk->flags		= GENHD_FL_EXT_DEVT;
	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
	disk->bb = &pmem->bb;

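	/*
	 * Regions that are always synchronous (no flush hints required) get
	 * a synchronous-fault capable dax_device.
	 */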
	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;
	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk, NULL);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					"badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

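/*
 * Probe order: let a btt, pfn or dax personality claim the namespace first;
 * if none does, attach it as a raw pmem disk.
 */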
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: there is no
	 * info reserve block or we found a valid info reserve block
	 * but failed to initialize the pfn superblock.
	 *
	 * For the first case consider namespace as a raw pmem namespace
	 * and attach a disk.
	 *
	 * For the latter, consider this a success and advance the namespace
	 * seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

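/*
 * Re-read the region's poison records when the bus reports new media errors
 * and notify userspace via the 'badblocks' sysfs attribute.
 */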
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");