/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

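/*
 * Usage sketch (illustrative, not part of this file): any path that
 * dereferences a dax_device's ops is expected to be bracketed by
 * dax_read_lock()/dax_read_unlock(), so that kill_dax() can wait out
 * in-flight users with synchronize_srcu():
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		...use dax_dev->ops...
 *	dax_read_unlock(id);
 */
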
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

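/*
 * Worked example (illustrative): with 4K pages, a partition starting at
 * sector 2048 and a relative sector of 8 give
 * phys_off = (2048 + 8) * 512 = 1052672, so *pgoff = 257 and
 * phys_off % PAGE_SIZE == 0: the translation succeeds. A partition
 * starting at sector 2049 would leave phys_off misaligned and the
 * function would return -EINVAL.
 */
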
#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_queue))
		return NULL;
	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @sb: The superblock of the device
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: negative errno if unsupported, 0 if supported.
 */
int __bdev_dax_supported(struct super_block *sb, int blocksize)
{
	struct block_device *bdev = sb->s_bdev;
	struct dax_device *dax_dev;
	pgoff_t pgoff;
	int err, id;
	void *kaddr;
	pfn_t pfn;
	long len;

	if (blocksize != PAGE_SIZE) {
		pr_debug("VFS (%s): error: unsupported blocksize for dax\n",
				sb->s_id);
		return -EINVAL;
	}

	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
	if (err) {
		pr_debug("VFS (%s): error: unaligned partition for dax\n",
				sb->s_id);
		return err;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("VFS (%s): error: device does not support dax\n",
				sb->s_id);
		return -EOPNOTSUPP;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);

	put_dax(dax_dev);

	if (len < 1) {
		pr_debug("VFS (%s): error: dax access failed (%ld)\n",
				sb->s_id, len);
		return len < 0 ? len : -EIO;
	}

	if (!((IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn))
			|| pfn_t_devmap(pfn))) {
		pr_debug("VFS (%s): error: dax support not enabled\n",
				sb->s_id);
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif /* CONFIG_BLOCK */

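/*
 * Usage sketch (illustrative): a filesystem's mount path is expected to
 * validate a "-o dax" request against the backing device, along the
 * lines of:
 *
 *	if (sbi->options & FOO_MOUNT_DAX &&
 *	    bdev_dax_supported(sb, PAGE_SIZE) < 0)
 *		...warn and fall back to non-dax I/O...
 *
 * where bdev_dax_supported() is assumed to be the include/linux/dax.h
 * wrapper around __bdev_dax_supported(), and FOO_MOUNT_DAX / sbi are
 * hypothetical stand-ins for a filesystem's own option plumbing.
 */
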
enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
};

/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 * @ops: operations vector supplied by the driver at alloc_dax() time
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
				&dax_dev->flags));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else if (write_cache)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

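/*
 * Usage sketch (illustrative): a dax-capable block driver can publish
 * the resulting "dax/write_cache" sysfs attribute by adding this group
 * to its disk device, e.g.:
 *
 *	static const struct attribute_group *foo_attribute_groups[] = {
 *		&dax_attribute_group,
 *		NULL,
 *	};
 *
 * foo_attribute_groups is a hypothetical name; the nvdimm pmem driver
 * wires the group up along these lines.
 */
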
/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);

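/*
 * Usage sketch (illustrative): mapping a single page of a dax device
 * and copying out of it, under the required srcu protection. dst is a
 * hypothetical destination buffer:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long avail;
 *	int id;
 *
 *	id = dax_read_lock();
 *	avail = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	if (avail >= 1)
 *		memcpy(dst, kaddr, PAGE_SIZE);
 *	dax_read_unlock(id);
 */
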
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

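/*
 * Usage sketch (illustrative): a driver typically latches the
 * write-cache state once at setup time based on what the hardware
 * reports, e.g. the nvdimm pmem driver does roughly:
 *
 *	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
 *
 * Other drivers would substitute their own probe of whether the device
 * has a volatile write cache in front of the persistent media.
 */
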
bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);

	dax_dev->private = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);

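/*
 * Usage sketch (illustrative): the expected teardown order in a
 * driver's remove path is kill_dax() first, so that no new operations
 * can start and in-flight ones drain, then put_dax() to drop the final
 * reference:
 *
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 *
 * After kill_dax() returns, no ops handler may rely on
 * dax_dev->private, which is why it is cleared above.
 */
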
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);
	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);

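/*
 * Usage sketch (illustrative): a driver publishes its dax capability by
 * pairing an ops table with alloc_dax(). The foo_* names are
 * hypothetical stand-ins for a real driver's methods:
 *
 *	static const struct dax_operations foo_dax_ops = {
 *		.direct_access = foo_dax_direct_access,
 *		.copy_from_iter = foo_dax_copy_from_iter,
 *	};
 *
 *	dax_dev = alloc_dax(foo, disk->disk_name, &foo_dax_ops);
 *	if (!dax_dev)
 *		return -ENOMEM;
 *
 * Passing disk->disk_name as @__host is what makes the device findable
 * later via dax_get_by_host() / fs_dax_get_by_bdev().
 */
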
void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

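/*
 * Usage sketch (illustrative): filesystem-dax resolves a dax_device
 * from a block device's disk name and drops the reference when done:
 *
 *	struct dax_device *dax_dev;
 *
 *	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 *	if (dax_dev) {
 *		...use the device...
 *		put_dax(dax_dev);
 *	}
 *
 * The igrab() above is what keeps the returned device pinned until the
 * caller's put_dax().
 */
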
/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

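/*
 * Usage sketch (illustrative): a "device dax" character driver can
 * recover its dax_device in open(), since the chardev inode's i_cdev
 * points at the cdev embedded in struct dax_device:
 *
 *	static int foo_open(struct inode *inode, struct file *filp)
 *	{
 *		struct dax_device *dax_dev = inode_dax(inode);
 *
 *		filp->private_data = dax_get_private(dax_dev);
 *		return 0;
 *	}
 *
 * foo_open() is hypothetical; drivers/dax/device.c follows this shape
 * using dax_get_private() (defined below).
 */
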
struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int __dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_fs_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_fs_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void __dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_fs_type);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_fs_init(void)
{
	int rc;

	rc = __dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		__dax_fs_exit();
	return rc;
}

static void __exit dax_fs_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	__dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_fs_init);
module_exit(dax_fs_exit);