1 // SPDX-License-Identifier: GPL-2.0-only
3 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
4 * Shaohua Li <shli@fb.com>
6 #include <linux/module.h>
8 #include <linux/moduleparam.h>
9 #include <linux/sched.h>
11 #include <linux/init.h>
14 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
15 #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
16 #define SECTOR_MASK (PAGE_SECTORS - 1)
20 #define TICKS_PER_SEC 50ULL
21 #define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
23 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
24 static DECLARE_FAULT_ATTR(null_timeout_attr);
25 static DECLARE_FAULT_ATTR(null_requeue_attr);
28 static inline u64 mb_per_tick(int mbps)
30 return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
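/*
 * A note on the arithmetic above (illustrative): mb_per_tick() is the number
 * of bytes the bandwidth timer hands out per tick.  With TICKS_PER_SEC = 50,
 * an mbps limit of 100 yields (1 << 20) / 50 * 100, roughly 2 MB per 20 ms
 * tick, i.e. about 100 MB/s overall.
 */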
34 * Status flags for nullb_device.
36 * CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
37 * UP: Device is currently on and visible in userspace.
38 * THROTTLED: Device is being throttled.
39 * CACHE: Device is using a write-back cache.
41 enum nullb_device_flags {
42 NULLB_DEV_FL_CONFIGURED = 0,
44 NULLB_DEV_FL_THROTTLED = 2,
45 NULLB_DEV_FL_CACHE = 3,
48 #define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
50 * nullb_page is a page in memory for nullb devices.
52 * @page: The page holding the data.
53 * @bitmap: The bitmap representing which sectors in the page hold data.
54 * Each bit covers one block size worth of sectors; the bit index is the
55 * sector offset within the page (sector & SECTOR_MASK).
56 * The highest 2 bits of the bitmap serve a special purpose. LOCK means the
57 * cache page is being flushed to storage. FREE means the cache page has been
58 * freed and should be skipped when flushing to storage. Please see
59 * null_make_cache_space().
63 DECLARE_BITMAP(bitmap, MAP_SZ);
65 #define NULLB_PAGE_LOCK (MAP_SZ - 1)
66 #define NULLB_PAGE_FREE (MAP_SZ - 2)
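/*
 * Bitmap layout: bits 0..(PAGE_SIZE >> SECTOR_SHIFT) - 1 record which sectors
 * of the page hold valid data; the two extra bits at the top, NULLB_PAGE_LOCK
 * and NULLB_PAGE_FREE, carry the cache-flush state described above and are
 * excluded when a page is tested for emptiness (see null_page_empty()).
 */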
68 static LIST_HEAD(nullb_list);
69 static struct mutex lock;
70 static int null_major;
71 static DEFINE_IDA(nullb_indexes);
72 static struct blk_mq_tag_set tag_set;
86 static int g_no_sched;
87 module_param_named(no_sched, g_no_sched, int, 0444);
88 MODULE_PARM_DESC(no_sched, "No io scheduler");
90 static int g_submit_queues = 1;
91 module_param_named(submit_queues, g_submit_queues, int, 0444);
92 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
94 static int g_home_node = NUMA_NO_NODE;
95 module_param_named(home_node, g_home_node, int, 0444);
96 MODULE_PARM_DESC(home_node, "Home node for the device");
98 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
99 static char g_timeout_str[80];
100 module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
102 static char g_requeue_str[80];
103 module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
106 static int g_queue_mode = NULL_Q_MQ;
108 static int null_param_store_val(const char *str, int *val, int min, int max)
112 ret = kstrtoint(str, 10, &new_val);
116 if (new_val < min || new_val > max)
123 static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
125 return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
128 static const struct kernel_param_ops null_queue_mode_param_ops = {
129 .set = null_set_queue_mode,
130 .get = param_get_int,
133 device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
134 MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
136 static int g_gb = 250;
137 module_param_named(gb, g_gb, int, 0444);
138 MODULE_PARM_DESC(gb, "Size in GB");
140 static int g_bs = 512;
141 module_param_named(bs, g_bs, int, 0444);
142 MODULE_PARM_DESC(bs, "Block size (in bytes)");
144 static unsigned int nr_devices = 1;
145 module_param(nr_devices, uint, 0444);
146 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
148 static bool g_blocking;
149 module_param_named(blocking, g_blocking, bool, 0444);
150 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
152 static bool shared_tags;
153 module_param(shared_tags, bool, 0444);
154 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
156 static int g_irqmode = NULL_IRQ_SOFTIRQ;
158 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
160 return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
164 static const struct kernel_param_ops null_irqmode_param_ops = {
165 .set = null_set_irqmode,
166 .get = param_get_int,
169 device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
170 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
172 static unsigned long g_completion_nsec = 10000;
173 module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
174 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
176 static int g_hw_queue_depth = 64;
177 module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
178 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
180 static bool g_use_per_node_hctx;
181 module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
182 MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
185 module_param_named(zoned, g_zoned, bool, 0444);
186 MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
188 static unsigned long g_zone_size = 256;
189 module_param_named(zone_size, g_zone_size, ulong, 0444);
190 MODULE_PARM_DESC(zone_size, "Zone size in MB when the block device is zoned. Must be a power of two. Default: 256");
192 static unsigned int g_zone_nr_conv;
193 module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
194 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
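/*
 * When zoned=1, the capacity is carved into zones of zone_size MB; the first
 * zone_nr_conv zones are exposed as conventional zones and the remainder as
 * sequential-write-required zones (set up by null_zone_init() in the
 * zoned-mode part of the driver).
 */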
196 static struct nullb_device *null_alloc_dev(void);
197 static void null_free_dev(struct nullb_device *dev);
198 static void null_del_dev(struct nullb *nullb);
199 static int null_add_dev(struct nullb_device *dev);
200 static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
202 static inline struct nullb_device *to_nullb_device(struct config_item *item)
204 return item ? container_of(item, struct nullb_device, item) : NULL;
207 static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
209 return snprintf(page, PAGE_SIZE, "%u\n", val);
212 static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
215 return snprintf(page, PAGE_SIZE, "%lu\n", val);
218 static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
220 return snprintf(page, PAGE_SIZE, "%u\n", val);
223 static ssize_t nullb_device_uint_attr_store(unsigned int *val,
224 const char *page, size_t count)
229 result = kstrtouint(page, 0, &tmp);
237 static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
238 const char *page, size_t count)
243 result = kstrtoul(page, 0, &tmp);
251 static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
257 result = kstrtobool(page, &tmp);
265 /* The following macro should only be used with TYPE = {uint, ulong, bool}. */
266 #define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
268 nullb_device_##NAME##_show(struct config_item *item, char *page) \
270 return nullb_device_##TYPE##_attr_show( \
271 to_nullb_device(item)->NAME, page); \
274 nullb_device_##NAME##_store(struct config_item *item, const char *page, \
277 int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
278 struct nullb_device *dev = to_nullb_device(item); \
279 TYPE uninitialized_var(new_value); \
282 ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
286 ret = apply_fn(dev, new_value); \
287 else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
291 dev->NAME = new_value; \
294 CONFIGFS_ATTR(nullb_device_, NAME);
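/*
 * For illustration: NULLB_DEVICE_ATTR(size, ulong, NULL) expands to
 * nullb_device_size_show()/nullb_device_size_store() and the configfs
 * attribute nullb_device_attr_size.  Without an apply function, the store
 * helper only updates dev->size while the device has not yet been configured.
 */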
296 static int nullb_apply_submit_queues(struct nullb_device *dev,
297 unsigned int submit_queues)
299 struct nullb *nullb = dev->nullb;
300 struct blk_mq_tag_set *set;
305 set = nullb->tag_set;
306 blk_mq_update_nr_hw_queues(set, submit_queues);
307 return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
310 NULLB_DEVICE_ATTR(size, ulong, NULL);
311 NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
312 NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
313 NULLB_DEVICE_ATTR(home_node, uint, NULL);
314 NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
315 NULLB_DEVICE_ATTR(blocksize, uint, NULL);
316 NULLB_DEVICE_ATTR(irqmode, uint, NULL);
317 NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
318 NULLB_DEVICE_ATTR(index, uint, NULL);
319 NULLB_DEVICE_ATTR(blocking, bool, NULL);
320 NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
321 NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
322 NULLB_DEVICE_ATTR(discard, bool, NULL);
323 NULLB_DEVICE_ATTR(mbps, uint, NULL);
324 NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
325 NULLB_DEVICE_ATTR(zoned, bool, NULL);
326 NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
327 NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
329 static ssize_t nullb_device_power_show(struct config_item *item, char *page)
331 return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
334 static ssize_t nullb_device_power_store(struct config_item *item,
335 const char *page, size_t count)
337 struct nullb_device *dev = to_nullb_device(item);
341 ret = nullb_device_bool_attr_store(&newp, page, count);
345 if (!dev->power && newp) {
346 if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
348 if (null_add_dev(dev)) {
349 clear_bit(NULLB_DEV_FL_UP, &dev->flags);
353 set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
355 } else if (dev->power && !newp) {
356 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
359 null_del_dev(dev->nullb);
362 clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
368 CONFIGFS_ATTR(nullb_device_, power);
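/*
 * Illustrative configfs usage (assumes configfs is mounted at
 * /sys/kernel/config):
 *
 *   mkdir /sys/kernel/config/nullb/nullb1
 *   echo 4096 > /sys/kernel/config/nullb/nullb1/blocksize
 *   echo 1 > /sys/kernel/config/nullb/nullb1/memory_backed
 *   echo 1 > /sys/kernel/config/nullb/nullb1/power   # registers a nullb<index> disk
 *   echo 0 > /sys/kernel/config/nullb/nullb1/power   # tears the device down again
 */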
370 static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
372 struct nullb_device *t_dev = to_nullb_device(item);
374 return badblocks_show(&t_dev->badblocks, page, 0);
377 static ssize_t nullb_device_badblocks_store(struct config_item *item,
378 const char *page, size_t count)
380 struct nullb_device *t_dev = to_nullb_device(item);
381 char *orig, *buf, *tmp;
385 orig = kstrndup(page, count, GFP_KERNEL);
389 buf = strstrip(orig);
392 if (buf[0] != '+' && buf[0] != '-')
394 tmp = strchr(&buf[1], '-');
398 ret = kstrtoull(buf + 1, 0, &start);
401 ret = kstrtoull(tmp + 1, 0, &end);
407 /* enable badblocks */
408 cmpxchg(&t_dev->badblocks.shift, -1, 0);
410 ret = badblocks_set(&t_dev->badblocks, start,
413 ret = badblocks_clear(&t_dev->badblocks, start,
421 CONFIGFS_ATTR(nullb_device_, badblocks);
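/*
 * The badblocks attribute takes a range of the form "+<start>-<end>" to mark
 * sectors bad and "-<start>-<end>" to clear them again.  Illustrative use:
 *
 *   echo "+0-1023" > /sys/kernel/config/nullb/nullb1/badblocks
 */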
423 static struct configfs_attribute *nullb_device_attrs[] = {
424 &nullb_device_attr_size,
425 &nullb_device_attr_completion_nsec,
426 &nullb_device_attr_submit_queues,
427 &nullb_device_attr_home_node,
428 &nullb_device_attr_queue_mode,
429 &nullb_device_attr_blocksize,
430 &nullb_device_attr_irqmode,
431 &nullb_device_attr_hw_queue_depth,
432 &nullb_device_attr_index,
433 &nullb_device_attr_blocking,
434 &nullb_device_attr_use_per_node_hctx,
435 &nullb_device_attr_power,
436 &nullb_device_attr_memory_backed,
437 &nullb_device_attr_discard,
438 &nullb_device_attr_mbps,
439 &nullb_device_attr_cache_size,
440 &nullb_device_attr_badblocks,
441 &nullb_device_attr_zoned,
442 &nullb_device_attr_zone_size,
443 &nullb_device_attr_zone_nr_conv,
447 static void nullb_device_release(struct config_item *item)
449 struct nullb_device *dev = to_nullb_device(item);
451 null_free_device_storage(dev, false);
455 static struct configfs_item_operations nullb_device_ops = {
456 .release = nullb_device_release,
459 static const struct config_item_type nullb_device_type = {
460 .ct_item_ops = &nullb_device_ops,
461 .ct_attrs = nullb_device_attrs,
462 .ct_owner = THIS_MODULE,
466 config_item *nullb_group_make_item(struct config_group *group, const char *name)
468 struct nullb_device *dev;
470 dev = null_alloc_dev();
472 return ERR_PTR(-ENOMEM);
474 config_item_init_type_name(&dev->item, name, &nullb_device_type);
480 nullb_group_drop_item(struct config_group *group, struct config_item *item)
482 struct nullb_device *dev = to_nullb_device(item);
484 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
487 null_del_dev(dev->nullb);
491 config_item_put(item);
494 static ssize_t memb_group_features_show(struct config_item *item, char *page)
496 return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_nr_conv\n");
499 CONFIGFS_ATTR_RO(memb_group_, features);
501 static struct configfs_attribute *nullb_group_attrs[] = {
502 &memb_group_attr_features,
506 static struct configfs_group_operations nullb_group_ops = {
507 .make_item = nullb_group_make_item,
508 .drop_item = nullb_group_drop_item,
511 static const struct config_item_type nullb_group_type = {
512 .ct_group_ops = &nullb_group_ops,
513 .ct_attrs = nullb_group_attrs,
514 .ct_owner = THIS_MODULE,
517 static struct configfs_subsystem nullb_subsys = {
520 .ci_namebuf = "nullb",
521 .ci_type = &nullb_group_type,
526 static inline int null_cache_active(struct nullb *nullb)
528 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
531 static struct nullb_device *null_alloc_dev(void)
533 struct nullb_device *dev;
535 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
538 INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
539 INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
540 if (badblocks_init(&dev->badblocks, 0)) {
545 dev->size = g_gb * 1024;
546 dev->completion_nsec = g_completion_nsec;
547 dev->submit_queues = g_submit_queues;
548 dev->home_node = g_home_node;
549 dev->queue_mode = g_queue_mode;
550 dev->blocksize = g_bs;
551 dev->irqmode = g_irqmode;
552 dev->hw_queue_depth = g_hw_queue_depth;
553 dev->blocking = g_blocking;
554 dev->use_per_node_hctx = g_use_per_node_hctx;
555 dev->zoned = g_zoned;
556 dev->zone_size = g_zone_size;
557 dev->zone_nr_conv = g_zone_nr_conv;
561 static void null_free_dev(struct nullb_device *dev)
567 badblocks_exit(&dev->badblocks);
571 static void put_tag(struct nullb_queue *nq, unsigned int tag)
573 clear_bit_unlock(tag, nq->tag_map);
575 if (waitqueue_active(&nq->wait))
579 static unsigned int get_tag(struct nullb_queue *nq)
584 tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
585 if (tag >= nq->queue_depth)
587 } while (test_and_set_bit_lock(tag, nq->tag_map));
592 static void free_cmd(struct nullb_cmd *cmd)
594 put_tag(cmd->nq, cmd->tag);
597 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
599 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
601 struct nullb_cmd *cmd;
606 cmd = &nq->cmds[tag];
609 if (nq->dev->irqmode == NULL_IRQ_TIMER) {
610 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
612 cmd->timer.function = null_cmd_timer_expired;
620 static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
622 struct nullb_cmd *cmd;
625 cmd = __alloc_cmd(nq);
626 if (cmd || !can_wait)
630 prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
631 cmd = __alloc_cmd(nq);
638 finish_wait(&nq->wait, &wait);
642 static void end_cmd(struct nullb_cmd *cmd)
644 int queue_mode = cmd->nq->dev->queue_mode;
646 switch (queue_mode) {
648 blk_mq_end_request(cmd->rq, cmd->error);
651 cmd->bio->bi_status = cmd->error;
659 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
661 end_cmd(container_of(timer, struct nullb_cmd, timer));
663 return HRTIMER_NORESTART;
666 static void null_cmd_end_timer(struct nullb_cmd *cmd)
668 ktime_t kt = cmd->nq->dev->completion_nsec;
670 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
673 static void null_complete_rq(struct request *rq)
675 end_cmd(blk_mq_rq_to_pdu(rq));
678 static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
680 struct nullb_page *t_page;
682 t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
686 t_page->page = alloc_pages(gfp_flags, 0);
690 memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
698 static void null_free_page(struct nullb_page *t_page)
700 __set_bit(NULLB_PAGE_FREE, t_page->bitmap);
701 if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
703 __free_page(t_page->page);
707 static bool null_page_empty(struct nullb_page *page)
709 int size = MAP_SZ - 2;
711 return find_first_bit(page->bitmap, size) == size;
714 static void null_free_sector(struct nullb *nullb, sector_t sector,
717 unsigned int sector_bit;
719 struct nullb_page *t_page, *ret;
720 struct radix_tree_root *root;
722 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
723 idx = sector >> PAGE_SECTORS_SHIFT;
724 sector_bit = (sector & SECTOR_MASK);
726 t_page = radix_tree_lookup(root, idx);
728 __clear_bit(sector_bit, t_page->bitmap);
730 if (null_page_empty(t_page)) {
731 ret = radix_tree_delete_item(root, idx, t_page);
732 WARN_ON(ret != t_page);
735 nullb->dev->curr_cache -= PAGE_SIZE;
740 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
741 struct nullb_page *t_page, bool is_cache)
743 struct radix_tree_root *root;
745 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
747 if (radix_tree_insert(root, idx, t_page)) {
748 null_free_page(t_page);
749 t_page = radix_tree_lookup(root, idx);
750 WARN_ON(!t_page || t_page->page->index != idx);
752 nullb->dev->curr_cache += PAGE_SIZE;
757 static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
759 unsigned long pos = 0;
761 struct nullb_page *ret, *t_pages[FREE_BATCH];
762 struct radix_tree_root *root;
764 root = is_cache ? &dev->cache : &dev->data;
769 nr_pages = radix_tree_gang_lookup(root,
770 (void **)t_pages, pos, FREE_BATCH);
772 for (i = 0; i < nr_pages; i++) {
773 pos = t_pages[i]->page->index;
774 ret = radix_tree_delete_item(root, pos, t_pages[i]);
775 WARN_ON(ret != t_pages[i]);
780 } while (nr_pages == FREE_BATCH);
786 static struct nullb_page *__null_lookup_page(struct nullb *nullb,
787 sector_t sector, bool for_write, bool is_cache)
789 unsigned int sector_bit;
791 struct nullb_page *t_page;
792 struct radix_tree_root *root;
794 idx = sector >> PAGE_SECTORS_SHIFT;
795 sector_bit = (sector & SECTOR_MASK);
797 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
798 t_page = radix_tree_lookup(root, idx);
799 WARN_ON(t_page && t_page->page->index != idx);
801 if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
807 static struct nullb_page *null_lookup_page(struct nullb *nullb,
808 sector_t sector, bool for_write, bool ignore_cache)
810 struct nullb_page *page = NULL;
813 page = __null_lookup_page(nullb, sector, for_write, true);
816 return __null_lookup_page(nullb, sector, for_write, false);
819 static struct nullb_page *null_insert_page(struct nullb *nullb,
820 sector_t sector, bool ignore_cache)
821 __releases(&nullb->lock)
822 __acquires(&nullb->lock)
825 struct nullb_page *t_page;
827 t_page = null_lookup_page(nullb, sector, true, ignore_cache);
831 spin_unlock_irq(&nullb->lock);
833 t_page = null_alloc_page(GFP_NOIO);
837 if (radix_tree_preload(GFP_NOIO))
840 spin_lock_irq(&nullb->lock);
841 idx = sector >> PAGE_SECTORS_SHIFT;
842 t_page->page->index = idx;
843 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
844 radix_tree_preload_end();
848 null_free_page(t_page);
850 spin_lock_irq(&nullb->lock);
851 return null_lookup_page(nullb, sector, true, ignore_cache);
854 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
859 struct nullb_page *t_page, *ret;
862 idx = c_page->page->index;
864 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
866 __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
867 if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
868 null_free_page(c_page);
869 if (t_page && null_page_empty(t_page)) {
870 ret = radix_tree_delete_item(&nullb->dev->data,
872 null_free_page(t_page);
880 src = kmap_atomic(c_page->page);
881 dst = kmap_atomic(t_page->page);
883 for (i = 0; i < PAGE_SECTORS;
884 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
885 if (test_bit(i, c_page->bitmap)) {
886 offset = (i << SECTOR_SHIFT);
887 memcpy(dst + offset, src + offset,
888 nullb->dev->blocksize);
889 __set_bit(i, t_page->bitmap);
896 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
898 nullb->dev->curr_cache -= PAGE_SIZE;
903 static int null_make_cache_space(struct nullb *nullb, unsigned long n)
905 int i, err, nr_pages;
906 struct nullb_page *c_pages[FREE_BATCH];
907 unsigned long flushed = 0, one_round;
910 if ((nullb->dev->cache_size * 1024 * 1024) >
911 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
914 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
915 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
917 * null_flush_cache_page() could unlock before using the c_pages. To
918 * avoid the race, we don't allow the pages to be freed.
920 for (i = 0; i < nr_pages; i++) {
921 nullb->cache_flush_pos = c_pages[i]->page->index;
923 * We found the page which is being flushed to disk by other
926 if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
929 __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
933 for (i = 0; i < nr_pages; i++) {
934 if (c_pages[i] == NULL)
936 err = null_flush_cache_page(nullb, c_pages[i]);
941 flushed += one_round << PAGE_SHIFT;
945 nullb->cache_flush_pos = 0;
946 if (one_round == 0) {
947 /* give other threads a chance */
948 spin_unlock_irq(&nullb->lock);
949 spin_lock_irq(&nullb->lock);
956 static int copy_to_nullb(struct nullb *nullb, struct page *source,
957 unsigned int off, sector_t sector, size_t n, bool is_fua)
959 size_t temp, count = 0;
961 struct nullb_page *t_page;
965 temp = min_t(size_t, nullb->dev->blocksize, n - count);
967 if (null_cache_active(nullb) && !is_fua)
968 null_make_cache_space(nullb, PAGE_SIZE);
970 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
971 t_page = null_insert_page(nullb, sector,
972 !null_cache_active(nullb) || is_fua);
976 src = kmap_atomic(source);
977 dst = kmap_atomic(t_page->page);
978 memcpy(dst + offset, src + off + count, temp);
982 __set_bit(sector & SECTOR_MASK, t_page->bitmap);
985 null_free_sector(nullb, sector, true);
988 sector += temp >> SECTOR_SHIFT;
993 static int copy_from_nullb(struct nullb *nullb, struct page *dest,
994 unsigned int off, sector_t sector, size_t n)
996 size_t temp, count = 0;
998 struct nullb_page *t_page;
1002 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1004 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
1005 t_page = null_lookup_page(nullb, sector, false,
1006 !null_cache_active(nullb));
1008 dst = kmap_atomic(dest);
1010 memset(dst + off + count, 0, temp);
1013 src = kmap_atomic(t_page->page);
1014 memcpy(dst + off + count, src + offset, temp);
1020 sector += temp >> SECTOR_SHIFT;
1025 static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
1026 unsigned int len, unsigned int off)
1030 dst = kmap_atomic(page);
1031 memset(dst + off, 0xFF, len);
1035 static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
1039 spin_lock_irq(&nullb->lock);
1041 temp = min_t(size_t, n, nullb->dev->blocksize);
1042 null_free_sector(nullb, sector, false);
1043 if (null_cache_active(nullb))
1044 null_free_sector(nullb, sector, true);
1045 sector += temp >> SECTOR_SHIFT;
1048 spin_unlock_irq(&nullb->lock);
1051 static int null_handle_flush(struct nullb *nullb)
1055 if (!null_cache_active(nullb))
1058 spin_lock_irq(&nullb->lock);
1060 err = null_make_cache_space(nullb,
1061 nullb->dev->cache_size * 1024 * 1024);
1062 if (err || nullb->dev->curr_cache == 0)
1066 WARN_ON(!radix_tree_empty(&nullb->dev->cache));
1067 spin_unlock_irq(&nullb->lock);
1071 static int null_transfer(struct nullb *nullb, struct page *page,
1072 unsigned int len, unsigned int off, bool is_write, sector_t sector,
1075 struct nullb_device *dev = nullb->dev;
1076 unsigned int valid_len = len;
1081 valid_len = null_zone_valid_read_len(nullb,
1085 err = copy_from_nullb(nullb, page, off,
1092 nullb_fill_pattern(nullb, page, len, off);
1093 flush_dcache_page(page);
1095 flush_dcache_page(page);
1096 err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
1102 static int null_handle_rq(struct nullb_cmd *cmd)
1104 struct request *rq = cmd->rq;
1105 struct nullb *nullb = cmd->nq->dev->nullb;
1109 struct req_iterator iter;
1110 struct bio_vec bvec;
1112 sector = blk_rq_pos(rq);
1114 if (req_op(rq) == REQ_OP_DISCARD) {
1115 null_handle_discard(nullb, sector, blk_rq_bytes(rq));
1119 spin_lock_irq(&nullb->lock);
1120 rq_for_each_segment(bvec, rq, iter) {
1122 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1123 op_is_write(req_op(rq)), sector,
1124 req_op(rq) & REQ_FUA);
1126 spin_unlock_irq(&nullb->lock);
1129 sector += len >> SECTOR_SHIFT;
1131 spin_unlock_irq(&nullb->lock);
1136 static int null_handle_bio(struct nullb_cmd *cmd)
1138 struct bio *bio = cmd->bio;
1139 struct nullb *nullb = cmd->nq->dev->nullb;
1143 struct bio_vec bvec;
1144 struct bvec_iter iter;
1146 sector = bio->bi_iter.bi_sector;
1148 if (bio_op(bio) == REQ_OP_DISCARD) {
1149 null_handle_discard(nullb, sector,
1150 bio_sectors(bio) << SECTOR_SHIFT);
1154 spin_lock_irq(&nullb->lock);
1155 bio_for_each_segment(bvec, bio, iter) {
1157 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1158 op_is_write(bio_op(bio)), sector,
1159 bio->bi_opf & REQ_FUA);
1161 spin_unlock_irq(&nullb->lock);
1164 sector += len >> SECTOR_SHIFT;
1166 spin_unlock_irq(&nullb->lock);
1170 static void null_stop_queue(struct nullb *nullb)
1172 struct request_queue *q = nullb->q;
1174 if (nullb->dev->queue_mode == NULL_Q_MQ)
1175 blk_mq_stop_hw_queues(q);
1178 static void null_restart_queue_async(struct nullb *nullb)
1180 struct request_queue *q = nullb->q;
1182 if (nullb->dev->queue_mode == NULL_Q_MQ)
1183 blk_mq_start_stopped_hw_queues(q, true);
1186 static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
1188 struct nullb_device *dev = cmd->nq->dev;
1189 struct nullb *nullb = dev->nullb;
1190 blk_status_t sts = BLK_STS_OK;
1191 struct request *rq = cmd->rq;
1193 if (!hrtimer_active(&nullb->bw_timer))
1194 hrtimer_restart(&nullb->bw_timer);
1196 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
1197 null_stop_queue(nullb);
1198 /* race with timer */
1199 if (atomic_long_read(&nullb->cur_bytes) > 0)
1200 null_restart_queue_async(nullb);
1201 /* requeue request */
1202 sts = BLK_STS_DEV_RESOURCE;
1207 static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
1209 sector_t nr_sectors)
1211 struct badblocks *bb = &cmd->nq->dev->badblocks;
1215 if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
1216 return BLK_STS_IOERR;
1221 static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
1224 struct nullb_device *dev = cmd->nq->dev;
1227 if (dev->queue_mode == NULL_Q_BIO)
1228 err = null_handle_bio(cmd);
1230 err = null_handle_rq(cmd);
1232 return errno_to_blk_status(err);
1235 static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
1237 /* Complete IO inline, via softirq, or via timer */
1238 switch (cmd->nq->dev->irqmode) {
1239 case NULL_IRQ_SOFTIRQ:
1240 switch (cmd->nq->dev->queue_mode) {
1242 blk_mq_complete_request(cmd->rq);
1246 * XXX: no proper submitting cpu information available.
1255 case NULL_IRQ_TIMER:
1256 null_cmd_end_timer(cmd);
1261 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
1262 sector_t nr_sectors, enum req_opf op)
1264 struct nullb_device *dev = cmd->nq->dev;
1265 struct nullb *nullb = dev->nullb;
1268 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
1269 sts = null_handle_throttled(cmd);
1270 if (sts != BLK_STS_OK)
1274 if (op == REQ_OP_FLUSH) {
1275 cmd->error = errno_to_blk_status(null_handle_flush(nullb));
1279 if (nullb->dev->badblocks.shift != -1) {
1280 cmd->error = null_handle_badblocks(cmd, sector, nr_sectors);
1281 if (cmd->error != BLK_STS_OK)
1285 if (dev->memory_backed)
1286 cmd->error = null_handle_memory_backed(cmd, op);
1288 if (!cmd->error && dev->zoned)
1289 cmd->error = null_handle_zoned(cmd, op, sector, nr_sectors);
1292 nullb_complete_cmd(cmd);
1296 static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
1298 struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
1299 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1300 unsigned int mbps = nullb->dev->mbps;
1302 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
1303 return HRTIMER_NORESTART;
1305 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
1306 null_restart_queue_async(nullb);
1308 hrtimer_forward_now(&nullb->bw_timer, timer_interval);
1310 return HRTIMER_RESTART;
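/*
 * The bandwidth timer above refills the device's byte budget to
 * mb_per_tick(mbps) every TIMER_INTERVAL and restarts any stopped hardware
 * queues.  If a whole tick passes with the budget untouched, the timer lets
 * itself lapse (HRTIMER_NORESTART); null_handle_throttled() re-arms it when
 * the next request arrives.
 */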
1313 static void nullb_setup_bwtimer(struct nullb *nullb)
1315 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1317 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1318 nullb->bw_timer.function = nullb_bwtimer_fn;
1319 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
1320 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
1323 static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
1327 if (nullb->nr_queues != 1)
1328 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
1330 return &nullb->queues[index];
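/*
 * The mapping above stripes CPUs across the queues in contiguous blocks:
 * each queue serves roughly nr_cpu_ids / nr_queues consecutive CPU ids.
 * For example, with 8 CPUs and 2 queues, CPUs 0-3 map to queue 0 and
 * CPUs 4-7 to queue 1.
 */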
1333 static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
1335 sector_t sector = bio->bi_iter.bi_sector;
1336 sector_t nr_sectors = bio_sectors(bio);
1337 struct nullb *nullb = q->queuedata;
1338 struct nullb_queue *nq = nullb_to_queue(nullb);
1339 struct nullb_cmd *cmd;
1341 cmd = alloc_cmd(nq, 1);
1344 null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
1345 return BLK_QC_T_NONE;
1348 static bool should_timeout_request(struct request *rq)
1350 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1351 if (g_timeout_str[0])
1352 return should_fail(&null_timeout_attr, 1);
1357 static bool should_requeue_request(struct request *rq)
1359 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1360 if (g_requeue_str[0])
1361 return should_fail(&null_requeue_attr, 1);
1366 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
1368 pr_info("rq %p timed out\n", rq);
1369 blk_mq_complete_request(rq);
1373 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
1374 const struct blk_mq_queue_data *bd)
1376 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
1377 struct nullb_queue *nq = hctx->driver_data;
1378 sector_t nr_sectors = blk_rq_sectors(bd->rq);
1379 sector_t sector = blk_rq_pos(bd->rq);
1381 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1383 if (nq->dev->irqmode == NULL_IRQ_TIMER) {
1384 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1385 cmd->timer.function = null_cmd_timer_expired;
1390 blk_mq_start_request(bd->rq);
1392 if (should_requeue_request(bd->rq)) {
1394 * Alternate between hitting the core BUSY path and the
1395 * driver-driven requeue path.
1397 nq->requeue_selection++;
1398 if (nq->requeue_selection & 1)
1399 return BLK_STS_RESOURCE;
1401 blk_mq_requeue_request(bd->rq, true);
1405 if (should_timeout_request(bd->rq))
1408 return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
1411 static const struct blk_mq_ops null_mq_ops = {
1412 .queue_rq = null_queue_rq,
1413 .complete = null_complete_rq,
1414 .timeout = null_timeout_rq,
1417 static void cleanup_queue(struct nullb_queue *nq)
1423 static void cleanup_queues(struct nullb *nullb)
1427 for (i = 0; i < nullb->nr_queues; i++)
1428 cleanup_queue(&nullb->queues[i]);
1430 kfree(nullb->queues);
1433 static void null_del_dev(struct nullb *nullb)
1435 struct nullb_device *dev = nullb->dev;
1437 ida_simple_remove(&nullb_indexes, nullb->index);
1439 list_del_init(&nullb->list);
1441 del_gendisk(nullb->disk);
1443 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1444 hrtimer_cancel(&nullb->bw_timer);
1445 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1446 null_restart_queue_async(nullb);
1449 blk_cleanup_queue(nullb->q);
1450 if (dev->queue_mode == NULL_Q_MQ &&
1451 nullb->tag_set == &nullb->__tag_set)
1452 blk_mq_free_tag_set(nullb->tag_set);
1453 put_disk(nullb->disk);
1454 cleanup_queues(nullb);
1455 if (null_cache_active(nullb))
1456 null_free_device_storage(nullb->dev, true);
1461 static void null_config_discard(struct nullb *nullb)
1463 if (nullb->dev->discard == false)
1465 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1466 nullb->q->limits.discard_alignment = nullb->dev->blocksize;
1467 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
1468 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
1471 static const struct block_device_operations null_ops = {
1472 .owner = THIS_MODULE,
1473 .report_zones = null_report_zones,
1476 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1481 init_waitqueue_head(&nq->wait);
1482 nq->queue_depth = nullb->queue_depth;
1483 nq->dev = nullb->dev;
1486 static void null_init_queues(struct nullb *nullb)
1488 struct request_queue *q = nullb->q;
1489 struct blk_mq_hw_ctx *hctx;
1490 struct nullb_queue *nq;
1493 queue_for_each_hw_ctx(q, hctx, i) {
1494 if (!hctx->nr_ctx || !hctx->tags)
1496 nq = &nullb->queues[i];
1497 hctx->driver_data = nq;
1498 null_init_queue(nullb, nq);
1503 static int setup_commands(struct nullb_queue *nq)
1505 struct nullb_cmd *cmd;
1508 nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
1512 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
1513 nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
1519 for (i = 0; i < nq->queue_depth; i++) {
1527 static int setup_queues(struct nullb *nullb)
1529 nullb->queues = kcalloc(nullb->dev->submit_queues,
1530 sizeof(struct nullb_queue),
1535 nullb->queue_depth = nullb->dev->hw_queue_depth;
1540 static int init_driver_queues(struct nullb *nullb)
1542 struct nullb_queue *nq;
1545 for (i = 0; i < nullb->dev->submit_queues; i++) {
1546 nq = &nullb->queues[i];
1548 null_init_queue(nullb, nq);
1550 ret = setup_commands(nq);
1558 static int null_gendisk_register(struct nullb *nullb)
1560 sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
1561 struct gendisk *disk;
1563 disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
1566 set_capacity(disk, size);
1568 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
1569 disk->major = null_major;
1570 disk->first_minor = nullb->index;
1571 disk->fops = &null_ops;
1572 disk->private_data = nullb;
1573 disk->queue = nullb->q;
1574 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
1576 #ifdef CONFIG_BLK_DEV_ZONED
1577 if (nullb->dev->zoned) {
1578 if (queue_is_mq(nullb->q)) {
1579 int ret = blk_revalidate_disk_zones(disk);
1583 blk_queue_chunk_sectors(nullb->q,
1584 nullb->dev->zone_size_sects);
1585 nullb->q->nr_zones = blkdev_nr_zones(disk);
1594 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
1596 set->ops = &null_mq_ops;
1597 set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
1599 set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
1601 set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
1602 set->cmd_size = sizeof(struct nullb_cmd);
1603 set->flags = BLK_MQ_F_SHOULD_MERGE;
1605 set->flags |= BLK_MQ_F_NO_SCHED;
1606 set->driver_data = NULL;
1608 if ((nullb && nullb->dev->blocking) || g_blocking)
1609 set->flags |= BLK_MQ_F_BLOCKING;
1611 return blk_mq_alloc_tag_set(set);
1614 static int null_validate_conf(struct nullb_device *dev)
1616 dev->blocksize = round_down(dev->blocksize, 512);
1617 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
1619 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
1620 if (dev->submit_queues != nr_online_nodes)
1621 dev->submit_queues = nr_online_nodes;
1622 } else if (dev->submit_queues > nr_cpu_ids)
1623 dev->submit_queues = nr_cpu_ids;
1624 else if (dev->submit_queues == 0)
1625 dev->submit_queues = 1;
1627 dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
1628 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
1630 /* Memory-backed devices allocate pages in the I/O path, so they must be blocking */
1631 if (dev->memory_backed)
1632 dev->blocking = true;
1633 else /* a cache is meaningless without memory backing */
1634 dev->cache_size = 0;
1635 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
1637 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
1638 /* a bio-based queue cannot be stopped, so bandwidth throttling is not applied */
1639 if (dev->queue_mode == NULL_Q_BIO)
1643 (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
1644 pr_err("zone_size must be power-of-two\n");
1651 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1652 static bool __null_setup_fault(struct fault_attr *attr, char *str)
1657 if (!setup_fault_attr(attr, str))
1665 static bool null_setup_fault(void)
1667 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1668 if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
1670 if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
1676 static int null_add_dev(struct nullb_device *dev)
1678 struct nullb *nullb;
1681 rv = null_validate_conf(dev);
1685 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
1693 spin_lock_init(&nullb->lock);
1695 rv = setup_queues(nullb);
1697 goto out_free_nullb;
1699 if (dev->queue_mode == NULL_Q_MQ) {
1701 nullb->tag_set = &tag_set;
1704 nullb->tag_set = &nullb->__tag_set;
1705 rv = null_init_tag_set(nullb, nullb->tag_set);
1709 goto out_cleanup_queues;
1711 if (!null_setup_fault())
1712 goto out_cleanup_queues;
1714 nullb->tag_set->timeout = 5 * HZ;
1715 nullb->q = blk_mq_init_queue(nullb->tag_set);
1716 if (IS_ERR(nullb->q)) {
1718 goto out_cleanup_tags;
1720 null_init_queues(nullb);
1721 } else if (dev->queue_mode == NULL_Q_BIO) {
1722 nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
1725 goto out_cleanup_queues;
1727 blk_queue_make_request(nullb->q, null_queue_bio);
1728 rv = init_driver_queues(nullb);
1730 goto out_cleanup_blk_queue;
1734 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
1735 nullb_setup_bwtimer(nullb);
1738 if (dev->cache_size > 0) {
1739 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
1740 blk_queue_write_cache(nullb->q, true, true);
1744 rv = null_zone_init(dev);
1746 goto out_cleanup_blk_queue;
1748 nullb->q->limits.zoned = BLK_ZONED_HM;
1749 blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
1750 blk_queue_required_elevator_features(nullb->q,
1751 ELEVATOR_F_ZBD_SEQ_WRITE);
1754 nullb->q->queuedata = nullb;
1755 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
1756 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
1759 nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
1760 dev->index = nullb->index;
1761 mutex_unlock(&lock);
1763 blk_queue_logical_block_size(nullb->q, dev->blocksize);
1764 blk_queue_physical_block_size(nullb->q, dev->blocksize);
1766 null_config_discard(nullb);
1768 sprintf(nullb->disk_name, "nullb%d", nullb->index);
1770 rv = null_gendisk_register(nullb);
1772 goto out_cleanup_zone;
1775 list_add_tail(&nullb->list, &nullb_list);
1776 mutex_unlock(&lock);
1781 null_zone_exit(dev);
1782 out_cleanup_blk_queue:
1783 blk_cleanup_queue(nullb->q);
1785 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
1786 blk_mq_free_tag_set(nullb->tag_set);
1788 cleanup_queues(nullb);
1795 static int __init null_init(void)
1799 struct nullb *nullb;
1800 struct nullb_device *dev;
1802 if (g_bs > PAGE_SIZE) {
1803 pr_warn("invalid block size\n");
1804 pr_warn("defaults block size to %lu\n", PAGE_SIZE);
1808 if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
1809 pr_err("invalid home_node value\n");
1810 g_home_node = NUMA_NO_NODE;
1813 if (g_queue_mode == NULL_Q_RQ) {
1814 pr_err("legacy IO path no longer available\n");
1817 if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
1818 if (g_submit_queues != nr_online_nodes) {
1819 pr_warn("submit_queues param is set to %u.\n",
1821 g_submit_queues = nr_online_nodes;
1823 } else if (g_submit_queues > nr_cpu_ids)
1824 g_submit_queues = nr_cpu_ids;
1825 else if (g_submit_queues <= 0)
1826 g_submit_queues = 1;
1828 if (g_queue_mode == NULL_Q_MQ && shared_tags) {
1829 ret = null_init_tag_set(NULL, &tag_set);
1834 config_group_init(&nullb_subsys.su_group);
1835 mutex_init(&nullb_subsys.su_mutex);
1837 ret = configfs_register_subsystem(&nullb_subsys);
1843 null_major = register_blkdev(0, "nullb");
1844 if (null_major < 0) {
1849 for (i = 0; i < nr_devices; i++) {
1850 dev = null_alloc_dev();
1855 ret = null_add_dev(dev);
1862 pr_info("module loaded\n");
1866 while (!list_empty(&nullb_list)) {
1867 nullb = list_entry(nullb_list.next, struct nullb, list);
1869 null_del_dev(nullb);
1872 unregister_blkdev(null_major, "nullb");
1874 configfs_unregister_subsystem(&nullb_subsys);
1876 if (g_queue_mode == NULL_Q_MQ && shared_tags)
1877 blk_mq_free_tag_set(&tag_set);
1881 static void __exit null_exit(void)
1883 struct nullb *nullb;
1885 configfs_unregister_subsystem(&nullb_subsys);
1887 unregister_blkdev(null_major, "nullb");
1890 while (!list_empty(&nullb_list)) {
1891 struct nullb_device *dev;
1893 nullb = list_entry(nullb_list.next, struct nullb, list);
1895 null_del_dev(nullb);
1898 mutex_unlock(&lock);
1900 if (g_queue_mode == NULL_Q_MQ && shared_tags)
1901 blk_mq_free_tag_set(&tag_set);
1904 module_init(null_init);
1905 module_exit(null_exit);
1907 MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
1908 MODULE_LICENSE("GPL");
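/*
 * Illustrative module-level usage: loading the driver with
 * "modprobe null_blk nr_devices=2 gb=4 bs=4096 irqmode=0 completion_nsec=0"
 * registers two 4 GB devices with a 4 KB block size that complete requests
 * inline; additional devices can then be created at run time through the
 * configfs interface above.
 */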