/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned"

#define DMZ_MIN_BIOS		8192

/*
 * Zone BIO context.
 */
struct dmz_bioctx {
	struct dmz_target	*target;
	struct dm_zone		*zone;
	struct bio		*bio;
	refcount_t		ref;
};

/*
 * Chunk work descriptor.
 */
struct dm_chunk_work {
	struct work_struct	work;
	refcount_t		refcount;
	struct dmz_target	*target;
	unsigned int		chunk;
	struct bio_list		bio_list;
};

/*
 * Target descriptor.
 */
struct dmz_target {
	struct dm_dev		*ddev;

	unsigned long		flags;

	/* Zoned block device information */
	struct dmz_dev		*dev;

	/* For metadata handling */
	struct dmz_metadata	*metadata;

	/* For reclaim */
	struct dmz_reclaim	*reclaim;

	/* For chunk work */
	struct radix_tree_root	chunk_rxtree;
	struct workqueue_struct *chunk_wq;
	struct mutex		chunk_lock;

	/* For cloned BIOs to zones */
	struct bio_set		bio_set;

	/* For flush */
	spinlock_t		flush_lock;
	struct bio_list		flush_list;
	struct delayed_work	flush_work;
	struct workqueue_struct *flush_wq;
};

/*
 * Flush interval (in jiffies): 10 seconds.
 */
#define DMZ_FLUSH_PERIOD	(10 * HZ)
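
/*
 * Dirty metadata is committed by dmz_flush_work(): the work reschedules itself
 * every DMZ_FLUSH_PERIOD and is also kicked immediately from dmz_map() whenever
 * an empty REQ_OP_WRITE (flush) BIO is added to the flush list.
 */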

/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
		bio->bi_status = status;

	if (refcount_dec_and_test(&bioctx->ref)) {
		struct dm_zone *zone = bioctx->zone;

		if (zone) {
			if (bio->bi_status != BLK_STS_OK &&
			    bio_op(bio) == REQ_OP_WRITE &&
			    dmz_is_seq(zone))
				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
			dmz_deactivate_zone(zone);
		}
		bio_endio(bio);
	}
}

/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
	struct dmz_bioctx *bioctx = clone->bi_private;
	blk_status_t status = clone->bi_status;

	bio_put(clone);
	dmz_bio_endio(bioctx->bio, status);
}

/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
			  struct bio *bio, sector_t chunk_block,
			  unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct bio *clone;

	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	bio_set_dev(clone, dmz->dev->bdev);
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_clone_endio;
	clone->bi_private = bioctx;

	bio_advance(bio, clone->bi_iter.bi_size);

	refcount_inc(&bioctx->ref);
	generic_make_request(clone);

	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;

	return 0;
}
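
/*
 * Note that dmz_submit_bio() advances the target BIO by the size of the clone,
 * so a single target BIO may be processed with several clones, each covering a
 * different range of blocks. For sequential zones, the zone write pointer
 * position is tracked in wp_block so that dmz_handle_write() can distinguish
 * writes aligned to the write pointer from unaligned ones.
 */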

/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

	/* Clear nr_blocks */
	swap(bio->bi_iter.bi_size, size);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, size);

	bio_advance(bio, size);
}

/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
			   struct bio *bio)
{
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t end_block = chunk_block + nr_blocks;
	struct dm_zone *rzone, *bzone;
	int ret;

	/* A read into an unmapped chunk need only zero the BIO buffer */
	if (!zone) {
		zero_fill_bio(bio);
		return 0;
	}

	dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
		      dmz_id(dmz->metadata, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	/* Check block validity to determine the read location */
	bzone = zone->bzone;
	while (chunk_block < end_block) {
		nr_blocks = 0;
		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone.
		 * Check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
			ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* No valid block: zero out the current BIO block */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}

	return 0;
}
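
/*
 * A chunk block is valid in at most one of the data zone and the buffer zone:
 * the write paths below always validate the written blocks in one zone and
 * invalidate the same block range in the other. The read path can therefore
 * check the data zone first and fall back to the buffer zone.
 */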

/*
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the written block
 * range in the buffer zone.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
				   struct dm_zone *zone, struct bio *bio,
				   sector_t chunk_block,
				   unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone = zone->bzone;
	int ret;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the data zone and invalidate
	 * in the buffer zone, if there is one.
	 */
	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && bzone)
		ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
				     struct dm_zone *zone, struct bio *bio,
				     sector_t chunk_block,
				     unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone;
	int ret;

	/* Get the buffer zone. One will be allocated if needed */
	bzone = dmz_get_chunk_buffer(zmd, zone);
	if (IS_ERR(bzone))
		return PTR_ERR(bzone);

	if (dmz_is_readonly(bzone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the buffer zone
	 * and invalidate in the data zone.
	 */
	ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
	if (ret == 0 && chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

	return ret;
}
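
/*
 * For an unaligned write, blocks below the data zone write pointer may already
 * hold an older copy of the data: invalidating that range in the data zone
 * ensures that subsequent reads are served from the buffer zone copy.
 */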

/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
			    struct bio *bio)
{
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);

	if (!zone)
		return -ENOSPC;

	dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
		      dmz_id(dmz->metadata, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
		/*
		 * The zone is a random zone or it is a sequential zone
		 * and the BIO is aligned to the zone write pointer:
		 * direct write the zone.
		 */
		return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
	}

	/*
	 * This is an unaligned write in a sequential zone:
	 * use buffered write.
	 */
	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}

/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
			      struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t block = dmz_bio_block(bio);
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
	int ret = 0;

	/* For unmapped chunks, there is nothing to do */
	if (!zone)
		return 0;

	if (dmz_is_readonly(zone))
		return -EROFS;

	dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      dmz_id(zmd, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	/*
	 * Invalidate blocks in the data zone and its
	 * buffer zone if one is mapped.
	 */
	if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && zone->bzone)
		ret = dmz_invalidate_blocks(zmd, zone->bzone,
					    chunk_block, nr_blocks);
	return ret;
}

/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
			   struct bio *bio)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *zone;
	int ret;

	/*
	 * Write may trigger a zone allocation. So make sure the
	 * allocation can succeed.
	 */
	if (bio_op(bio) == REQ_OP_WRITE)
		dmz_schedule_reclaim(dmz->reclaim);

	dmz_lock_metadata(zmd);

	/*
	 * Get the data zone mapping the chunk. There may be no
	 * mapping for read and discard. If a mapping is obtained,
	 * the zone returned will be set to active state.
	 */
	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
				     bio_op(bio));
	if (IS_ERR(zone)) {
		ret = PTR_ERR(zone);
		goto out;
	}

	/* Process the BIO */
	if (zone) {
		dmz_activate_zone(zone);
		bioctx->zone = zone;
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
			    bio_op(bio));
		ret = -EIO;
	}

	/*
	 * Release the chunk mapping. This will check that the mapping
	 * is still valid, that is, that the zone used still has valid blocks.
	 */
	if (zone)
		dmz_put_chunk_mapping(zmd, zone);
out:
	dmz_bio_endio(bio, errno_to_blk_status(ret));

	dmz_unlock_metadata(zmd);
}

/*
 * Increment a chunk work reference counter.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
	refcount_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference count and
 * free it if it becomes 0.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
	if (refcount_dec_and_test(&cw->refcount)) {
		WARN_ON(!bio_list_empty(&cw->bio_list));
		radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
		kfree(cw);
	}
}

/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
	struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
	struct dmz_target *dmz = cw->target;
	struct bio *bio;

	mutex_lock(&dmz->chunk_lock);

	/* Process the chunk BIOs */
	while ((bio = bio_list_pop(&cw->bio_list))) {
		mutex_unlock(&dmz->chunk_lock);
		dmz_handle_bio(dmz, cw, bio);
		mutex_lock(&dmz->chunk_lock);
		dmz_put_chunk_work(cw);
	}

	/* Queueing the work incremented the work refcount */
	dmz_put_chunk_work(cw);

	mutex_unlock(&dmz->chunk_lock);
}

/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
	struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
	struct bio *bio;
	int ret;

	/* Flush dirty metadata blocks */
	ret = dmz_flush_metadata(dmz->metadata);

	/* Process queued flush requests */
	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);

		if (!bio)
			break;

		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}

/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
	struct dm_chunk_work *cw;

	mutex_lock(&dmz->chunk_lock);

	/* Get the BIO chunk work. If one is not active yet, create one */
	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	if (!cw) {
		int ret;

		/* Create a new chunk work */
		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
		if (!cw)
			goto out;

		INIT_WORK(&cw->work, dmz_chunk_work);
		refcount_set(&cw->refcount, 0);
		cw->target = dmz;
		cw->chunk = chunk;
		bio_list_init(&cw->bio_list);

		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
		if (unlikely(ret)) {
			kfree(cw);
			cw = NULL;
			goto out;
		}
	}

	bio_list_add(&cw->bio_list, bio);
	dmz_get_chunk_work(cw);

	if (queue_work(dmz->chunk_wq, &cw->work))
		dmz_get_chunk_work(cw);
out:
	mutex_unlock(&dmz->chunk_lock);
}
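
/*
 * Chunk work reference counting: one reference is taken for every BIO added to
 * the work bio_list and one more when the work is successfully queued.
 * dmz_chunk_work() drops one reference per BIO it processes and a final one
 * when it is done, at which point dmz_put_chunk_work() frees the descriptor.
 */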

/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = dmz->dev;
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	sector_t chunk_sector;

	dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
		      bio_op(bio), (unsigned long long)sector, nr_sectors,
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
		      (unsigned int)dmz_bio_blocks(bio));

	bio_set_dev(bio, dev->bdev);

	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
		return DM_MAPIO_REMAPPED;

	/* The BIO should be block aligned */
	if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
		return DM_MAPIO_KILL;

	/* Initialize the BIO context */
	bioctx->target = dmz;
	bioctx->zone = NULL;
	bioctx->bio = bio;
	refcount_set(&bioctx->ref, 1);

	/* Set the BIO pending in the flush list */
	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
		spin_lock(&dmz->flush_lock);
		bio_list_add(&dmz->flush_list, bio);
		spin_unlock(&dmz->flush_lock);
		mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
		return DM_MAPIO_SUBMITTED;
	}

	/* Split zone BIOs to fit entirely into a zone */
	chunk_sector = sector & (dev->zone_nr_sectors - 1);
	if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);

	/* Now ready to handle this BIO */
	dmz_reclaim_bio_acc(dmz->reclaim);
	dmz_queue_chunk_work(dmz, bio);

	return DM_MAPIO_SUBMITTED;
}

/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
{
	struct dmz_target *dmz = ti->private;
	struct request_queue *q;
	struct dmz_dev *dev;
	sector_t aligned_capacity;
	int ret;

	/* Get the target device */
	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
	if (ret) {
		ti->error = "Get target device failed";
		dmz->ddev = NULL;
		return ret;
	}

	dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err;
	}

	dev->bdev = dmz->ddev->bdev;
	(void)bdevname(dev->bdev, dev->name);

	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
		ti->error = "Not a zoned block device";
		ret = -EINVAL;
		goto err;
	}

	q = bdev_get_queue(dev->bdev);
	dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
	aligned_capacity = dev->capacity &
			   ~((sector_t)blk_queue_zone_sectors(q) - 1);
	if (ti->begin ||
	    ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
		ti->error = "Partial mapping not supported";
		ret = -EINVAL;
		goto err;
	}

	dev->zone_nr_sectors = blk_queue_zone_sectors(q);
	dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);

	dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
	dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);

	dev->nr_zones = blkdev_nr_zones(dev->bdev);

	dmz->dev = dev;

	return 0;
err:
	dm_put_device(ti, dmz->ddev);
	kfree(dev);

	return ret;
}

/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_device(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	dm_put_device(ti, dmz->ddev);
	kfree(dmz->dev);
	dmz->dev = NULL;
}

/*
 * Setup target.
 */
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dmz_target *dmz;
	struct dmz_dev *dev;
	int ret;

	/* Check arguments */
	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Allocate and initialize the target descriptor */
	dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
	if (!dmz) {
		ti->error = "Unable to allocate the zoned target descriptor";
		return -ENOMEM;
	}
	ti->private = dmz;

	/* Get the target zoned block device */
	ret = dmz_get_zoned_device(ti, argv[0]);
	if (ret) {
		dmz->ddev = NULL;
		goto err;
	}

	/* Initialize metadata */
	dev = dmz->dev;
	ret = dmz_ctr_metadata(dev, &dmz->metadata);
	if (ret) {
		ti->error = "Metadata initialization failed";
		goto err_dev;
	}

	/* Set target (no write same support) */
	ti->max_io_len = dev->zone_nr_sectors << 9;
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->per_io_data_size = sizeof(struct dmz_bioctx);
	ti->flush_supported = true;
	ti->discards_supported = true;

	/* The exposed capacity is the number of chunks that can be mapped */
	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;

	/* Zone BIO */
	ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
	if (ret) {
		ti->error = "Create BIO set failed";
		goto err_meta;
	}

	/* Chunk BIO work */
	mutex_init(&dmz->chunk_lock);
	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
					0, dev->name);
	if (!dmz->chunk_wq) {
		ti->error = "Create chunk workqueue failed";
		ret = -ENOMEM;
		goto err_bio;
	}

	/* Flush work */
	spin_lock_init(&dmz->flush_lock);
	bio_list_init(&dmz->flush_list);
	INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
	dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
						dev->name);
	if (!dmz->flush_wq) {
		ti->error = "Create flush workqueue failed";
		ret = -ENOMEM;
		goto err_cwq;
	}
	mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

	/* Initialize reclaim */
	ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
	if (ret) {
		ti->error = "Zone reclaim initialization failed";
		goto err_fwq;
	}

	dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
		     (unsigned long long)ti->len,
		     (unsigned long long)dmz_sect2blk(ti->len));

	return 0;
err_fwq:
	destroy_workqueue(dmz->flush_wq);
err_cwq:
	destroy_workqueue(dmz->chunk_wq);
err_bio:
	mutex_destroy(&dmz->chunk_lock);
	bioset_exit(&dmz->bio_set);
err_meta:
	dmz_dtr_metadata(dmz->metadata);
err_dev:
	dmz_put_zoned_device(ti);
err:
	kfree(dmz);

	return ret;
}

/*
 * Cleanup target.
 */
static void dmz_dtr(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	flush_workqueue(dmz->chunk_wq);
	destroy_workqueue(dmz->chunk_wq);

	dmz_dtr_reclaim(dmz->reclaim);

	cancel_delayed_work_sync(&dmz->flush_work);
	destroy_workqueue(dmz->flush_wq);

	(void) dmz_flush_metadata(dmz->metadata);

	dmz_dtr_metadata(dmz->metadata);

	bioset_exit(&dmz->bio_set);

	dmz_put_zoned_device(ti);

	mutex_destroy(&dmz->chunk_lock);

	kfree(dmz);
}

/*
 * Setup target request queue limits.
 */
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dmz_target *dmz = ti->private;
	unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;

	limits->logical_block_size = DMZ_BLOCK_SIZE;
	limits->physical_block_size = DMZ_BLOCK_SIZE;

	blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
	blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

	limits->discard_alignment = DMZ_BLOCK_SIZE;
	limits->discard_granularity = DMZ_BLOCK_SIZE;
	limits->max_discard_sectors = chunk_sectors;
	limits->max_hw_discard_sectors = chunk_sectors;
	limits->max_write_zeroes_sectors = chunk_sectors;

	/* FS hint to try to align to the device zone size */
	limits->chunk_sectors = chunk_sectors;
	limits->max_sectors = chunk_sectors;

	/* We are exposing a drive-managed zoned block device */
	limits->zoned = BLK_ZONED_NONE;
}

/*
 * Pass on ioctl to the backend device.
 */
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dmz_target *dmz = ti->private;

	*bdev = dmz->dev->bdev;

	return 0;
}

/*
 * Stop works on suspend.
 */
static void dmz_suspend(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	flush_workqueue(dmz->chunk_wq);
	dmz_suspend_reclaim(dmz->reclaim);
	cancel_delayed_work_sync(&dmz->flush_work);
}

/*
 * Restart works on resume or if suspend failed.
 */
static void dmz_resume(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
	dmz_resume_reclaim(dmz->reclaim);
}
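
/*
 * Report the usable capacity of the backend device: only the zone-aligned
 * part of the device capacity is used, matching the ti->len check in
 * dmz_get_zoned_device() which allows mapping either the full or the
 * zone-aligned capacity.
 */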

static int dmz_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = dmz->dev;
	sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);

	return fn(ti, dmz->ddev, 0, capacity, data);
}

static struct target_type dmz_type = {
	.name		 = "zoned",
	.version	 = {1, 0, 0},
	.features	 = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
	.module		 = THIS_MODULE,
	.ctr		 = dmz_ctr,
	.dtr		 = dmz_dtr,
	.map		 = dmz_map,
	.io_hints	 = dmz_io_hints,
	.prepare_ioctl	 = dmz_prepare_ioctl,
	.postsuspend	 = dmz_suspend,
	.resume		 = dmz_resume,
	.iterate_devices = dmz_iterate_devices,
};

static int __init dmz_init(void)
{
	return dm_register_target(&dmz_type);
}

static void __exit dmz_exit(void)
{
	dm_unregister_target(&dmz_type);
}

module_init(dmz_init);
module_exit(dmz_exit);

MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
MODULE_LICENSE("GPL");