// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX           "zoned"

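/*
 * Minimum number of BIOs reserved in the clone BIO set mempool
 * (see the bioset_init() call in dmz_ctr()).
 */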
#define DMZ_MIN_BIOS            8192

/*
 * Zone BIO context: per-BIO data attached to each target BIO
 * (see ti->per_io_data_size in dmz_ctr()). @ref counts the original
 * BIO plus all of its in-flight clones; the BIO is completed when
 * the count drops to zero.
 */
struct dmz_bioctx {
        struct dmz_target       *target;
        struct dm_zone          *zone;
        struct bio              *bio;
        refcount_t              ref;
};

/*
 * Chunk work descriptor: one work item per chunk with BIOs in flight,
 * serializing the processing of all BIOs targeting the same chunk.
 * @refcount counts the queued BIOs, plus one while the work is queued.
 */
struct dm_chunk_work {
        struct work_struct      work;
        refcount_t              refcount;
        struct dmz_target       *target;
        unsigned int            chunk;
        struct bio_list         bio_list;
};

/*
 * Target descriptor.
 */
struct dmz_target {
        struct dm_dev           *ddev;

        unsigned long           flags;

        /* Zoned block device information */
        struct dmz_dev          *dev;

        /* For metadata handling */
        struct dmz_metadata     *metadata;

        /* For reclaim */
        struct dmz_reclaim      *reclaim;

        /* For chunk work */
        struct radix_tree_root  chunk_rxtree;
        struct workqueue_struct *chunk_wq;
        struct mutex            chunk_lock;

        /* For cloned BIOs to zones */
        struct bio_set          bio_set;

        /* For flush */
        spinlock_t              flush_lock;
        struct bio_list         flush_list;
        struct delayed_work     flush_work;
        struct workqueue_struct *flush_wq;
};

/*
 * Flush period (jiffies): dirty metadata is flushed and queued flush
 * BIOs are completed every 10 seconds.
 */
#define DMZ_FLUSH_PERIOD        (10 * HZ)

/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

        if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
                bio->bi_status = status;

        if (refcount_dec_and_test(&bioctx->ref)) {
                struct dm_zone *zone = bioctx->zone;

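                /*
                 * A failed write to a sequential zone may leave the zone
                 * write pointer in an unexpected position: flag the zone
                 * so that its write pointer can be checked and resynced
                 * before the zone is used again.
                 */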
                if (zone) {
                        if (bio->bi_status != BLK_STS_OK &&
                            bio_op(bio) == REQ_OP_WRITE &&
                            dmz_is_seq(zone))
                                set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
                        dmz_deactivate_zone(zone);
                }
                bio_endio(bio);
        }
}

/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
        struct dmz_bioctx *bioctx = clone->bi_private;
        blk_status_t status = clone->bi_status;

        bio_put(clone);
        dmz_bio_endio(bioctx->bio, status);
}

/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
                          struct bio *bio, sector_t chunk_block,
                          unsigned int nr_blocks)
{
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        struct bio *clone;

        clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
        if (!clone)
                return -ENOMEM;

        bio_set_dev(clone, dmz->dev->bdev);
        clone->bi_iter.bi_sector =
                dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
        clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
        clone->bi_end_io = dmz_clone_endio;
        clone->bi_private = bioctx;

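        /*
         * Advance the parent BIO past the range covered by this clone so
         * that any remaining blocks can be submitted with further clones.
         */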
        bio_advance(bio, clone->bi_iter.bi_size);

        refcount_inc(&bioctx->ref);
        generic_make_request(clone);

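        /*
         * For writes to sequential zones, account for the zone write
         * pointer advancing by the number of blocks issued.
         */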
        if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
                zone->wp_block += nr_blocks;

        return 0;
}

/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
                                 sector_t chunk_block, unsigned int nr_blocks)
{
        unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

        /*
         * Temporarily restrict the BIO size to the nr_blocks range so that
         * zero_fill_bio() only zeroes these blocks, then restore the
         * original size and advance past the zeroed range.
         */
        swap(bio->bi_iter.bi_size, size);
        zero_fill_bio(bio);
        swap(bio->bi_iter.bi_size, size);

        bio_advance(bio, size);
}

/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
                           struct bio *bio)
{
        sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
        unsigned int nr_blocks = dmz_bio_blocks(bio);
        sector_t end_block = chunk_block + nr_blocks;
        struct dm_zone *rzone, *bzone;
        int ret;

        /* Reads to unmapped chunks only need to zero out the BIO buffer */
        if (!zone) {
                zero_fill_bio(bio);
                return 0;
        }

        dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
                      dmz_id(dmz->metadata, zone),
                      (unsigned long long)chunk_block, nr_blocks);

        /* Check block validity to determine the read location */
        bzone = zone->bzone;
        while (chunk_block < end_block) {
                nr_blocks = 0;
                if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
                        /* Test block validity in the data zone */
                        ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
                        if (ret < 0)
                                return ret;
                        if (ret > 0) {
                                /* Read data zone blocks */
                                nr_blocks = ret;
                                rzone = zone;
                        }
                }

                /*
                 * No valid blocks found in the data zone.
                 * Check the buffer zone, if there is one.
                 */
                if (!nr_blocks && bzone) {
                        ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
                        if (ret < 0)
                                return ret;
                        if (ret > 0) {
                                /* Read buffer zone blocks */
                                nr_blocks = ret;
                                rzone = bzone;
                        }
                }

                if (nr_blocks) {
                        /* Valid blocks found: read them */
                        nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
                        ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
                        if (ret)
                                return ret;
                        chunk_block += nr_blocks;
                } else {
                        /* No valid block: zero out the current BIO block */
                        dmz_handle_read_zero(dmz, bio, chunk_block, 1);
                        chunk_block++;
                }
        }

        return 0;
}

/*
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks written
 * in place.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
                                   struct dm_zone *zone, struct bio *bio,
                                   sector_t chunk_block,
                                   unsigned int nr_blocks)
{
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *bzone = zone->bzone;
        int ret;

        if (dmz_is_readonly(zone))
                return -EROFS;

        /* Submit write */
        ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
        if (ret)
                return ret;

        /*
         * Validate the blocks in the data zone and invalidate
         * in the buffer zone, if there is one.
         */
        ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
        if (ret == 0 && bzone)
                ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

        return ret;
}

/*
 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
                                     struct dm_zone *zone, struct bio *bio,
                                     sector_t chunk_block,
                                     unsigned int nr_blocks)
{
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *bzone;
        int ret;

        /* Get the buffer zone. One will be allocated if needed */
        bzone = dmz_get_chunk_buffer(zmd, zone);
        if (IS_ERR(bzone))
                return PTR_ERR(bzone);

        if (dmz_is_readonly(bzone))
                return -EROFS;

        /* Submit write */
        ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
        if (ret)
                return ret;

        /*
         * Validate the blocks in the buffer zone
         * and invalidate in the data zone.
         */
        ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
        if (ret == 0 && chunk_block < zone->wp_block)
                ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

        return ret;
}

/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
                            struct bio *bio)
{
        sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
        unsigned int nr_blocks = dmz_bio_blocks(bio);

        if (!zone)
                return -ENOSPC;

        dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
                      dmz_id(dmz->metadata, zone),
                      (unsigned long long)chunk_block, nr_blocks);

        if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
                /*
                 * zone is a random zone or it is a sequential zone
                 * and the BIO is aligned to the zone write pointer:
                 * direct write the zone.
                 */
                return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
        }

        /*
         * This is an unaligned write in a sequential zone:
         * use buffered write.
         */
        return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}

/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
                              struct bio *bio)
{
        struct dmz_metadata *zmd = dmz->metadata;
        sector_t block = dmz_bio_block(bio);
        unsigned int nr_blocks = dmz_bio_blocks(bio);
        sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
        int ret = 0;

        /* For unmapped chunks, there is nothing to do */
        if (!zone)
                return 0;

        if (dmz_is_readonly(zone))
                return -EROFS;

        dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      dmz_id(zmd, zone),
                      (unsigned long long)chunk_block, nr_blocks);

        /*
         * Invalidate blocks in the data zone and its
         * buffer zone if one is mapped.
         */
        if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
                ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
        if (ret == 0 && zone->bzone)
                ret = dmz_invalidate_blocks(zmd, zone->bzone,
                                            chunk_block, nr_blocks);
        return ret;
}

/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
                           struct bio *bio)
{
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *zone;
        int ret;

        /*
         * Write may trigger a zone allocation. So make sure the
         * allocation can succeed.
         */
        if (bio_op(bio) == REQ_OP_WRITE)
                dmz_schedule_reclaim(dmz->reclaim);

        dmz_lock_metadata(zmd);

        if (dmz->dev->flags & DMZ_BDEV_DYING) {
                ret = -EIO;
                goto out;
        }

        /*
         * Get the data zone mapping the chunk. There may be no
         * mapping for read and discard. If a mapping is obtained,
         * the zone returned will be set to active state.
         */
        zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
                                     bio_op(bio));
        if (IS_ERR(zone)) {
                ret = PTR_ERR(zone);
                goto out;
        }

        /* Process the BIO */
        if (zone) {
                dmz_activate_zone(zone);
                bioctx->zone = zone;
        }

        switch (bio_op(bio)) {
        case REQ_OP_READ:
                ret = dmz_handle_read(dmz, zone, bio);
                break;
        case REQ_OP_WRITE:
                ret = dmz_handle_write(dmz, zone, bio);
                break;
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
                ret = dmz_handle_discard(dmz, zone, bio);
                break;
        default:
                dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
                            bio_op(bio));
                ret = -EIO;
        }

        /*
         * Release the chunk mapping. This will check that the mapping
         * is still valid, that is, that the zone used still has valid blocks.
         */
        if (zone)
                dmz_put_chunk_mapping(zmd, zone);
out:
        dmz_bio_endio(bio, errno_to_blk_status(ret));

        dmz_unlock_metadata(zmd);
}

/*
 * Increment a chunk work reference counter.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
        refcount_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference count and free it if it becomes 0.
 * Called with chunk_lock held, which protects the chunk radix tree.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
        if (refcount_dec_and_test(&cw->refcount)) {
                WARN_ON(!bio_list_empty(&cw->bio_list));
                radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
                kfree(cw);
        }
}

/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
        struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
        struct dmz_target *dmz = cw->target;
        struct bio *bio;

        mutex_lock(&dmz->chunk_lock);

        /*
         * Process the chunk BIOs. chunk_lock is dropped while a BIO is
         * being handled so that new BIOs can be queued to this work.
         */
        while ((bio = bio_list_pop(&cw->bio_list))) {
                mutex_unlock(&dmz->chunk_lock);
                dmz_handle_bio(dmz, cw, bio);
                mutex_lock(&dmz->chunk_lock);
                dmz_put_chunk_work(cw);
        }

        /* Queueing the work incremented the work refcount */
        dmz_put_chunk_work(cw);

        mutex_unlock(&dmz->chunk_lock);
}

/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
        struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
        struct bio *bio;
        int ret;

        /* Flush dirty metadata blocks */
        ret = dmz_flush_metadata(dmz->metadata);
        if (ret)
                dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);

        /* Process queued flush requests */
        while (1) {
                spin_lock(&dmz->flush_lock);
                bio = bio_list_pop(&dmz->flush_list);
                spin_unlock(&dmz->flush_lock);

                if (!bio)
                        break;

                dmz_bio_endio(bio, errno_to_blk_status(ret));
        }

        /* Rearm the periodic flush */
        queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}

/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
        unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
        struct dm_chunk_work *cw;
        int ret = 0;

        mutex_lock(&dmz->chunk_lock);

        /* Get the BIO chunk work. If one is not active yet, create one */
        cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
        if (!cw) {
                /* Create a new chunk work */
                cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
                if (unlikely(!cw)) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_WORK(&cw->work, dmz_chunk_work);
                refcount_set(&cw->refcount, 0);
                cw->target = dmz;
                cw->chunk = chunk;
                bio_list_init(&cw->bio_list);

                ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
                if (unlikely(ret)) {
                        kfree(cw);
                        goto out;
                }
        }

        bio_list_add(&cw->bio_list, bio);
        dmz_get_chunk_work(cw);

        dmz_reclaim_bio_acc(dmz->reclaim);
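        /* If the work was not already queued, hold an extra reference for it */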
        if (queue_work(dmz->chunk_wq, &cw->work))
                dmz_get_chunk_work(cw);
out:
        mutex_unlock(&dmz->chunk_lock);
        return ret;
}

/*
 * Check the backing device availability. If it's on the way out,
 * start failing I/O. Reclaim and metadata components also call this
 * function to cleanly abort operation in the event of such failure.
 */
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
        struct gendisk *disk;

        if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
                disk = dmz_dev->bdev->bd_disk;
                if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
                        dmz_dev_warn(dmz_dev, "Backing device queue dying");
                        dmz_dev->flags |= DMZ_BDEV_DYING;
                } else if (disk->fops->check_events) {
                        if (disk->fops->check_events(disk, 0) &
                                        DISK_EVENT_MEDIA_CHANGE) {
                                dmz_dev_warn(dmz_dev, "Backing device offline");
                                dmz_dev->flags |= DMZ_BDEV_DYING;
                        }
                }
        }

        return dmz_dev->flags & DMZ_BDEV_DYING;
}

/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
        struct dmz_target *dmz = ti->private;
        struct dmz_dev *dev = dmz->dev;
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        sector_t sector = bio->bi_iter.bi_sector;
        unsigned int nr_sectors = bio_sectors(bio);
        sector_t chunk_sector;
        int ret;

        if (dmz_bdev_is_dying(dmz->dev))
                return DM_MAPIO_KILL;

        dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
                      bio_op(bio), (unsigned long long)sector, nr_sectors,
                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
                      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
                      (unsigned int)dmz_bio_blocks(bio));

        bio_set_dev(bio, dev->bdev);

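        /* Zero-length BIOs that are not flushes need no processing: remap them */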
        if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
                return DM_MAPIO_REMAPPED;

        /* The BIO should be block aligned */
        if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
                return DM_MAPIO_KILL;

        /* Initialize the BIO context */
        bioctx->target = dmz;
        bioctx->zone = NULL;
        bioctx->bio = bio;
        refcount_set(&bioctx->ref, 1);

        /* Queue flush BIOs on the flush list and kick the flush work */
        if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
                spin_lock(&dmz->flush_lock);
                bio_list_add(&dmz->flush_list, bio);
                spin_unlock(&dmz->flush_lock);
                mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
                return DM_MAPIO_SUBMITTED;
        }

        /* Split zone BIOs to fit entirely into a zone */
        chunk_sector = sector & (dev->zone_nr_sectors - 1);
        if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
                dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);

        /* Now ready to handle this BIO */
        ret = dmz_queue_chunk_work(dmz, bio);
        if (ret) {
                dmz_dev_debug(dmz->dev,
                              "BIO op %d, can't process chunk %llu, err %i\n",
                              bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
                              ret);
                return DM_MAPIO_REQUEUE;
        }

        return DM_MAPIO_SUBMITTED;
}

/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
{
        struct dmz_target *dmz = ti->private;
        struct request_queue *q;
        struct dmz_dev *dev;
        sector_t aligned_capacity;
        int ret;

        /* Get the target device */
        ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
        if (ret) {
                ti->error = "Get target device failed";
                dmz->ddev = NULL;
                return ret;
        }

        dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
        if (!dev) {
                ret = -ENOMEM;
                goto err;
        }

        dev->bdev = dmz->ddev->bdev;
        (void)bdevname(dev->bdev, dev->name);

        if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
                ti->error = "Not a zoned block device";
                ret = -EINVAL;
                goto err;
        }

        q = bdev_get_queue(dev->bdev);
        dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
        aligned_capacity = dev->capacity &
                                ~((sector_t)blk_queue_zone_sectors(q) - 1);
        if (ti->begin ||
            ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
                ti->error = "Partial mapping not supported";
                ret = -EINVAL;
                goto err;
        }

        dev->zone_nr_sectors = blk_queue_zone_sectors(q);
        dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);

        dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
        dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);

        dev->nr_zones = blkdev_nr_zones(dev->bdev);

        dmz->dev = dev;

        return 0;
err:
        dm_put_device(ti, dmz->ddev);
        kfree(dev);

        return ret;
}

/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_device(struct dm_target *ti)
{
        struct dmz_target *dmz = ti->private;

        dm_put_device(ti, dmz->ddev);
        kfree(dmz->dev);
        dmz->dev = NULL;
}

/*
 * Setup target.
 */
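/*
 * A dm-zoned target takes a single constructor argument: the path to the
 * backing zoned block device. A minimal usage sketch (device name and
 * size are hypothetical):
 *
 *   dmsetup create dmz-sdb --table "0 <size_in_512B_sectors> zoned /dev/sdb"
 */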
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dmz_target *dmz;
        struct dmz_dev *dev;
        int ret;

        /* Check arguments */
        if (argc != 1) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        /* Allocate and initialize the target descriptor */
        dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
        if (!dmz) {
                ti->error = "Unable to allocate the zoned target descriptor";
                return -ENOMEM;
        }
        ti->private = dmz;

        /* Get the target zoned block device */
        ret = dmz_get_zoned_device(ti, argv[0]);
        if (ret) {
                dmz->ddev = NULL;
                goto err;
        }

        /* Initialize metadata */
        dev = dmz->dev;
        ret = dmz_ctr_metadata(dev, &dmz->metadata);
        if (ret) {
                ti->error = "Metadata initialization failed";
                goto err_dev;
        }

        /* Set target (no write same support); max_io_len is in 512B sectors */
        ti->max_io_len = dev->zone_nr_sectors;
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_zeroes_bios = 1;
        ti->per_io_data_size = sizeof(struct dmz_bioctx);
        ti->flush_supported = true;
        ti->discards_supported = true;

        /* The exposed capacity is the number of chunks that can be mapped */
        ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;

        /* Zone BIO */
        ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
        if (ret) {
                ti->error = "Create BIO set failed";
                goto err_meta;
        }

        /* Chunk BIO work */
        mutex_init(&dmz->chunk_lock);
        INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
                                        0, dev->name);
        if (!dmz->chunk_wq) {
                ti->error = "Create chunk workqueue failed";
                ret = -ENOMEM;
                goto err_bio;
        }

        /* Flush work */
        spin_lock_init(&dmz->flush_lock);
        bio_list_init(&dmz->flush_list);
        INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
        dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
                                                dev->name);
        if (!dmz->flush_wq) {
                ti->error = "Create flush workqueue failed";
                ret = -ENOMEM;
                goto err_cwq;
        }
        mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

        /* Initialize reclaim */
        ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
        if (ret) {
                ti->error = "Zone reclaim initialization failed";
                goto err_fwq;
        }

        dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
                     (unsigned long long)ti->len,
                     (unsigned long long)dmz_sect2blk(ti->len));

        return 0;
err_fwq:
        destroy_workqueue(dmz->flush_wq);
err_cwq:
        destroy_workqueue(dmz->chunk_wq);
err_bio:
        mutex_destroy(&dmz->chunk_lock);
        bioset_exit(&dmz->bio_set);
err_meta:
        dmz_dtr_metadata(dmz->metadata);
err_dev:
        dmz_put_zoned_device(ti);
err:
        kfree(dmz);

        return ret;
}

/*
 * Cleanup target.
 */
static void dmz_dtr(struct dm_target *ti)
{
        struct dmz_target *dmz = ti->private;

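        /* Stop and drain all chunk works before tearing down metadata */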
        flush_workqueue(dmz->chunk_wq);
        destroy_workqueue(dmz->chunk_wq);

        dmz_dtr_reclaim(dmz->reclaim);

        cancel_delayed_work_sync(&dmz->flush_work);
        destroy_workqueue(dmz->flush_wq);

        (void) dmz_flush_metadata(dmz->metadata);

        dmz_dtr_metadata(dmz->metadata);

        bioset_exit(&dmz->bio_set);

        dmz_put_zoned_device(ti);

        mutex_destroy(&dmz->chunk_lock);

        kfree(dmz);
}

/*
 * Setup target request queue limits.
 */
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct dmz_target *dmz = ti->private;
        unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;

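        /* All I/O is performed in units of the 4KB metadata block size */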
        limits->logical_block_size = DMZ_BLOCK_SIZE;
        limits->physical_block_size = DMZ_BLOCK_SIZE;

        blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
        blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

        limits->discard_alignment = DMZ_BLOCK_SIZE;
        limits->discard_granularity = DMZ_BLOCK_SIZE;
        limits->max_discard_sectors = chunk_sectors;
        limits->max_hw_discard_sectors = chunk_sectors;
        limits->max_write_zeroes_sectors = chunk_sectors;

        /* FS hint to try to align to the device zone size */
        limits->chunk_sectors = chunk_sectors;
        limits->max_sectors = chunk_sectors;

        /* We are exposing a drive-managed zoned block device */
        limits->zoned = BLK_ZONED_NONE;
}

/*
 * Pass on ioctl to the backend device.
 */
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
        struct dmz_target *dmz = ti->private;

        if (dmz_bdev_is_dying(dmz->dev))
                return -ENODEV;

        *bdev = dmz->dev->bdev;

        return 0;
}

/*
 * Stop works on suspend.
 */
static void dmz_suspend(struct dm_target *ti)
{
        struct dmz_target *dmz = ti->private;

        flush_workqueue(dmz->chunk_wq);
        dmz_suspend_reclaim(dmz->reclaim);
        cancel_delayed_work_sync(&dmz->flush_work);
}

/*
 * Restart works on resume or if suspend failed.
 */
static void dmz_resume(struct dm_target *ti)
{
        struct dmz_target *dmz = ti->private;

        queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
        dmz_resume_reclaim(dmz->reclaim);
}

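/*
 * Report the backing device and the zone-aligned capacity in use to the
 * device mapper core.
 */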
static int dmz_iterate_devices(struct dm_target *ti,
                               iterate_devices_callout_fn fn, void *data)
{
        struct dmz_target *dmz = ti->private;
        struct dmz_dev *dev = dmz->dev;
        sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);

        return fn(ti, dmz->ddev, 0, capacity, data);
}

static struct target_type dmz_type = {
        .name            = "zoned",
        .version         = {1, 0, 0},
        .features        = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
        .module          = THIS_MODULE,
        .ctr             = dmz_ctr,
        .dtr             = dmz_dtr,
        .map             = dmz_map,
        .io_hints        = dmz_io_hints,
        .prepare_ioctl   = dmz_prepare_ioctl,
        .postsuspend     = dmz_suspend,
        .resume          = dmz_resume,
        .iterate_devices = dmz_iterate_devices,
};

static int __init dmz_init(void)
{
        return dm_register_target(&dmz_type);
}

static void __exit dmz_exit(void)
{
        dm_unregister_target(&dmz_type);
}

module_init(dmz_init);
module_exit(dmz_exit);

MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
MODULE_LICENSE("GPL");