/* drivers/block/null_blk_zoned.c — gitweb page-header artifacts removed */
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include "null_blk.h"

/* zone_size in MBs to sectors. */
#define ZONE_SIZE_SHIFT		11

/*
 * Map a sector to the index of the zone containing it.
 * Valid because null_zone_init() enforces a power-of-two zone size,
 * so zone_size_sects is a power of two and ilog2() is exact.
 */
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
12
13 int null_zone_init(struct nullb_device *dev)
14 {
15         sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
16         sector_t sector = 0;
17         unsigned int i;
18
19         if (!is_power_of_2(dev->zone_size)) {
20                 pr_err("zone_size must be power-of-two\n");
21                 return -EINVAL;
22         }
23
24         dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
25         dev->nr_zones = dev_size >>
26                                 (SECTOR_SHIFT + ilog2(dev->zone_size_sects));
27         dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
28                         GFP_KERNEL | __GFP_ZERO);
29         if (!dev->zones)
30                 return -ENOMEM;
31
32         if (dev->zone_nr_conv >= dev->nr_zones) {
33                 dev->zone_nr_conv = dev->nr_zones - 1;
34                 pr_info("changed the number of conventional zones to %u",
35                         dev->zone_nr_conv);
36         }
37
38         for (i = 0; i <  dev->zone_nr_conv; i++) {
39                 struct blk_zone *zone = &dev->zones[i];
40
41                 zone->start = sector;
42                 zone->len = dev->zone_size_sects;
43                 zone->wp = zone->start + zone->len;
44                 zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
45                 zone->cond = BLK_ZONE_COND_NOT_WP;
46
47                 sector += dev->zone_size_sects;
48         }
49
50         for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
51                 struct blk_zone *zone = &dev->zones[i];
52
53                 zone->start = zone->wp = sector;
54                 zone->len = dev->zone_size_sects;
55                 zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
56                 zone->cond = BLK_ZONE_COND_EMPTY;
57
58                 sector += dev->zone_size_sects;
59         }
60
61         return 0;
62 }
63
/* Free the zone array allocated by null_zone_init(). */
void null_zone_exit(struct nullb_device *dev)
{
	kvfree(dev->zones);
}
68
69 int null_report_zones(struct gendisk *disk, sector_t sector,
70                 unsigned int nr_zones, report_zones_cb cb, void *data)
71 {
72         struct nullb *nullb = disk->private_data;
73         struct nullb_device *dev = nullb->dev;
74         unsigned int first_zone, i;
75         struct blk_zone zone;
76         int error;
77
78         first_zone = null_zone_no(dev, sector);
79         if (first_zone >= dev->nr_zones)
80                 return 0;
81
82         nr_zones = min(nr_zones, dev->nr_zones - first_zone);
83         for (i = 0; i < nr_zones; i++) {
84                 /*
85                  * Stacked DM target drivers will remap the zone information by
86                  * modifying the zone information passed to the report callback.
87                  * So use a local copy to avoid corruption of the device zone
88                  * array.
89                  */
90                 memcpy(&zone, &dev->zones[first_zone + i],
91                        sizeof(struct blk_zone));
92                 error = cb(&zone, i, data);
93                 if (error)
94                         return error;
95         }
96
97         return nr_zones;
98 }
99
100 size_t null_zone_valid_read_len(struct nullb *nullb,
101                                 sector_t sector, unsigned int len)
102 {
103         struct nullb_device *dev = nullb->dev;
104         struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
105         unsigned int nr_sectors = len >> SECTOR_SHIFT;
106
107         /* Read must be below the write pointer position */
108         if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
109             sector + nr_sectors <= zone->wp)
110                 return len;
111
112         if (sector > zone->wp)
113                 return 0;
114
115         return (zone->wp - sector) << SECTOR_SHIFT;
116 }
117
118 static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
119                      unsigned int nr_sectors)
120 {
121         struct nullb_device *dev = cmd->nq->dev;
122         unsigned int zno = null_zone_no(dev, sector);
123         struct blk_zone *zone = &dev->zones[zno];
124
125         switch (zone->cond) {
126         case BLK_ZONE_COND_FULL:
127                 /* Cannot write to a full zone */
128                 cmd->error = BLK_STS_IOERR;
129                 return BLK_STS_IOERR;
130         case BLK_ZONE_COND_EMPTY:
131         case BLK_ZONE_COND_IMP_OPEN:
132         case BLK_ZONE_COND_EXP_OPEN:
133         case BLK_ZONE_COND_CLOSED:
134                 /* Writes must be at the write pointer position */
135                 if (sector != zone->wp)
136                         return BLK_STS_IOERR;
137
138                 if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
139                         zone->cond = BLK_ZONE_COND_IMP_OPEN;
140
141                 zone->wp += nr_sectors;
142                 if (zone->wp == zone->start + zone->len)
143                         zone->cond = BLK_ZONE_COND_FULL;
144                 break;
145         case BLK_ZONE_COND_NOT_WP:
146                 break;
147         default:
148                 /* Invalid zone condition */
149                 return BLK_STS_IOERR;
150         }
151         return BLK_STS_OK;
152 }
153
154 static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
155                                    sector_t sector)
156 {
157         struct nullb_device *dev = cmd->nq->dev;
158         struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
159         size_t i;
160
161         switch (op) {
162         case REQ_OP_ZONE_RESET_ALL:
163                 for (i = 0; i < dev->nr_zones; i++) {
164                         if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
165                                 continue;
166                         zone[i].cond = BLK_ZONE_COND_EMPTY;
167                         zone[i].wp = zone[i].start;
168                 }
169                 break;
170         case REQ_OP_ZONE_RESET:
171                 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
172                         return BLK_STS_IOERR;
173
174                 zone->cond = BLK_ZONE_COND_EMPTY;
175                 zone->wp = zone->start;
176                 break;
177         case REQ_OP_ZONE_OPEN:
178                 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
179                         return BLK_STS_IOERR;
180                 if (zone->cond == BLK_ZONE_COND_FULL)
181                         return BLK_STS_IOERR;
182
183                 zone->cond = BLK_ZONE_COND_EXP_OPEN;
184                 break;
185         case REQ_OP_ZONE_CLOSE:
186                 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
187                         return BLK_STS_IOERR;
188                 if (zone->cond == BLK_ZONE_COND_FULL)
189                         return BLK_STS_IOERR;
190
191                 if (zone->wp == zone->start)
192                         zone->cond = BLK_ZONE_COND_EMPTY;
193                 else
194                         zone->cond = BLK_ZONE_COND_CLOSED;
195                 break;
196         case REQ_OP_ZONE_FINISH:
197                 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
198                         return BLK_STS_IOERR;
199
200                 zone->cond = BLK_ZONE_COND_FULL;
201                 zone->wp = zone->start + zone->len;
202                 break;
203         default:
204                 return BLK_STS_NOTSUPP;
205         }
206         return BLK_STS_OK;
207 }
208
209 blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
210                                sector_t sector, sector_t nr_sectors)
211 {
212         switch (op) {
213         case REQ_OP_WRITE:
214                 return null_zone_write(cmd, sector, nr_sectors);
215         case REQ_OP_ZONE_RESET:
216         case REQ_OP_ZONE_RESET_ALL:
217         case REQ_OP_ZONE_OPEN:
218         case REQ_OP_ZONE_CLOSE:
219         case REQ_OP_ZONE_FINISH:
220                 return null_zone_mgmt(cmd, op, sector);
221         default:
222                 return BLK_STS_OK;
223         }
224 }