// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;

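/*
 * Queue a Discovery Log Change AEN to @ctrl, but only if the controller is
 * connected through @port and has not disabled the event in its async
 * event configuration.
 */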
static void __nvmet_disc_changed(struct nvmet_port *port,
                                 struct nvmet_ctrl *ctrl)
{
        if (ctrl->port != port)
                return;

        if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
                return;

        nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
                              NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

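/*
 * The discovery information exported through @port changed.  Bump the log
 * page generation counter and notify every discovery controller that is
 * allowed to see @subsys (all of them if @subsys is NULL).  The transport
 * is also given a chance to signal the change via ->discovery_chg.
 * Must be called with nvmet_config_sem held.
 */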
void nvmet_port_disc_changed(struct nvmet_port *port,
                             struct nvmet_subsys *subsys)
{
        struct nvmet_ctrl *ctrl;

        lockdep_assert_held(&nvmet_config_sem);
        nvmet_genctr++;

        mutex_lock(&nvmet_disc_subsys->lock);
        list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
                if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
                        continue;

                __nvmet_disc_changed(port, ctrl);
        }
        mutex_unlock(&nvmet_disc_subsys->lock);

        /* If the transport can signal discovery changes, notify it */
        if (port->tr_ops && port->tr_ops->discovery_chg)
                port->tr_ops->discovery_chg(port);
}

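/*
 * Notify the discovery controllers belonging to @host (or all discovery
 * controllers if @host is NULL) that the entries exported through @port
 * have changed.
 */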
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
                                        struct nvmet_subsys *subsys,
                                        struct nvmet_host *host)
{
        struct nvmet_ctrl *ctrl;

        mutex_lock(&nvmet_disc_subsys->lock);
        list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
                if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
                        continue;

                __nvmet_disc_changed(port, ctrl);
        }
        mutex_unlock(&nvmet_disc_subsys->lock);
}

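/*
 * The discovery information for @subsys changed.  Bump the generation
 * counter and, for every port through which @subsys is exported, notify
 * the discovery controllers of @host (or of all hosts if @host is NULL).
 */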
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
                               struct nvmet_host *host)
{
        struct nvmet_port *port;
        struct nvmet_subsys_link *s;

        nvmet_genctr++;

        list_for_each_entry(port, nvmet_ports, global_entry)
                list_for_each_entry(s, &port->subsystems, entry) {
                        if (s->subsys != subsys)
                                continue;
                        __nvmet_subsys_disc_changed(port, subsys, host);
                }
}

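/* Add @port to the referral list of @parent and announce the change. */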
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
        down_write(&nvmet_config_sem);
        if (list_empty(&port->entry)) {
                list_add_tail(&port->entry, &parent->referrals);
                port->enabled = true;
                nvmet_port_disc_changed(parent, NULL);
        }
        up_write(&nvmet_config_sem);
}

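/* Remove @port from the referral list of @parent and announce the change. */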
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
        down_write(&nvmet_config_sem);
        if (!list_empty(&port->entry)) {
                port->enabled = false;
                list_del_init(&port->entry);
                nvmet_port_disc_changed(parent, NULL);
        }
        up_write(&nvmet_config_sem);
}

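/*
 * Fill in discovery log page entry number @numrec in @hdr with the
 * transport address information of @port and the subsystem NQN @subsys_nqn.
 */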
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
                struct nvmet_port *port, char *subsys_nqn, char *traddr,
                u8 type, u32 numrec)
{
        struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

        e->trtype = port->disc_addr.trtype;
        e->adrfam = port->disc_addr.adrfam;
        e->treq = port->disc_addr.treq;
        e->portid = port->disc_addr.portid;
        /* we support only dynamic controllers */
        e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
        e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
        e->subtype = type;
        memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
        memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
        memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
        strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" IPv4/IPv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT).  The discovery log page traddr reply
 * must not contain that "any" IP address.  If the transport implements
 * .disc_traddr, use it; that callback sets the discovery traddr from the
 * req->port address in case the port in question is listening on an "any"
 * IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
                char *traddr)
{
        if (req->ops->disc_traddr)
                req->ops->disc_traddr(req, port, traddr);
        else
                memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

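/*
 * Count the log page entries visible to this host: one per subsystem on
 * the port the request arrived on that the host is allowed to access, plus
 * one per referral configured on that port.
 */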
static size_t discovery_log_entries(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_subsys_link *p;
        struct nvmet_port *r;
        size_t entries = 0;

        list_for_each_entry(p, &req->port->subsystems, entry) {
                if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
                        continue;
                entries++;
        }
        list_for_each_entry(r, &req->port->referrals, entry)
                entries++;
        return entries;
}

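/*
 * Handler for Get Log Page with the Discovery log identifier.  The full
 * log page is built under nvmet_config_sem, the Discovery Log Change AEN
 * bit is cleared for this controller, and the byte range requested by the
 * host is copied into the request SGL.  Only dword-aligned offsets are
 * accepted.
 */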
static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
{
        const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmf_disc_rsp_page_hdr *hdr;
        u64 offset = nvmet_get_log_page_offset(req->cmd);
        size_t data_len = nvmet_get_log_page_len(req->cmd);
        size_t alloc_len;
        struct nvmet_subsys_link *p;
        struct nvmet_port *r;
        u32 numrec = 0;
        u16 status = 0;
        void *buffer;

        /* Spec requires dword aligned offsets */
        if (offset & 0x3) {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out;
        }

        /*
         * Make sure we allocate at least a buffer the size of the response
         * header.  If the data length provided by the host is smaller than
         * the header, only the number of bytes requested by the host is
         * returned.
         */
        down_read(&nvmet_config_sem);
        alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
        buffer = kzalloc(alloc_len, GFP_KERNEL);
        if (!buffer) {
                up_read(&nvmet_config_sem);
                status = NVME_SC_INTERNAL;
                goto out;
        }

        hdr = buffer;
        list_for_each_entry(p, &req->port->subsystems, entry) {
                char traddr[NVMF_TRADDR_SIZE];

                if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
                        continue;

                nvmet_set_disc_traddr(req, req->port, traddr);
                nvmet_format_discovery_entry(hdr, req->port,
                                p->subsys->subsysnqn, traddr,
                                NVME_NQN_NVME, numrec);
                numrec++;
        }

        list_for_each_entry(r, &req->port->referrals, entry) {
                nvmet_format_discovery_entry(hdr, r,
                                NVME_DISC_SUBSYS_NAME,
                                r->disc_addr.traddr,
                                NVME_NQN_DISC, numrec);
                numrec++;
        }

        hdr->genctr = cpu_to_le64(nvmet_genctr);
        hdr->numrec = cpu_to_le64(numrec);
        hdr->recfmt = cpu_to_le16(0);

        nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

        up_read(&nvmet_config_sem);

        status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
        kfree(buffer);
out:
        nvmet_req_complete(req, status);
}

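/*
 * Identify Controller for the discovery subsystem: report a minimal
 * controller data structure with the firmware revision, controller ID,
 * log page attributes, supported SGL types and optional AEN configuration.
 */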
static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
        u16 status = 0;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        memset(id->fr, ' ', sizeof(id->fr));
        strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));

        /* no limit on data transfer sizes for now */
        id->mdts = 0;
        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);
        id->lpa = (1 << 2);

        /* no enforced soft limit for maxcmd - pick an arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->has_keyed_sgls)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

        strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

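/*
 * Set Features on a discovery controller: only the Keep Alive Timer and
 * Asynchronous Event Configuration features are supported.
 */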
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 stat;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_KATO:
                stat = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                stat = nvmet_set_feat_async_event(req,
                                                  NVMET_DISC_AEN_CFG_OPTIONAL);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, stat);
}

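/*
 * Get Features on a discovery controller: mirrors the Set Features handler
 * above and supports the same two feature identifiers.
 */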
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 stat = 0;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_KATO:
                nvmet_get_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                nvmet_get_feat_async_event(req);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, stat);
}

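/*
 * Parse an admin command received on a discovery controller queue and set
 * up req->execute for the small set of commands a discovery controller
 * implements; anything else is rejected as an invalid opcode.
 */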
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
                pr_err("got cmd %d while not ready\n",
                       cmd->common.opcode);
                req->error_loc =
                        offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }

        switch (cmd->common.opcode) {
        case nvme_admin_set_features:
                req->execute = nvmet_execute_disc_set_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_disc_get_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                req->data_len = 0;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                req->data_len = 0;
                return 0;
        case nvme_admin_get_log_page:
                req->data_len = nvmet_get_log_page_len(cmd);

                switch (cmd->get_log_page.lid) {
                case NVME_LOG_DISC:
                        req->execute = nvmet_execute_get_disc_log_page;
                        return 0;
                default:
                        pr_err("unsupported get_log_page lid %d\n",
                               cmd->get_log_page.lid);
                        req->error_loc =
                                offsetof(struct nvme_get_log_page_command, lid);
                        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                }
        case nvme_admin_identify:
                req->data_len = NVME_IDENTIFY_DATA_SIZE;
                switch (cmd->identify.cns) {
                case NVME_ID_CNS_CTRL:
                        req->execute =
                                nvmet_execute_identify_disc_ctrl;
                        return 0;
                default:
                        pr_err("unsupported identify cns %d\n",
                               cmd->identify.cns);
                        req->error_loc = offsetof(struct nvme_identify, cns);
                        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                }
        default:
                pr_err("unhandled cmd %d\n", cmd->common.opcode);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}

int __init nvmet_init_discovery(void)
{
        nvmet_disc_subsys =
                nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
        if (IS_ERR(nvmet_disc_subsys))
                return PTR_ERR(nvmet_disc_subsys);
        return 0;
}

void nvmet_exit_discovery(void)
{
        nvmet_subsys_put(nvmet_disc_subsys);
}