/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe status is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data section of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))

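/*
 * Usage sketch (mirrors how the fabrics Connect handler reports parameter
 * errors): when rejecting a bad CNTLID found in the connect data, report
 * the offending offset in the CQE result:
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */
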
struct nvmet_ns {
	struct list_head	dev_link;
	struct percpu_ref	ref;
	struct block_device	*bdev;
	u32			nsid;
	u32			blksize_shift;

	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	struct kmem_cache	*bvec_cache;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_cq		**cqs;
	struct nvmet_sq		**sqs;

	u32			cc;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;
	struct kref		ref;

	struct list_head	namespaces;
	unsigned int		nr_namespaces;
	unsigned int		max_nsid;

	struct list_head	ctrls;

	struct list_head	hosts;

	char			*subsysnqn;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

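/*
 * Hosts carry no attributes of their own: the host NQN is simply the name
 * of the host's configfs directory, typically
 * /sys/kernel/config/nvmet/hosts/<hostnqn>.
 */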
static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_fabrics_ops {
	struct module *owner;
	bool has_keyed_sgls : 1;
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
};

#define NVMET_MAX_INLINE_BIOVEC	8

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*rsp;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	int			sg_cnt;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	/* inline bio for block-device-backed namespaces: */
	struct bio		inline_bio;
	/* biovec and work item for file-backed namespaces: */
	struct bio_vec		*bvec;
	struct work_struct	work;
	/* data length as parsed from the command: */
	size_t			data_len;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;
};

extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
{
	/* bit 0 of the CQE status word holds the phase tag, hence the shift */
	req->rsp->status = cpu_to_le16(status << 1);
}

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->rsp->result.u32 = cpu_to_le32(result);
}

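/*
 * Handlers set the CQE result before completing, e.g. (sketch of the
 * Set Features number-of-queues case, where both halves are 0's based):
 *
 *	nvmet_set_result(req, (nr_queues - 1) | ((nr_queues - 1) << 16));
 *	nvmet_req_complete(req, status);
 */
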
/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

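/*
 * Transports pass this as the direction when mapping the data SGL, e.g.
 * (sketch): dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req)).
 */
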
struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);

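/*
 * Typical transport flow (sketch): parse the capsule, then call
 * nvmet_req_init(); on success, allocate the data SGL with
 * nvmet_req_alloc_sgl(), run the command via nvmet_req_execute(), and
 * release resources from ->queue_response() with nvmet_req_free_sgl().
 * On failure nvmet_req_init() completes the request internally, so the
 * caller must not complete it again.
 */
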
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_DISC_KATO		120

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern u64 nvmet_genctr;
extern struct rw_semaphore nvmet_config_sem;

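/*
 * The enabled-groups table below is indexed directly by ANA group ID;
 * the "+ 1" leaves slot 0 unused, since 0 is not a valid group ID.
 */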
extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);

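/*
 * The NLB field in a read/write command is 0's based, hence the "+ 1";
 * e.g. rw.length == 7 on a 512 byte block namespace (blksize_shift == 9)
 * means (7 + 1) << 9 == 4096 bytes of data.
 */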
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

#endif /* _NVMET_H */