/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/* Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
 * The 16 bit shift is to set IATTR bit to 1, which means the offending
 * offset starts in the data section of connect()
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
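
/*
 * Example (illustrative sketch, not taken from this file): a connect
 * handler that rejects an invalid field in the connect data would report
 * the offending offset back to the host roughly like this:
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */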

struct nvmet_ns {
	struct list_head	dev_link;
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/*
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
};
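
/*
 * Example (illustrative sketch; the "foo" names are hypothetical): a
 * transport's ->add_port() typically decodes disc_addr and parks its own
 * listener state in @priv:
 *
 *	static int nvmet_foo_add_port(struct nvmet_port *port)
 *	{
 *		u16 portid = le16_to_cpu(port->disc_addr.portid);
 *
 *		port->priv = foo_create_listener(portid);
 *		return port->priv ? 0 : -ENOMEM;
 *	}
 */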

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_cq		**cqs;
	struct nvmet_sq		**sqs;

	bool			cmd_seen;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct list_head	namespaces;
	unsigned int		nr_namespaces;
	unsigned int		max_nsid;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	u64			serial;
	char			*subsysnqn;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	bool has_keyed_sgls : 1;
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
};
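
/*
 * Example (illustrative sketch; the "foo" transport is hypothetical): a
 * transport module fills in its ops and registers them at init time:
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_TCP,
 *		.msdbd		= 1,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	ret = nvmet_register_transport(&nvmet_foo_ops);
 */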

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
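
/*
 * E.g. with 4 KiB pages the inline data limit comes out to 8 * 4096 =
 * 32 KiB; since it scales with PAGE_SIZE it is architecture dependent.
 */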

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool		mpool_alloc;
			struct kiocb	iocb;
			struct bio_vec	*bvec;
			struct work_struct work;
		} f;
	};
	int			sg_cnt;
	/* data length as parsed from the command: */
	size_t			data_len;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
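
/*
 * Example (sketch): transports can feed this direction straight into the
 * DMA API when mapping the data buffer, e.g.:
 *
 *	nents = dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 */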

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
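
/*
 * Example (sketch): event generators pair these helpers so at most one AEN
 * per bit is outstanding until the host clears it (event_type, event_info
 * and log_page stand in for the event being raised):
 *
 *	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		return;
 *	nvmet_add_async_event(ctrl, event_type, event_info, log_page);
 */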

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);
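
/*
 * Example (sketch; "queue" and the foo ops are hypothetical transport
 * state): the usual request flow in a transport's command path is:
 *
 *	if (!nvmet_req_init(req, &queue->nvme_cq, &queue->nvme_sq,
 *			&nvmet_foo_ops))
 *		return;
 *	nvmet_req_execute(req);
 *
 * On failure nvmet_req_init() has already queued an error response; on
 * success the completion arrives later through ops->queue_response().
 */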

void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);
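
/*
 * Example (sketch; the device path is made up and error handling is
 * elided): the configfs glue creates and enables a namespace roughly
 * like this:
 *
 *	struct nvmet_ns *ns = nvmet_ns_alloc(subsys, nsid);
 *
 *	ns->device_path = kstrdup("/dev/sdc", GFP_KERNEL);
 *	if (nvmet_ns_enable(ns))
 *		nvmet_ns_free(ns);
 */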

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);

static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
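
/*
 * E.g. rw.length == 7 means eight blocks (NLB is 0's based); with 512-byte
 * blocks (blksize_shift == 9) that is 8 << 9 == 4096 bytes.
 */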

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
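
/*
 * E.g. to0based(1) == 0 and to0based(100000) == 65535: the input is
 * clamped to [1, 65536] before conversion so the result always fits in
 * 16 bits and never underflows.
 */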

#endif /* _NVMET_H */