// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS             (1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
                 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
 * runs reset works, which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works, which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
                                           unsigned nsid);

static void nvme_set_queue_dying(struct nvme_ns *ns)
{
        /*
         * Revalidating a dead namespace sets capacity to 0. This will end
         * buffered writers dirtying pages that can't be synced.
         */
        if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
                return;
        revalidate_disk(ns->disk);
        blk_set_queue_dying(ns->queue);
        /* Forcibly unquiesce queues to avoid blocking dispatch */
        blk_mq_unquiesce_queue(ns->queue);
}

static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
        /*
         * Only queue new scan work when admin and IO queues are both alive.
         */
        if (ctrl->state == NVME_CTRL_LIVE)
                queue_work(nvme_wq, &ctrl->scan_work);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
                return -EBUSY;
        if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
                return -EBUSY;
        return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
        int ret;

        ret = nvme_reset_ctrl(ctrl);
        if (!ret) {
                flush_work(&ctrl->reset_work);
                if (ctrl->state != NVME_CTRL_LIVE &&
                    ctrl->state != NVME_CTRL_ADMIN_ONLY)
                        ret = -ENETRESET;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
        dev_info(ctrl->device,
                 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

        flush_work(&ctrl->reset_work);
        nvme_stop_ctrl(ctrl);
        nvme_remove_namespaces(ctrl);
        ctrl->ops->delete_ctrl(ctrl);
        nvme_uninit_ctrl(ctrl);
        nvme_put_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl =
                container_of(work, struct nvme_ctrl, delete_work);

        nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
                return -EBUSY;
        if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
                return -EBUSY;
        return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
        int ret = 0;

        /*
         * Keep a reference until nvme_do_delete_ctrl() completes, since
         * ->delete_ctrl can free the controller.
         */
        nvme_get_ctrl(ctrl);
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
                ret = -EBUSY;
        if (!ret)
                nvme_do_delete_ctrl(ctrl);
        nvme_put_ctrl(ctrl);
        return ret;
}

static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
        return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

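/*
 * Translate an NVMe completion status into a block layer error. The
 * 0x7ff mask keeps only the Status Code Type and Status Code fields,
 * discarding the DNR, More and CRD bits, which are handled separately
 * by the retry logic below.
 */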
static blk_status_t nvme_error_status(u16 status)
{
        switch (status & 0x7ff) {
        case NVME_SC_SUCCESS:
                return BLK_STS_OK;
        case NVME_SC_CAP_EXCEEDED:
                return BLK_STS_NOSPC;
        case NVME_SC_LBA_RANGE:
                return BLK_STS_TARGET;
        case NVME_SC_BAD_ATTRIBUTES:
        case NVME_SC_ONCS_NOT_SUPPORTED:
        case NVME_SC_INVALID_OPCODE:
        case NVME_SC_INVALID_FIELD:
        case NVME_SC_INVALID_NS:
                return BLK_STS_NOTSUPP;
        case NVME_SC_WRITE_FAULT:
        case NVME_SC_READ_ERROR:
        case NVME_SC_UNWRITTEN_BLOCK:
        case NVME_SC_ACCESS_DENIED:
        case NVME_SC_READ_ONLY:
        case NVME_SC_COMPARE_FAILED:
                return BLK_STS_MEDIUM;
        case NVME_SC_GUARD_CHECK:
        case NVME_SC_APPTAG_CHECK:
        case NVME_SC_REFTAG_CHECK:
        case NVME_SC_INVALID_PI:
                return BLK_STS_PROTECTION;
        case NVME_SC_RESERVATION_CONFLICT:
                return BLK_STS_NEXUS;
        case NVME_SC_HOST_PATH_ERROR:
                return BLK_STS_TRANSPORT;
        default:
                return BLK_STS_IOERR;
        }
}

static inline bool nvme_req_needs_retry(struct request *req)
{
        if (blk_noretry_request(req))
                return false;
        if (nvme_req(req)->status & NVME_SC_DNR)
                return false;
        if (nvme_req(req)->retries >= nvme_max_retries)
                return false;
        return true;
}

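/*
 * Requeue a failed command, honouring the Command Retry Delay: per
 * NVMe 1.4, a non-zero CRD (status bits 12:11) selects one of the three
 * Identify Controller CRDT values, expressed in units of 100 ms, which
 * the "* 100" below converts to the milliseconds expected by
 * blk_mq_delay_kick_requeue_list().
 */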
static void nvme_retry_req(struct request *req)
{
        struct nvme_ns *ns = req->q->queuedata;
        unsigned long delay = 0;
        u16 crd;

        /* The mask and shift result must be <= 3 */
        crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
        if (ns && crd)
                delay = ns->ctrl->crdt[crd - 1] * 100;

        nvme_req(req)->retries++;
        blk_mq_requeue_request(req, false);
        blk_mq_delay_kick_requeue_list(req->q, delay);
}

void nvme_complete_rq(struct request *req)
{
        blk_status_t status = nvme_error_status(nvme_req(req)->status);

        trace_nvme_complete_rq(req);

        if (nvme_req(req)->ctrl->kas)
                nvme_req(req)->ctrl->comp_seen = true;

        if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
                if ((req->cmd_flags & REQ_NVME_MPATH) &&
                    blk_path_error(status)) {
                        nvme_failover_req(req);
                        return;
                }

                if (!blk_queue_dying(req->q)) {
                        nvme_retry_req(req);
                        return;
                }
        }

        nvme_trace_bio_complete(req, status);
        blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
        dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
                                "Cancelling I/O %d", req->tag);

        /* don't abort one completed request */
        if (blk_mq_request_completed(req))
                return true;

        nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
        blk_mq_complete_request(req);
        return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

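/*
 * Controller state machine. A summary of the transitions permitted by
 * the switch below (anything not listed is rejected):
 *
 *   NEW        -> LIVE, RESETTING, CONNECTING
 *   LIVE       -> RESETTING, DELETING
 *   ADMIN_ONLY -> RESETTING, DELETING
 *   RESETTING  -> LIVE, CONNECTING, DELETING
 *   CONNECTING -> LIVE, ADMIN_ONLY, DELETING
 *   DELETING   -> DEAD
 *
 * Returns true and commits the new state if the transition is legal.
 */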
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state)
{
        enum nvme_ctrl_state old_state;
        unsigned long flags;
        bool changed = false;

        spin_lock_irqsave(&ctrl->lock, flags);

        old_state = ctrl->state;
        switch (new_state) {
        case NVME_CTRL_ADMIN_ONLY:
                switch (old_state) {
                case NVME_CTRL_CONNECTING:
                        changed = true;
                        /* FALLTHRU */
                default:
                        break;
                }
                break;
        case NVME_CTRL_LIVE:
                switch (old_state) {
                case NVME_CTRL_NEW:
                case NVME_CTRL_RESETTING:
                case NVME_CTRL_CONNECTING:
                        changed = true;
                        /* FALLTHRU */
                default:
                        break;
                }
                break;
        case NVME_CTRL_RESETTING:
                switch (old_state) {
                case NVME_CTRL_NEW:
                case NVME_CTRL_LIVE:
                case NVME_CTRL_ADMIN_ONLY:
                        changed = true;
                        /* FALLTHRU */
                default:
                        break;
                }
                break;
        case NVME_CTRL_CONNECTING:
                switch (old_state) {
                case NVME_CTRL_NEW:
                case NVME_CTRL_RESETTING:
                        changed = true;
                        /* FALLTHRU */
                default:
                        break;
                }
                break;
        case NVME_CTRL_DELETING:
                switch (old_state) {
                case NVME_CTRL_LIVE:
                case NVME_CTRL_ADMIN_ONLY:
                case NVME_CTRL_RESETTING:
                case NVME_CTRL_CONNECTING:
                        changed = true;
                        /* FALLTHRU */
                default:
                        break;
                }
                break;
        case NVME_CTRL_DEAD:
                switch (old_state) {
                case NVME_CTRL_DELETING:
                        changed = true;
                        /* FALLTHRU */
                default:
                        break;
                }
                break;
        default:
                break;
        }

        if (changed)
                ctrl->state = new_state;

        spin_unlock_irqrestore(&ctrl->lock, flags);
        if (changed && ctrl->state == NVME_CTRL_LIVE)
                nvme_kick_requeue_lists(ctrl);
        return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns_head(struct kref *ref)
{
        struct nvme_ns_head *head =
                container_of(ref, struct nvme_ns_head, ref);

        nvme_mpath_remove_disk(head);
        ida_simple_remove(&head->subsys->ns_ida, head->instance);
        list_del_init(&head->entry);
        cleanup_srcu_struct(&head->srcu);
        nvme_put_subsystem(head->subsys);
        kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
        kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
        struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

        if (ns->ndev)
                nvme_nvm_unregister(ns);

        put_disk(ns->disk);
        nvme_put_ns_head(ns->head);
        nvme_put_ctrl(ns->ctrl);
        kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
        kref_put(&ns->kref, nvme_free_ns);
}

static inline void nvme_clear_nvme_request(struct request *req)
{
        if (!(req->rq_flags & RQF_DONTPREP)) {
                nvme_req(req)->retries = 0;
                nvme_req(req)->flags = 0;
                req->rq_flags |= RQF_DONTPREP;
        }
}

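/*
 * Allocate a passthrough request for the given command. NVME_QID_ANY
 * lets blk-mq pick any usable hardware context; otherwise the request
 * is pinned to one queue, with qid 0 and qid 1 both mapping to hardware
 * context 0 and higher qids mapping to hctx qid - 1.
 */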
struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
        unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
        struct request *req;

        if (qid == NVME_QID_ANY) {
                req = blk_mq_alloc_request(q, op, flags);
        } else {
                req = blk_mq_alloc_request_hctx(q, op, flags,
                                qid ? qid - 1 : 0);
        }
        if (IS_ERR(req))
                return req;

        req->cmd_flags |= REQ_FAILFAST_DRIVER;
        nvme_clear_nvme_request(req);
        nvme_req(req)->cmd = cmd;

        return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
        struct nvme_command c;

        memset(&c, 0, sizeof(c));

        c.directive.opcode = nvme_admin_directive_send;
        c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
        c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
        c.directive.dtype = NVME_DIR_IDENTIFY;
        c.directive.tdtype = NVME_DIR_STREAMS;
        c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

        return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
        return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
        return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
                                  struct streams_directive_params *s, u32 nsid)
{
        struct nvme_command c;

        memset(&c, 0, sizeof(c));
        memset(s, 0, sizeof(*s));

        c.directive.opcode = nvme_admin_directive_recv;
        c.directive.nsid = cpu_to_le32(nsid);
        c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
        c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
        c.directive.dtype = NVME_DIR_STREAMS;

        return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

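/*
 * Streams setup: enable the Streams directive, read its parameters, and
 * keep it enabled only when the controller exposes enough streams to
 * back the block layer write hints (BLK_MAX_WRITE_HINTS - 1). NSSA is
 * the count of streams available for allocation across the subsystem.
 */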
static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
        struct streams_directive_params s;
        int ret;

        if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
                return 0;
        if (!streams)
                return 0;

        ret = nvme_enable_streams(ctrl);
        if (ret)
                return ret;

        ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
        if (ret)
                return ret;

        ctrl->nssa = le16_to_cpu(s.nssa);
        if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
                dev_info(ctrl->device, "too few streams (%u) available\n",
                                        ctrl->nssa);
                nvme_disable_streams(ctrl);
                return 0;
        }

        ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
        dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
        return 0;
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
                                     struct request *req, u16 *control,
                                     u32 *dsmgmt)
{
        enum rw_hint streamid = req->write_hint;

        if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
                streamid = 0;
        else {
                streamid--;
                if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
                        return;

                *control |= NVME_RW_DTYPE_STREAMS;
                *dsmgmt |= streamid << 16;
        }

        if (streamid < ARRAY_SIZE(req->q->write_hints))
                req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
                struct nvme_command *cmnd)
{
        cmnd->common.opcode = nvme_cmd_flush;
        cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmnd)
{
        unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
        struct nvme_dsm_range *range;
        struct bio *bio;

        range = kmalloc_array(segments, sizeof(*range),
                                GFP_ATOMIC | __GFP_NOWARN);
        if (!range) {
                /*
                 * If we fail to allocate our range, fall back to the
                 * controller discard page. If that's also busy, it's safe
                 * to return busy, as we know we can make progress once
                 * that's freed.
                 */
                if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
                        return BLK_STS_RESOURCE;

                range = page_address(ns->ctrl->discard_page);
        }

        __rq_for_each_bio(bio, req) {
                u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
                u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

                if (n < segments) {
                        range[n].cattr = cpu_to_le32(0);
                        range[n].nlb = cpu_to_le32(nlb);
                        range[n].slba = cpu_to_le64(slba);
                }
                n++;
        }

        if (WARN_ON_ONCE(n != segments)) {
                if (virt_to_page(range) == ns->ctrl->discard_page)
                        clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
                else
                        kfree(range);
                return BLK_STS_IOERR;
        }

        cmnd->dsm.opcode = nvme_cmd_dsm;
        cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
        cmnd->dsm.nr = cpu_to_le32(segments - 1);
        cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
                struct request *req, struct nvme_command *cmnd)
{
        if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
                return nvme_setup_discard(ns, req, cmnd);

        cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
        cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
        cmnd->write_zeroes.slba =
                cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
        cmnd->write_zeroes.length =
                cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
        cmnd->write_zeroes.control = 0;
        return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
                struct request *req, struct nvme_command *cmnd)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        u16 control = 0;
        u32 dsmgmt = 0;

        if (req->cmd_flags & REQ_FUA)
                control |= NVME_RW_FUA;
        if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
                control |= NVME_RW_LR;

        if (req->cmd_flags & REQ_RAHEAD)
                dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

        cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
        cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
        cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
        cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

        if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
                nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

        if (ns->ms) {
                /*
                 * If formatted with metadata, the block layer always provides
                 * a metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.
                 * Otherwise we enable the PRACT bit for protection
                 * information or set the namespace capacity to zero to
                 * prevent any I/O.
                 */
                if (!blk_integrity_rq(req)) {
                        if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
                                return BLK_STS_NOTSUPP;
                        control |= NVME_RW_PRINFO_PRACT;
                } else if (req_op(req) == REQ_OP_WRITE) {
                        t10_pi_prepare(req, ns->pi_type);
                }

                switch (ns->pi_type) {
                case NVME_NS_DPS_PI_TYPE3:
                        control |= NVME_RW_PRINFO_PRCHK_GUARD;
                        break;
                case NVME_NS_DPS_PI_TYPE1:
                case NVME_NS_DPS_PI_TYPE2:
                        control |= NVME_RW_PRINFO_PRCHK_GUARD |
                                        NVME_RW_PRINFO_PRCHK_REF;
                        cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
                        break;
                }
        }

        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
        return BLK_STS_OK;
}

void nvme_cleanup_cmd(struct request *req)
{
        if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
            nvme_req(req)->status == 0) {
                struct nvme_ns *ns = req->rq_disk->private_data;

                t10_pi_complete(req, ns->pi_type,
                                blk_rq_bytes(req) >> ns->lba_shift);
        }
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                struct nvme_ns *ns = req->rq_disk->private_data;
                struct page *page = req->special_vec.bv_page;

                if (page == ns->ctrl->discard_page)
                        clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
                else
                        kfree(page_address(page) + req->special_vec.bv_offset);
        }
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd)
{
        blk_status_t ret = BLK_STS_OK;

        nvme_clear_nvme_request(req);

        memset(cmd, 0, sizeof(*cmd));
        switch (req_op(req)) {
        case REQ_OP_DRV_IN:
        case REQ_OP_DRV_OUT:
                memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
                break;
        case REQ_OP_FLUSH:
                nvme_setup_flush(ns, cmd);
                break;
        case REQ_OP_WRITE_ZEROES:
                ret = nvme_setup_write_zeroes(ns, req, cmd);
                break;
        case REQ_OP_DISCARD:
                ret = nvme_setup_discard(ns, req, cmd);
                break;
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                ret = nvme_setup_rw(ns, req, cmd);
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        cmd->common.command_id = req->tag;
        trace_nvme_setup_cmd(req, cmd);
        return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
{
        struct completion *waiting = rq->end_io_data;

        rq->end_io_data = NULL;
        complete(waiting);
}

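/*
 * Execute a request on a polled queue: issue it without blocking, then
 * spin in blk_poll() until the completion set up in nvme_end_sync_rq()
 * fires. REQ_HIPRI marks the request for the polled completion path.
 */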
static void nvme_execute_rq_polled(struct request_queue *q,
                struct gendisk *bd_disk, struct request *rq, int at_head)
{
        DECLARE_COMPLETION_ONSTACK(wait);

        WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));

        rq->cmd_flags |= REQ_HIPRI;
        rq->end_io_data = &wait;
        blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);

        while (!completion_done(&wait)) {
                blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
                cond_resched();
        }
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                union nvme_result *result, void *buffer, unsigned bufflen,
                unsigned timeout, int qid, int at_head,
                blk_mq_req_flags_t flags, bool poll)
{
        struct request *req;
        int ret;

        req = nvme_alloc_request(q, cmd, flags, qid);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

        if (buffer && bufflen) {
                ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
                if (ret)
                        goto out;
        }

        if (poll)
                nvme_execute_rq_polled(req->q, NULL, req, at_head);
        else
                blk_execute_rq(req->q, NULL, req, at_head);
        if (result)
                *result = nvme_req(req)->result;
        if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else
                ret = nvme_req(req)->status;
 out:
        blk_mq_free_request(req);
        return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buffer, unsigned bufflen)
{
        return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
                        NVME_QID_ANY, 0, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
                unsigned len, u32 seed, bool write)
{
        struct bio_integrity_payload *bip;
        int ret = -ENOMEM;
        void *buf;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                goto out;

        ret = -EFAULT;
        if (write && copy_from_user(buf, ubuf, len))
                goto out_free_meta;

        bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
        if (IS_ERR(bip)) {
                ret = PTR_ERR(bip);
                goto out_free_meta;
        }

        bip->bip_iter.bi_size = len;
        bip->bip_iter.bi_sector = seed;
        ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
                        offset_in_page(buf));
        if (ret == len)
                return buf;
        ret = -ENOMEM;
out_free_meta:
        kfree(buf);
out:
        return ERR_PTR(ret);
}

static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, void __user *ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, u32 *result, unsigned timeout)
{
        bool write = nvme_is_write(cmd);
        struct nvme_ns *ns = q->queuedata;
        struct gendisk *disk = ns ? ns->disk : NULL;
        struct request *req;
        struct bio *bio = NULL;
        void *meta = NULL;
        int ret;

        req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
        nvme_req(req)->flags |= NVME_REQ_USERCMD;

        if (ubuffer && bufflen) {
                ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
                                GFP_KERNEL);
                if (ret)
                        goto out;
                bio = req->bio;
                bio->bi_disk = disk;
                if (disk && meta_buffer && meta_len) {
                        meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
                                        meta_seed, write);
                        if (IS_ERR(meta)) {
                                ret = PTR_ERR(meta);
                                goto out_unmap;
                        }
                        req->cmd_flags |= REQ_INTEGRITY;
                }
        }

        blk_execute_rq(req->q, disk, req, 0);
        if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else
                ret = nvme_req(req)->status;
        if (result)
                *result = le32_to_cpu(nvme_req(req)->result.u32);
        if (meta && !ret && !write) {
                if (copy_to_user(meta_buffer, meta, meta_len))
                        ret = -EFAULT;
        }
        kfree(meta);
 out_unmap:
        if (bio)
                blk_rq_unmap_user(bio);
 out:
        blk_mq_free_request(req);
        return ret;
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
        struct nvme_ctrl *ctrl = rq->end_io_data;
        unsigned long flags;
        bool startka = false;

        blk_mq_free_request(rq);

        if (status) {
                dev_err(ctrl->device,
                        "failed nvme_keep_alive_end_io error=%d\n",
                                status);
                return;
        }

        ctrl->comp_seen = false;
        spin_lock_irqsave(&ctrl->lock, flags);
        if (ctrl->state == NVME_CTRL_LIVE ||
            ctrl->state == NVME_CTRL_CONNECTING)
                startka = true;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        if (startka)
                schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
        struct request *rq;

        rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
                        NVME_QID_ANY);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        rq->timeout = ctrl->kato * HZ;
        rq->end_io_data = ctrl;

        blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

        return 0;
}

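/*
 * Traffic based keep-alive (TBKAS): when the controller advertises
 * NVME_CTRL_ATTR_TBKAS, any command completion (tracked via comp_seen
 * in nvme_complete_rq()) counts as keep-alive traffic, so an explicit
 * Keep Alive command is only sent after an idle KATO interval.
 */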
static void nvme_keep_alive_work(struct work_struct *work)
{
        struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvme_ctrl, ka_work);
        bool comp_seen = ctrl->comp_seen;

        if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
                dev_dbg(ctrl->device,
                        "reschedule traffic based keep-alive timer\n");
                ctrl->comp_seen = false;
                schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
                return;
        }

        if (nvme_keep_alive(ctrl)) {
                /* allocation failure, reset the controller */
                dev_err(ctrl->device, "keep-alive failed\n");
                nvme_reset_ctrl(ctrl);
                return;
        }
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
        if (unlikely(ctrl->kato == 0))
                return;

        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
        if (unlikely(ctrl->kato == 0))
                return;

        cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
        struct nvme_command c = { };
        int error;

        /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
        c.identify.opcode = nvme_admin_identify;
        c.identify.cns = NVME_ID_CNS_CTRL;

        *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
        if (!*id)
                return -ENOMEM;

        error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
                        sizeof(struct nvme_id_ctrl));
        if (error)
                kfree(*id);
        return error;
}

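/*
 * Parse the Namespace Identification Descriptor list (Identify CNS 03h):
 * a packed sequence of descriptors, each a { type, length } header
 * (nidt/nidl) followed by nidl bytes of payload, terminated by a zero
 * length. Unknown types are skipped so new descriptors stay compatible.
 */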
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
                struct nvme_ns_ids *ids)
{
        struct nvme_command c = { };
        int status;
        void *data;
        int pos;
        int len;

        c.identify.opcode = nvme_admin_identify;
        c.identify.nsid = cpu_to_le32(nsid);
        c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

        data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
                                      NVME_IDENTIFY_DATA_SIZE);
        if (status)
                goto free_data;

        for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
                struct nvme_ns_id_desc *cur = data + pos;

                if (cur->nidl == 0)
                        break;

                switch (cur->nidt) {
                case NVME_NIDT_EUI64:
                        if (cur->nidl != NVME_NIDT_EUI64_LEN) {
                                dev_warn(ctrl->device,
                                         "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
                                         cur->nidl);
                                goto free_data;
                        }
                        len = NVME_NIDT_EUI64_LEN;
                        memcpy(ids->eui64, data + pos + sizeof(*cur), len);
                        break;
                case NVME_NIDT_NGUID:
                        if (cur->nidl != NVME_NIDT_NGUID_LEN) {
                                dev_warn(ctrl->device,
                                         "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
                                         cur->nidl);
                                goto free_data;
                        }
                        len = NVME_NIDT_NGUID_LEN;
                        memcpy(ids->nguid, data + pos + sizeof(*cur), len);
                        break;
                case NVME_NIDT_UUID:
                        if (cur->nidl != NVME_NIDT_UUID_LEN) {
                                dev_warn(ctrl->device,
                                         "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
                                         cur->nidl);
                                goto free_data;
                        }
                        len = NVME_NIDT_UUID_LEN;
                        uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
                        break;
                default:
                        /* Skip unknown types */
                        len = cur->nidl;
                        break;
                }

                len += sizeof(*cur);
        }
free_data:
        kfree(data);
        return status;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
        struct nvme_command c = { };

        c.identify.opcode = nvme_admin_identify;
        c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
        c.identify.nsid = cpu_to_le32(nsid);
        return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
                                    NVME_IDENTIFY_DATA_SIZE);
}

static int nvme_identify_ns(struct nvme_ctrl *ctrl,
                unsigned nsid, struct nvme_id_ns **id)
{
        struct nvme_command c = { };
        int error;

        /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
        c.identify.opcode = nvme_admin_identify;
        c.identify.nsid = cpu_to_le32(nsid);
        c.identify.cns = NVME_ID_CNS_NS;

        *id = kmalloc(sizeof(**id), GFP_KERNEL);
        if (!*id)
                return -ENOMEM;

        error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
        if (error) {
                dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
                kfree(*id);
        }

        return error;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
                unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
        struct nvme_command c;
        union nvme_result res;
        int ret;

        memset(&c, 0, sizeof(c));
        c.features.opcode = op;
        c.features.fid = cpu_to_le32(fid);
        c.features.dword11 = cpu_to_le32(dword11);

        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
                        buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
        if (ret >= 0 && result)
                *result = le32_to_cpu(res.u32);
        return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
                      unsigned int dword11, void *buffer, size_t buflen,
                      u32 *result)
{
        return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
                             buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
                      unsigned int dword11, void *buffer, size_t buflen,
                      u32 *result)
{
        return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
                             buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

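/*
 * Negotiate the number of I/O queues via the Number of Queues feature.
 * Both 16-bit halves of dword11 are zero-based, so requesting *count = 8
 * encodes as q_count = 0x00070007. The controller answers in the same
 * format with the counts actually allocated; we take the smaller half
 * plus one and clamp *count to it.
 */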
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
        u32 q_count = (*count - 1) | ((*count - 1) << 16);
        u32 result;
        int status, nr_io_queues;

        status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
                        &result);
        if (status < 0)
                return status;

        /*
         * Degraded controllers might return an error when setting the queue
         * count.  We still want to be able to bring them online and offer
         * access to the admin queue, as that might be the only way to fix
         * them up.
         */
        if (status > 0) {
                dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
                *count = 0;
        } else {
                nr_io_queues = min(result & 0xffff, result >> 16) + 1;
                *count = min(*count, nr_io_queues);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
        (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
        u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
        int status;

        if (!supported_aens)
                return;

        status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
                        NULL, 0, &result);
        if (status)
                dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
                         supported_aens);
}

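/*
 * NVME_IOCTL_SUBMIT_IO handler. Note that nblocks is zero-based, so the
 * data length is (nblocks + 1) LBAs; for extended-LBA formats (ns->ext)
 * the per-block metadata travels inline with the data rather than in a
 * separate integrity buffer.
 */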
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
        struct nvme_user_io io;
        struct nvme_command c;
        unsigned length, meta_len;
        void __user *metadata;

        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
        if (io.flags)
                return -EINVAL;

        switch (io.opcode) {
        case nvme_cmd_write:
        case nvme_cmd_read:
        case nvme_cmd_compare:
                break;
        default:
                return -EINVAL;
        }

        length = (io.nblocks + 1) << ns->lba_shift;
        meta_len = (io.nblocks + 1) * ns->ms;
        metadata = (void __user *)(uintptr_t)io.metadata;

        if (ns->ext) {
                length += meta_len;
                meta_len = 0;
        } else if (meta_len) {
                if ((io.metadata & 3) || !io.metadata)
                        return -EINVAL;
        }

        memset(&c, 0, sizeof(c));
        c.rw.opcode = io.opcode;
        c.rw.flags = io.flags;
        c.rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.rw.slba = cpu_to_le64(io.slba);
        c.rw.length = cpu_to_le16(io.nblocks);
        c.rw.control = cpu_to_le16(io.control);
        c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
        c.rw.reftag = cpu_to_le32(io.reftag);
        c.rw.apptag = cpu_to_le16(io.apptag);
        c.rw.appmask = cpu_to_le16(io.appmask);

        return nvme_submit_user_cmd(ns->queue, &c,
                        (void __user *)(uintptr_t)io.addr, length,
                        metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}

static u32 nvme_known_admin_effects(u8 opcode)
{
        switch (opcode) {
        case nvme_admin_format_nvm:
                return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
                                        NVME_CMD_EFFECTS_CSE_MASK;
        case nvme_admin_sanitize_nvm:
                return NVME_CMD_EFFECTS_CSE_MASK;
        default:
                break;
        }
        return 0;
}

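/*
 * Quiesce the controller around a passthrough command according to its
 * Commands Supported and Effects entry. Must be paired with
 * nvme_passthru_end(), which undoes the freeze and rescans, as done in
 * nvme_user_cmd() below.
 */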
static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                                                                u8 opcode)
{
        u32 effects = 0;

        if (ns) {
                if (ctrl->effects)
                        effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
                if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
                        dev_warn(ctrl->device,
                                 "IO command:%02x has unhandled effects:%08x\n",
                                 opcode, effects);
                return 0;
        }

        if (ctrl->effects)
                effects = le32_to_cpu(ctrl->effects->acs[opcode]);
        effects |= nvme_known_admin_effects(opcode);

        /*
         * For simplicity, IO to all namespaces is quiesced even if the command
         * effects say only one namespace is affected.
         */
        if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
                mutex_lock(&ctrl->scan_lock);
                nvme_start_freeze(ctrl);
                nvme_wait_freeze(ctrl);
        }
        return effects;
}

static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
        struct nvme_ns *ns;

        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list)
                if (ns->disk && nvme_revalidate_disk(ns->disk))
                        nvme_set_queue_dying(ns);
        up_read(&ctrl->namespaces_rwsem);

        nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
        /*
         * Revalidate LBA changes prior to unfreezing. This is necessary to
         * prevent memory corruption if a logical block size was changed by
         * this command.
         */
        if (effects & NVME_CMD_EFFECTS_LBCC)
                nvme_update_formats(ctrl);
        if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
                nvme_unfreeze(ctrl);
                mutex_unlock(&ctrl->scan_lock);
        }
        if (effects & NVME_CMD_EFFECTS_CCC)
                nvme_init_identify(ctrl);
        if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
                nvme_queue_scan(ctrl);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                        struct nvme_passthru_cmd __user *ucmd)
{
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        u32 effects;
        int status;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
                        (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
                        0, &cmd.result, timeout);
        nvme_passthru_end(ctrl, effects);

        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}

/*
 * Issue ioctl requests on the first available path.  Note that unlike normal
 * block layer requests we will not retry failed requests on another
 * controller.
 */
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
                struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
        if (disk->fops == &nvme_ns_head_ops) {
                struct nvme_ns *ns;

                *head = disk->private_data;
                *srcu_idx = srcu_read_lock(&(*head)->srcu);
                ns = nvme_find_path(*head);
                if (!ns)
                        srcu_read_unlock(&(*head)->srcu, *srcu_idx);
                return ns;
        }
#endif
        *head = NULL;
        *srcu_idx = -1;
        return disk->private_data;
}

static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
        if (head)
                srcu_read_unlock(&head->srcu, idx);
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns_head *head = NULL;
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret;

        ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
        if (unlikely(!ns))
                return -EWOULDBLOCK;

        /*
         * Handle ioctls that apply to the controller instead of the namespace
         * separately and drop the ns SRCU reference early.  This avoids a
         * deadlock when deleting namespaces using the passthrough interface.
         */
        if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
                struct nvme_ctrl *ctrl = ns->ctrl;

                nvme_get_ctrl(ns->ctrl);
                nvme_put_ns_from_disk(head, srcu_idx);

                if (cmd == NVME_IOCTL_ADMIN_CMD)
                        ret = nvme_user_cmd(ctrl, NULL, argp);
                else
                        ret = sed_ioctl(ctrl->opal_dev, cmd, argp);

                nvme_put_ctrl(ctrl);
                return ret;
        }

        switch (cmd) {
        case NVME_IOCTL_ID:
                force_successful_syscall_return();
                ret = ns->head->ns_id;
                break;
        case NVME_IOCTL_IO_CMD:
                ret = nvme_user_cmd(ns->ctrl, ns, argp);
                break;
        case NVME_IOCTL_SUBMIT_IO:
                ret = nvme_submit_io(ns, argp);
                break;
        default:
                if (ns->ndev)
                        ret = nvme_nvm_ioctl(ns, cmd, arg);
                else
                        ret = -ENOTTY;
        }

        nvme_put_ns_from_disk(head, srcu_idx);
        return ret;
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
        struct nvme_ns *ns = bdev->bd_disk->private_data;

#ifdef CONFIG_NVME_MULTIPATH
        /* should never be called due to GENHD_FL_HIDDEN */
        if (WARN_ON_ONCE(ns->head->disk))
                goto fail;
#endif
        if (!kref_get_unless_zero(&ns->kref))
                goto fail;
        if (!try_module_get(ns->ctrl->ops->module))
                goto fail_put_ns;

        return 0;

fail_put_ns:
        nvme_put_ns(ns);
fail:
        return -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
        struct nvme_ns *ns = disk->private_data;

        module_put(ns->ctrl->ops->module);
        nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        /* some standard values */
        geo->heads = 1 << 6;
        geo->sectors = 1 << 5;
        geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
        return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
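/*
 * Register a T10 protection information profile with the block layer.
 * For Types 1 and 2 the reference tag is checked, leaving only the
 * 2-byte application tag as free tag space; Type 3 does not define
 * reference tag checking, so its 4-byte reference tag space is exposed
 * as additional tag bytes as well.
 */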
1504 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1505 {
1506         struct blk_integrity integrity;
1507
1508         memset(&integrity, 0, sizeof(integrity));
1509         switch (pi_type) {
1510         case NVME_NS_DPS_PI_TYPE3:
1511                 integrity.profile = &t10_pi_type3_crc;
1512                 integrity.tag_size = sizeof(u16) + sizeof(u32);
1513                 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1514                 break;
1515         case NVME_NS_DPS_PI_TYPE1:
1516         case NVME_NS_DPS_PI_TYPE2:
1517                 integrity.profile = &t10_pi_type1_crc;
1518                 integrity.tag_size = sizeof(u16);
1519                 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1520                 break;
1521         default:
1522                 integrity.profile = NULL;
1523                 break;
1524         }
1525         integrity.tuple_size = ms;
1526         blk_integrity_register(disk, &integrity);
1527         blk_queue_max_integrity_segments(disk->queue, 1);
1528 }
1529 #else
1530 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1531 {
1532 }
1533 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1534
1535 static void nvme_set_chunk_size(struct nvme_ns *ns)
1536 {
1537         u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
1538         blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
1539 }
1540
1541 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1542 {
1543         struct nvme_ctrl *ctrl = ns->ctrl;
1544         struct request_queue *queue = disk->queue;
1545         u32 size = queue_logical_block_size(queue);
1546
1547         if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
1548                 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
1549                 return;
1550         }
1551
1552         if (ctrl->nr_streams && ns->sws && ns->sgs)
1553                 size *= ns->sws * ns->sgs;
1554
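        /*
         * Each discard segment becomes one struct nvme_dsm_range, and the
         * whole range list of a single request must fit in one page.
         */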
1555         BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1556                         NVME_DSM_MAX_RANGES);
1557
1558         queue->limits.discard_alignment = 0;
1559         queue->limits.discard_granularity = size;
1560
1561         /* If discard is already enabled, don't reset queue limits */
1562         if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
1563                 return;
1564
1565         blk_queue_max_discard_sectors(queue, UINT_MAX);
1566         blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1567
1568         if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1569                 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1570 }
1571
1572 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1573 {
1574         u32 max_sectors;
1575         unsigned short bs = 1 << ns->lba_shift;
1576
1577         if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
1578             (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1579                 return;
1580         /*
1581          * Even though the NVMe spec explicitly states that MDTS is not
1582          * applicable to Write Zeroes ("The restriction does not apply to
1583          * commands that do not transfer data between the host and the
1584          * controller (e.g., Write Uncorrectable or Write Zeroes command)."),
1585          * be cautious and cap the Write Zeroes size at the controller's
1586          * max_hw_sectors value, which nvme_init_identify() derives from the
1587          * MDTS field when available.  When MDTS is unlimited, fall back to
1588          * a cap of 65536 (USHRT_MAX + 1) logical blocks.
1589          */
1590         if (ns->ctrl->max_hw_sectors == UINT_MAX)
1591                 max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
1592         else
1593                 max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
1594
1595         blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
1596 }
1597
1598 static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1599                 struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1600 {
1601         memset(ids, 0, sizeof(*ids));
1602
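        /*
         * EUI-64 was added in NVMe 1.1, NGUID in 1.2, and the namespace
         * descriptor list (which may also carry a UUID) in 1.3.
         */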
1603         if (ctrl->vs >= NVME_VS(1, 1, 0))
1604                 memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1605         if (ctrl->vs >= NVME_VS(1, 2, 0))
1606                 memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1607         if (ctrl->vs >= NVME_VS(1, 3, 0)) {
1608                  /* Don't treat an error as fatal, as we potentially
1609                   * already have an NGUID or EUI-64.
1610                   */
1611                 if (nvme_identify_ns_descs(ctrl, nsid, ids))
1612                         dev_warn(ctrl->device,
1613                                  "%s: Identify Descriptors failed\n", __func__);
1614         }
1615 }
1616
1617 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
1618 {
1619         return !uuid_is_null(&ids->uuid) ||
1620                 memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
1621                 memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
1622 }
1623
1624 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1625 {
1626         return uuid_equal(&a->uuid, &b->uuid) &&
1627                 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1628                 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
1629 }
1630
1631 static void nvme_update_disk_info(struct gendisk *disk,
1632                 struct nvme_ns *ns, struct nvme_id_ns *id)
1633 {
1634         sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
1635         unsigned short bs = 1 << ns->lba_shift;
1636         u32 atomic_bs, phys_bs, io_opt;
1637
1638         if (ns->lba_shift > PAGE_SHIFT) {
1639                 /* unsupported block size, set capacity to 0 later */
1640                 bs = (1 << 9);
1641         }
1642         blk_mq_freeze_queue(disk->queue);
1643         blk_integrity_unregister(disk);
1644
1645         if (id->nabo == 0) {
1646                 /*
1647                  * Bit 1 indicates whether NAWUPF is defined for this namespace
1648                  * and whether it should be used instead of AWUPF. If NAWUPF ==
1649                  * 0 then AWUPF must be used instead.
1650                  */
1651                 if (id->nsfeat & (1 << 1) && id->nawupf)
1652                         atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
1653                 else
1654                         atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1655         } else {
1656                 atomic_bs = bs;
1657         }
1658         phys_bs = bs;
1659         io_opt = bs;
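        /*
         * NPWG and NOWS are 0's based counts of logical blocks, hence the
         * "1 +" when scaling the base block size below.
         */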
1660         if (id->nsfeat & (1 << 4)) {
1661                 /* NPWG = Namespace Preferred Write Granularity */
1662                 phys_bs *= 1 + le16_to_cpu(id->npwg);
1663                 /* NOWS = Namespace Optimal Write Size */
1664                 io_opt *= 1 + le16_to_cpu(id->nows);
1665         }
1666
1667         blk_queue_logical_block_size(disk->queue, bs);
1668         /*
1669          * Linux filesystems assume writing a single physical block is
1670          * an atomic operation. Hence limit the physical block size to the
1671          * value of the Atomic Write Unit Power Fail parameter.
1672          */
1673         blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
1674         blk_queue_io_min(disk->queue, phys_bs);
1675         blk_queue_io_opt(disk->queue, io_opt);
1676
1677         if (ns->ms && !ns->ext &&
1678             (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1679                 nvme_init_integrity(disk, ns->ms, ns->pi_type);
1680         if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
1681             ns->lba_shift > PAGE_SHIFT)
1682                 capacity = 0;
1683
1684         set_capacity(disk, capacity);
1685
1686         nvme_config_discard(disk, ns);
1687         nvme_config_write_zeroes(disk, ns);
1688
1689         if (id->nsattr & (1 << 0))
1690                 set_disk_ro(disk, true);
1691         else
1692                 set_disk_ro(disk, false);
1693
1694         blk_mq_unfreeze_queue(disk->queue);
1695 }
1696
1697 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1698 {
1699         struct nvme_ns *ns = disk->private_data;
1700
1701         /*
1702          * If Identify Namespace failed, use the default 512-byte block size
1703          * so the block layer can operate before I/O fails for 0 capacity.
1704          */
1705         ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1706         if (ns->lba_shift == 0)
1707                 ns->lba_shift = 9;
1708         ns->noiob = le16_to_cpu(id->noiob);
1709         ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1710         ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1711         /* the PI implementation requires metadata equal to the T10 PI tuple size */
1712         if (ns->ms == sizeof(struct t10_pi_tuple))
1713                 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1714         else
1715                 ns->pi_type = 0;
1716
1717         if (ns->noiob)
1718                 nvme_set_chunk_size(ns);
1719         nvme_update_disk_info(disk, ns, id);
1720 #ifdef CONFIG_NVME_MULTIPATH
1721         if (ns->head->disk) {
1722                 nvme_update_disk_info(ns->head->disk, ns, id);
1723                 blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1724         }
1725 #endif
1726 }
1727
1728 static int nvme_revalidate_disk(struct gendisk *disk)
1729 {
1730         struct nvme_ns *ns = disk->private_data;
1731         struct nvme_ctrl *ctrl = ns->ctrl;
1732         struct nvme_id_ns *id;
1733         struct nvme_ns_ids ids;
1734         int ret = 0;
1735
1736         if (test_bit(NVME_NS_DEAD, &ns->flags)) {
1737                 set_capacity(disk, 0);
1738                 return -ENODEV;
1739         }
1740
1741         ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
1742         if (ret)
1743                 goto out;
1744
1745         if (id->ncap == 0) {
1746                 ret = -ENODEV;
1747                 goto free_id;
1748         }
1749
1750         __nvme_revalidate_disk(disk, id);
1751         nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
1752         if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
1753                 dev_err(ctrl->device,
1754                         "identifiers changed for nsid %d\n", ns->head->ns_id);
1755                 ret = -ENODEV;
1756         }
1757
1758 free_id:
1759         kfree(id);
1760 out:
1761         if (ret > 0)
1762                 ret = blk_status_to_errno(nvme_error_status(ret));
1763         return ret;
1764 }
1765
1766 static char nvme_pr_type(enum pr_type type)
1767 {
1768         switch (type) {
1769         case PR_WRITE_EXCLUSIVE:
1770                 return 1;
1771         case PR_EXCLUSIVE_ACCESS:
1772                 return 2;
1773         case PR_WRITE_EXCLUSIVE_REG_ONLY:
1774                 return 3;
1775         case PR_EXCLUSIVE_ACCESS_REG_ONLY:
1776                 return 4;
1777         case PR_WRITE_EXCLUSIVE_ALL_REGS:
1778                 return 5;
1779         case PR_EXCLUSIVE_ACCESS_ALL_REGS:
1780                 return 6;
1781         default:
1782                 return 0;
1783         }
1784 }
1785
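/*
 * Persistent reservation payloads carry the current key in bytes 0-7 and
 * the new (or preempt) key in bytes 8-15.  CDW10 packs the action code in
 * bits 02:00, IEKEY ("ignore existing key", only meaningful when no key is
 * supplied) in bit 03 and, for acquire/release, the reservation type in
 * bits 15:08.
 */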
1786 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
1787                                 u64 key, u64 sa_key, u8 op)
1788 {
1789         struct nvme_ns_head *head = NULL;
1790         struct nvme_ns *ns;
1791         struct nvme_command c;
1792         int srcu_idx, ret;
1793         u8 data[16] = { 0, };
1794
1795         ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1796         if (unlikely(!ns))
1797                 return -EWOULDBLOCK;
1798
1799         put_unaligned_le64(key, &data[0]);
1800         put_unaligned_le64(sa_key, &data[8]);
1801
1802         memset(&c, 0, sizeof(c));
1803         c.common.opcode = op;
1804         c.common.nsid = cpu_to_le32(ns->head->ns_id);
1805         c.common.cdw10 = cpu_to_le32(cdw10);
1806
1807         ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
1808         nvme_put_ns_from_disk(head, srcu_idx);
1809         return ret;
1810 }
1811
1812 static int nvme_pr_register(struct block_device *bdev, u64 old,
1813                 u64 new, unsigned flags)
1814 {
1815         u32 cdw10;
1816
1817         if (flags & ~PR_FL_IGNORE_KEY)
1818                 return -EOPNOTSUPP;
1819
1820         cdw10 = old ? 2 : 0;
1821         cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
1822         cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
1823         return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
1824 }
1825
1826 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
1827                 enum pr_type type, unsigned flags)
1828 {
1829         u32 cdw10;
1830
1831         if (flags & ~PR_FL_IGNORE_KEY)
1832                 return -EOPNOTSUPP;
1833
1834         cdw10 = nvme_pr_type(type) << 8;
1835         cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
1836         return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
1837 }
1838
1839 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
1840                 enum pr_type type, bool abort)
1841 {
1842         u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
1843         return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
1844 }
1845
1846 static int nvme_pr_clear(struct block_device *bdev, u64 key)
1847 {
1848         u32 cdw10 = 1 | (key ? 0 : 1 << 3);
1849         return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
1850 }
1851
1852 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
1853 {
1854         u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
1855         return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
1856 }
1857
1858 static const struct pr_ops nvme_pr_ops = {
1859         .pr_register    = nvme_pr_register,
1860         .pr_reserve     = nvme_pr_reserve,
1861         .pr_release     = nvme_pr_release,
1862         .pr_preempt     = nvme_pr_preempt,
1863         .pr_clear       = nvme_pr_clear,
1864 };
1865
1866 #ifdef CONFIG_BLK_SED_OPAL
1867 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
1868                 bool send)
1869 {
1870         struct nvme_ctrl *ctrl = data;
1871         struct nvme_command cmd;
1872
1873         memset(&cmd, 0, sizeof(cmd));
1874         if (send)
1875                 cmd.common.opcode = nvme_admin_security_send;
1876         else
1877                 cmd.common.opcode = nvme_admin_security_recv;
1878         cmd.common.nsid = 0;
1879         cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
1880         cmd.common.cdw11 = cpu_to_le32(len);
1881
1882         return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
1883                                       ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
1884 }
1885 EXPORT_SYMBOL_GPL(nvme_sec_submit);
1886 #endif /* CONFIG_BLK_SED_OPAL */
1887
1888 static const struct block_device_operations nvme_fops = {
1889         .owner          = THIS_MODULE,
1890         .ioctl          = nvme_ioctl,
1891         .compat_ioctl   = nvme_ioctl,
1892         .open           = nvme_open,
1893         .release        = nvme_release,
1894         .getgeo         = nvme_getgeo,
1895         .revalidate_disk= nvme_revalidate_disk,
1896         .pr_ops         = &nvme_pr_ops,
1897 };
1898
1899 #ifdef CONFIG_NVME_MULTIPATH
1900 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
1901 {
1902         struct nvme_ns_head *head = bdev->bd_disk->private_data;
1903
1904         if (!kref_get_unless_zero(&head->ref))
1905                 return -ENXIO;
1906         return 0;
1907 }
1908
1909 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
1910 {
1911         nvme_put_ns_head(disk->private_data);
1912 }
1913
1914 const struct block_device_operations nvme_ns_head_ops = {
1915         .owner          = THIS_MODULE,
1916         .open           = nvme_ns_head_open,
1917         .release        = nvme_ns_head_release,
1918         .ioctl          = nvme_ioctl,
1919         .compat_ioctl   = nvme_ioctl,
1920         .getgeo         = nvme_getgeo,
1921         .pr_ops         = &nvme_pr_ops,
1922 };
1923 #endif /* CONFIG_NVME_MULTIPATH */
1924
1925 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
1926 {
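        /*
         * CAP.TO is the worst-case time the controller needs to flip
         * CSTS.RDY, in 500ms units, hence the (TO + 1) * HZ / 2 budget.
         */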
1927         unsigned long timeout =
1928                 ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1929         u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
1930         int ret;
1931
1932         while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1933                 if (csts == ~0)
1934                         return -ENODEV;
1935                 if ((csts & NVME_CSTS_RDY) == bit)
1936                         break;
1937
1938                 msleep(100);
1939                 if (fatal_signal_pending(current))
1940                         return -EINTR;
1941                 if (time_after(jiffies, timeout)) {
1942                         dev_err(ctrl->device,
1943                                 "Device not ready; aborting %s\n", enabled ?
1944                                                 "initialisation" : "reset");
1945                         return -ENODEV;
1946                 }
1947         }
1948
1949         return ret;
1950 }
1951
1952 /*
1953  * If the device has been passed off to us in an enabled state, just clear
1954  * the enabled bit.  The spec says we should set the 'shutdown notification
1955  * bits', but doing so may cause the device to complete commands to the
1956  * admin queue ... and we don't know what memory that might be pointing at!
1957  */
1958 int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
1959 {
1960         int ret;
1961
1962         ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
1963         ctrl->ctrl_config &= ~NVME_CC_ENABLE;
1964
1965         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1966         if (ret)
1967                 return ret;
1968
1969         if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
1970                 msleep(NVME_QUIRK_DELAY_AMOUNT);
1971
1972         return nvme_wait_ready(ctrl, ctrl->cap, false);
1973 }
1974 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
1975
1976 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
1977 {
1978         /*
1979          * Default to a 4K page size, with the intention to update this
1980          * path in the future to accommodate architectures with differing
1981          * kernel and IO page sizes.
1982          */
1983         unsigned dev_page_min, page_shift = 12;
1984         int ret;
1985
1986         ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
1987         if (ret) {
1988                 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
1989                 return ret;
1990         }
1991         dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
1992
1993         if (page_shift < dev_page_min) {
1994                 dev_err(ctrl->device,
1995                         "Minimum device page size %u too large for host (%u)\n",
1996                         1 << dev_page_min, 1 << page_shift);
1997                 return -ENODEV;
1998         }
1999
2000         ctrl->page_size = 1 << page_shift;
2001
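        /*
         * CC.CSS selects the NVM command set, CC.MPS encodes the memory
         * page size as a power of two above 4K, and IOSQES/IOCQES request
         * the standard 2^6 = 64-byte SQ and 2^4 = 16-byte CQ entries.
         */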
2002         ctrl->ctrl_config = NVME_CC_CSS_NVM;
2003         ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
2004         ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2005         ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2006         ctrl->ctrl_config |= NVME_CC_ENABLE;
2007
2008         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2009         if (ret)
2010                 return ret;
2011         return nvme_wait_ready(ctrl, ctrl->cap, true);
2012 }
2013 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2014
2015 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
2016 {
2017         unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2018         u32 csts;
2019         int ret;
2020
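        /*
         * Request a normal shutdown via CC.SHN, then poll CSTS.SHST until
         * the controller reports shutdown processing complete.
         */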
2021         ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2022         ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2023
2024         ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2025         if (ret)
2026                 return ret;
2027
2028         while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2029                 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
2030                         break;
2031
2032                 msleep(100);
2033                 if (fatal_signal_pending(current))
2034                         return -EINTR;
2035                 if (time_after(jiffies, timeout)) {
2036                         dev_err(ctrl->device,
2037                                 "Device shutdown incomplete; abort shutdown\n");
2038                         return -ENODEV;
2039                 }
2040         }
2041
2042         return ret;
2043 }
2044 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2045
2046 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
2047                 struct request_queue *q)
2048 {
2049         bool vwc = false;
2050
2051         if (ctrl->max_hw_sectors) {
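                /*
                 * Each data segment maps at most one controller page, so a
                 * max_hw_sectors transfer needs about max_hw_sectors divided
                 * by the page size in 512-byte units; the +1 covers a buffer
                 * that starts at an unaligned offset.
                 */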
2052                 u32 max_segments =
2053                         (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
2054
2055                 max_segments = min_not_zero(max_segments, ctrl->max_segments);
2056                 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
2057                 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
2058         }
2059         if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
2060             is_power_of_2(ctrl->max_hw_sectors))
2061                 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
2062         blk_queue_virt_boundary(q, ctrl->page_size - 1);
2063         if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
2064                 vwc = true;
2065         blk_queue_write_cache(q, vwc, vwc);
2066 }
2067
2068 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2069 {
2070         __le64 ts;
2071         int ret;
2072
2073         if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2074                 return 0;
2075
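        /*
         * The Timestamp feature (FID 0x0e) stores a 48-bit millisecond
         * count; Linux seeds it with wall-clock time, so the low six bytes
         * of the __le64 below are what the controller keeps.
         */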
2076         ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2077         ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2078                         NULL);
2079         if (ret)
2080                 dev_warn_once(ctrl->device,
2081                         "could not set timestamp (%d)\n", ret);
2082         return ret;
2083 }
2084
2085 static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2086 {
2087         struct nvme_feat_host_behavior *host;
2088         int ret;
2089
2090         /* Don't bother enabling the feature if retry delay is not reported */
2091         if (!ctrl->crdt[0])
2092                 return 0;
2093
2094         host = kzalloc(sizeof(*host), GFP_KERNEL);
2095         if (!host)
2096                 return 0;
2097
2098         host->acre = NVME_ENABLE_ACRE;
2099         ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2100                                 host, sizeof(*host), NULL);
2101         kfree(host);
2102         return ret;
2103 }
2104
2105 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2106 {
2107         /*
2108          * APST (Autonomous Power State Transition) lets us program a
2109          * table of power state transitions that the controller will
2110          * perform automatically.  We configure it with a simple
2111          * heuristic: we are willing to spend at most 2% of the time
2112          * transitioning between power states.  Therefore, when running
2113          * in any given state, we will enter the next lower-power
2114          * non-operational state after waiting 50 * (enlat + exlat)
2115          * microseconds, as long as that state's exit latency is under
2116          * the requested maximum latency.
2117          *
2118          * We will not autonomously enter any non-operational state for
2119          * which the total latency exceeds ps_max_latency_us.  Users
2120          * can set ps_max_latency_us to zero to turn off APST.
2121          */
2122
2123         unsigned apste;
2124         struct nvme_feat_auto_pst *table;
2125         u64 max_lat_us = 0;
2126         int max_ps = -1;
2127         int ret;
2128
2129         /*
2130          * If APST isn't supported or if we haven't been initialized yet,
2131          * then don't do anything.
2132          */
2133         if (!ctrl->apsta)
2134                 return 0;
2135
2136         if (ctrl->npss > 31) {
2137                 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2138                 return 0;
2139         }
2140
2141         table = kzalloc(sizeof(*table), GFP_KERNEL);
2142         if (!table)
2143                 return 0;
2144
2145         if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2146                 /* Turn off APST. */
2147                 apste = 0;
2148                 dev_dbg(ctrl->device, "APST disabled\n");
2149         } else {
2150                 __le64 target = cpu_to_le64(0);
2151                 int state;
2152
2153                 /*
2154                  * Walk through all states from lowest- to highest-power.
2155                  * According to the spec, lower-numbered states use more
2156                  * power.  NPSS, despite the name, is the index of the
2157                  * lowest-power state, not the number of states.
2158                  */
2159                 for (state = (int)ctrl->npss; state >= 0; state--) {
2160                         u64 total_latency_us, exit_latency_us, transition_ms;
2161
2162                         if (target)
2163                                 table->entries[state] = target;
2164
2165                         /*
2166                          * Don't allow transitions to the deepest state
2167                          * if it's quirked off.
2168                          */
2169                         if (state == ctrl->npss &&
2170                             (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2171                                 continue;
2172
2173                         /*
2174                          * Is this state a useful non-operational state for
2175                          * higher-power states to autonomously transition to?
2176                          */
2177                         if (!(ctrl->psd[state].flags &
2178                               NVME_PS_FLAGS_NON_OP_STATE))
2179                                 continue;
2180
2181                         exit_latency_us =
2182                                 (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2183                         if (exit_latency_us > ctrl->ps_max_latency_us)
2184                                 continue;
2185
2186                         total_latency_us =
2187                                 exit_latency_us +
2188                                 le32_to_cpu(ctrl->psd[state].entry_lat);
2189
2190                         /*
2191                          * This state is good.  Use it as the APST idle target for higher
2192                          * power states; dwell for 50 * (enlat + exlat), i.e.
2193                          * total_latency_us / 20 milliseconds, rounded up.
                          */
2194                         transition_ms = total_latency_us + 19;
2195                         do_div(transition_ms, 20);
2196                         if (transition_ms > (1 << 24) - 1)
2197                                 transition_ms = (1 << 24) - 1;
2198
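                        /*
                         * Each APST table entry packs the Idle Transition
                         * Power State in bits 07:03 and the Idle Time Prior
                         * to Transition, in milliseconds, in bits 31:08.
                         */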
2199                         target = cpu_to_le64((state << 3) |
2200                                              (transition_ms << 8));
2201
2202                         if (max_ps == -1)
2203                                 max_ps = state;
2204
2205                         if (total_latency_us > max_lat_us)
2206                                 max_lat_us = total_latency_us;
2207                 }
2208
2209                 apste = 1;
2210
2211                 if (max_ps == -1) {
2212                         dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2213                 } else {
2214                         dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2215                                 max_ps, max_lat_us, (int)sizeof(*table), table);
2216                 }
2217         }
2218
2219         ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2220                                 table, sizeof(*table), NULL);
2221         if (ret)
2222                 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2223
2224         kfree(table);
2225         return ret;
2226 }
2227
2228 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2229 {
2230         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2231         u64 latency;
2232
2233         switch (val) {
2234         case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2235         case PM_QOS_LATENCY_ANY:
2236                 latency = U64_MAX;
2237                 break;
2238
2239         default:
2240                 latency = val;
2241         }
2242
2243         if (ctrl->ps_max_latency_us != latency) {
2244                 ctrl->ps_max_latency_us = latency;
2245                 nvme_configure_apst(ctrl);
2246         }
2247 }
2248
2249 struct nvme_core_quirk_entry {
2250         /*
2251          * NVMe model and firmware strings are padded with spaces.  For
2252          * simplicity, strings in the quirk table are padded with NULLs
2253          * instead.
2254          */
2255         u16 vid;
2256         const char *mn;
2257         const char *fr;
2258         unsigned long quirks;
2259 };
2260
2261 static const struct nvme_core_quirk_entry core_quirks[] = {
2262         {
2263                 /*
2264                  * This Toshiba device seems to die using any APST states.  See:
2265                  * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2266                  */
2267                 .vid = 0x1179,
2268                 .mn = "THNSF5256GPUK TOSHIBA",
2269                 .quirks = NVME_QUIRK_NO_APST,
2270         }
2271 };
2272
2273 /* match is null-terminated but idstr is space-padded. */
2274 static bool string_matches(const char *idstr, const char *match, size_t len)
2275 {
2276         size_t matchlen;
2277
2278         if (!match)
2279                 return true;
2280
2281         matchlen = strlen(match);
2282         WARN_ON_ONCE(matchlen > len);
2283
2284         if (memcmp(idstr, match, matchlen))
2285                 return false;
2286
2287         for (; matchlen < len; matchlen++)
2288                 if (idstr[matchlen] != ' ')
2289                         return false;
2290
2291         return true;
2292 }
2293
2294 static bool quirk_matches(const struct nvme_id_ctrl *id,
2295                           const struct nvme_core_quirk_entry *q)
2296 {
2297         return q->vid == le16_to_cpu(id->vid) &&
2298                 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2299                 string_matches(id->fr, q->fr, sizeof(id->fr));
2300 }
2301
2302 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2303                 struct nvme_id_ctrl *id)
2304 {
2305         size_t nqnlen;
2306         int off;
2307
2308         if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2309                 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2310                 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2311                         strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2312                         return;
2313                 }
2314
2315                 if (ctrl->vs >= NVME_VS(1, 2, 1))
2316                         dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2317         }
2318
2319         /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2320         off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2321                         "nqn.2014.08.org.nvmexpress:%04x%04x",
2322                         le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2323         memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2324         off += sizeof(id->sn);
2325         memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2326         off += sizeof(id->mn);
2327         memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
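        /*
         * e.g. for an Intel controller (VID/SSVID 0x8086) this produces
         * "nqn.2014.08.org.nvmexpress:80868086" followed by the
         * space-padded serial number and model string.
         */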
2328 }
2329
2330 static void nvme_release_subsystem(struct device *dev)
2331 {
2332         struct nvme_subsystem *subsys =
2333                 container_of(dev, struct nvme_subsystem, dev);
2334
2335         ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
2336         kfree(subsys);
2337 }
2338
2339 static void nvme_destroy_subsystem(struct kref *ref)
2340 {
2341         struct nvme_subsystem *subsys =
2342                         container_of(ref, struct nvme_subsystem, ref);
2343
2344         mutex_lock(&nvme_subsystems_lock);
2345         list_del(&subsys->entry);
2346         mutex_unlock(&nvme_subsystems_lock);
2347
2348         ida_destroy(&subsys->ns_ida);
2349         device_del(&subsys->dev);
2350         put_device(&subsys->dev);
2351 }
2352
2353 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2354 {
2355         kref_put(&subsys->ref, nvme_destroy_subsystem);
2356 }
2357
2358 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2359 {
2360         struct nvme_subsystem *subsys;
2361
2362         lockdep_assert_held(&nvme_subsystems_lock);
2363
2364         list_for_each_entry(subsys, &nvme_subsystems, entry) {
2365                 if (strcmp(subsys->subnqn, subsysnqn))
2366                         continue;
2367                 if (!kref_get_unless_zero(&subsys->ref))
2368                         continue;
2369                 return subsys;
2370         }
2371
2372         return NULL;
2373 }
2374
2375 #define SUBSYS_ATTR_RO(_name, _mode, _show)                     \
2376         struct device_attribute subsys_attr_##_name = \
2377                 __ATTR(_name, _mode, _show, NULL)
2378
2379 static ssize_t nvme_subsys_show_nqn(struct device *dev,
2380                                     struct device_attribute *attr,
2381                                     char *buf)
2382 {
2383         struct nvme_subsystem *subsys =
2384                 container_of(dev, struct nvme_subsystem, dev);
2385
2386         return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
2387 }
2388 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2389
2390 #define nvme_subsys_show_str_function(field)                            \
2391 static ssize_t subsys_##field##_show(struct device *dev,                \
2392                             struct device_attribute *attr, char *buf)   \
2393 {                                                                       \
2394         struct nvme_subsystem *subsys =                                 \
2395                 container_of(dev, struct nvme_subsystem, dev);          \
2396         return sprintf(buf, "%.*s\n",                                   \
2397                        (int)sizeof(subsys->field), subsys->field);      \
2398 }                                                                       \
2399 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2400
2401 nvme_subsys_show_str_function(model);
2402 nvme_subsys_show_str_function(serial);
2403 nvme_subsys_show_str_function(firmware_rev);
2404
2405 static struct attribute *nvme_subsys_attrs[] = {
2406         &subsys_attr_model.attr,
2407         &subsys_attr_serial.attr,
2408         &subsys_attr_firmware_rev.attr,
2409         &subsys_attr_subsysnqn.attr,
2410 #ifdef CONFIG_NVME_MULTIPATH
2411         &subsys_attr_iopolicy.attr,
2412 #endif
2413         NULL,
2414 };
2415
2416 static struct attribute_group nvme_subsys_attrs_group = {
2417         .attrs = nvme_subsys_attrs,
2418 };
2419
2420 static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2421         &nvme_subsys_attrs_group,
2422         NULL,
2423 };
2424
2425 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2426                 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2427 {
2428         struct nvme_ctrl *tmp;
2429
2430         lockdep_assert_held(&nvme_subsystems_lock);
2431
2432         list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2433                 if (tmp->state == NVME_CTRL_DELETING ||
2434                     tmp->state == NVME_CTRL_DEAD)
2435                         continue;
2436
2437                 if (tmp->cntlid == ctrl->cntlid) {
2438                         dev_err(ctrl->device,
2439                                 "Duplicate cntlid %u with %s, rejecting\n",
2440                                 ctrl->cntlid, dev_name(tmp->device));
2441                         return false;
2442                 }
2443
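                /* CMIC bit 1: subsystem may contain two or more controllers. */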
2444                 if ((id->cmic & (1 << 1)) ||
2445                     (ctrl->opts && ctrl->opts->discovery_nqn))
2446                         continue;
2447
2448                 dev_err(ctrl->device,
2449                         "Subsystem does not support multiple controllers\n");
2450                 return false;
2451         }
2452
2453         return true;
2454 }
2455
2456 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2457 {
2458         struct nvme_subsystem *subsys, *found;
2459         int ret;
2460
2461         subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2462         if (!subsys)
2463                 return -ENOMEM;
2464         ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL);
2465         if (ret < 0) {
2466                 kfree(subsys);
2467                 return ret;
2468         }
2469         subsys->instance = ret;
2470         mutex_init(&subsys->lock);
2471         kref_init(&subsys->ref);
2472         INIT_LIST_HEAD(&subsys->ctrls);
2473         INIT_LIST_HEAD(&subsys->nsheads);
2474         nvme_init_subnqn(subsys, ctrl, id);
2475         memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2476         memcpy(subsys->model, id->mn, sizeof(subsys->model));
2477         memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2478         subsys->vendor_id = le16_to_cpu(id->vid);
2479         subsys->cmic = id->cmic;
2480         subsys->awupf = le16_to_cpu(id->awupf);
2481 #ifdef CONFIG_NVME_MULTIPATH
2482         subsys->iopolicy = NVME_IOPOLICY_NUMA;
2483 #endif
2484
2485         subsys->dev.class = nvme_subsys_class;
2486         subsys->dev.release = nvme_release_subsystem;
2487         subsys->dev.groups = nvme_subsys_attrs_groups;
2488         dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance);
2489         device_initialize(&subsys->dev);
2490
2491         mutex_lock(&nvme_subsystems_lock);
2492         found = __nvme_find_get_subsystem(subsys->subnqn);
2493         if (found) {
2494                 put_device(&subsys->dev);
2495                 subsys = found;
2496
2497                 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2498                         ret = -EINVAL;
2499                         goto out_put_subsystem;
2500                 }
2501         } else {
2502                 ret = device_add(&subsys->dev);
2503                 if (ret) {
2504                         dev_err(ctrl->device,
2505                                 "failed to register subsystem device.\n");
2506                         goto out_unlock;
2507                 }
2508                 ida_init(&subsys->ns_ida);
2509                 list_add_tail(&subsys->entry, &nvme_subsystems);
2510         }
2511
2512         if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2513                         dev_name(ctrl->device))) {
2514                 dev_err(ctrl->device,
2515                         "failed to create sysfs link from subsystem.\n");
2516                 goto out_put_subsystem;
2517         }
2518
2519         ctrl->subsys = subsys;
2520         list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2521         mutex_unlock(&nvme_subsystems_lock);
2522         return 0;
2523
2524 out_put_subsystem:
2525         nvme_put_subsystem(subsys);
2526 out_unlock:
2527         mutex_unlock(&nvme_subsystems_lock);
2528         put_device(&subsys->dev);
2529         return ret;
2530 }
2531
2532 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
2533                 void *log, size_t size, u64 offset)
2534 {
2535         struct nvme_command c = { };
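        /* NUMD is the 0's based dword count, split across NUMDL/NUMDU. */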
2536         unsigned long dwlen = size / 4 - 1;
2537
2538         c.get_log_page.opcode = nvme_admin_get_log_page;
2539         c.get_log_page.nsid = cpu_to_le32(nsid);
2540         c.get_log_page.lid = log_page;
2541         c.get_log_page.lsp = lsp;
2542         c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2543         c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2544         c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2545         c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2546
2547         return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2548 }
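/*
 * Illustrative use, assuming the standard log identifiers from nvme.h:
 * fetching the controller-wide SMART / Health log would look like
 *
 *	struct nvme_smart_log smart;
 *
 *	nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
 *		     &smart, sizeof(smart), 0);
 */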
2549
2550 static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
2551 {
2552         int ret;
2553
2554         if (!ctrl->effects)
2555                 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
2556
2557         if (!ctrl->effects)
2558                 return 0;
2559
2560         ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
2561                         ctrl->effects, sizeof(*ctrl->effects), 0);
2562         if (ret) {
2563                 kfree(ctrl->effects);
2564                 ctrl->effects = NULL;
2565         }
2566         return ret;
2567 }
2568
2569 /*
2570  * Initialize the cached copies of the Identify data and various controller
2571  * registers in our nvme_ctrl structure.  This should be called as soon as
2572  * the admin queue is fully up and running.
2573  */
2574 int nvme_init_identify(struct nvme_ctrl *ctrl)
2575 {
2576         struct nvme_id_ctrl *id;
2577         int ret, page_shift;
2578         u32 max_hw_sectors;
2579         bool prev_apst_enabled;
2580
2581         ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
2582         if (ret) {
2583                 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
2584                 return ret;
2585         }
2586         page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2587         ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
2588
2589         if (ctrl->vs >= NVME_VS(1, 1, 0))
2590                 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
2591
2592         ret = nvme_identify_ctrl(ctrl, &id);
2593         if (ret) {
2594                 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2595                 return -EIO;
2596         }
2597
2598         if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2599                 ret = nvme_get_effects_log(ctrl);
2600                 if (ret < 0)
2601                         goto out_free;
2602         }
2603
2604         if (!ctrl->identified) {
2605                 int i;
2606
2607                 ret = nvme_init_subsystem(ctrl, id);
2608                 if (ret)
2609                         goto out_free;
2610
2611                 /*
2612                  * Check for quirks.  Quirk can depend on firmware version,
2613                  * so, in principle, the set of quirks present can change
2614                  * across a reset.  As a possible future enhancement, we
2615                  * could re-scan for quirks every time we reinitialize
2616                  * the device, but we'd have to make sure that the driver
2617                  * behaves intelligently if the quirks change.
2618                  */
2619                 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2620                         if (quirk_matches(id, &core_quirks[i]))
2621                                 ctrl->quirks |= core_quirks[i].quirks;
2622                 }
2623         }
2624
2625         if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2626                 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2627                 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2628         }
2629
2630         ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2631         ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2632         ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2633
2634         ctrl->oacs = le16_to_cpu(id->oacs);
2635         ctrl->oncs = le16_to_cpu(id->oncs);
2636         ctrl->mtfa = le16_to_cpu(id->mtfa);
2637         ctrl->oaes = le32_to_cpu(id->oaes);
2638         atomic_set(&ctrl->abort_limit, id->acl + 1);
2639         ctrl->vwc = id->vwc;
2640         if (id->mdts)
2641                 max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2642         else
2643                 max_hw_sectors = UINT_MAX;
2644         ctrl->max_hw_sectors =
2645                 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2646
2647         nvme_set_queue_limits(ctrl, ctrl->admin_q);
2648         ctrl->sgls = le32_to_cpu(id->sgls);
2649         ctrl->kas = le16_to_cpu(id->kas);
2650         ctrl->max_namespaces = le32_to_cpu(id->mnan);
2651         ctrl->ctratt = le32_to_cpu(id->ctratt);
2652
2653         if (id->rtd3e) {
2654                 /* us -> s */
2655                 u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
2656
2657                 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2658                                                  shutdown_timeout, 60);
2659
2660                 if (ctrl->shutdown_timeout != shutdown_timeout)
2661                         dev_info(ctrl->device,
2662                                  "Shutdown timeout set to %u seconds\n",
2663                                  ctrl->shutdown_timeout);
2664         } else
2665                 ctrl->shutdown_timeout = shutdown_timeout;
2666
2667         ctrl->npss = id->npss;
2668         ctrl->apsta = id->apsta;
2669         prev_apst_enabled = ctrl->apst_enabled;
2670         if (ctrl->quirks & NVME_QUIRK_NO_APST) {
2671                 if (force_apst && id->apsta) {
2672                         dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2673                         ctrl->apst_enabled = true;
2674                 } else {
2675                         ctrl->apst_enabled = false;
2676                 }
2677         } else {
2678                 ctrl->apst_enabled = id->apsta;
2679         }
2680         memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
2681
2682         if (ctrl->ops->flags & NVME_F_FABRICS) {
2683                 ctrl->icdoff = le16_to_cpu(id->icdoff);
2684                 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
2685                 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
2686                 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
2687
2688                 /*
2689                  * In fabrics we need to verify the cntlid matches the
2690                  * admin connect
2691                  */
2692                 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
2693                         ret = -EINVAL;
2694                         goto out_free;
2695                 }
2696
2697                 if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
2698                         dev_err(ctrl->device,
2699                                 "keep-alive support is mandatory for fabrics\n");
2700                         ret = -EINVAL;
2701                         goto out_free;
2702                 }
2703         } else {
2704                 ctrl->cntlid = le16_to_cpu(id->cntlid);
2705                 ctrl->hmpre = le32_to_cpu(id->hmpre);
2706                 ctrl->hmmin = le32_to_cpu(id->hmmin);
2707                 ctrl->hmminds = le32_to_cpu(id->hmminds);
2708                 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
2709         }
2710
2711         ret = nvme_mpath_init(ctrl, id);
2712         kfree(id);
2713
2714         if (ret < 0)
2715                 return ret;
2716
2717         if (ctrl->apst_enabled && !prev_apst_enabled)
2718                 dev_pm_qos_expose_latency_tolerance(ctrl->device);
2719         else if (!ctrl->apst_enabled && prev_apst_enabled)
2720                 dev_pm_qos_hide_latency_tolerance(ctrl->device);
2721
2722         ret = nvme_configure_apst(ctrl);
2723         if (ret < 0)
2724                 return ret;
2725
2726         ret = nvme_configure_timestamp(ctrl);
2727         if (ret < 0)
2728                 return ret;
2729
2730         ret = nvme_configure_directives(ctrl);
2731         if (ret < 0)
2732                 return ret;
2733
2734         ret = nvme_configure_acre(ctrl);
2735         if (ret < 0)
2736                 return ret;
2737
2738         ctrl->identified = true;
2739
2740         return 0;
2741
2742 out_free:
2743         kfree(id);
2744         return ret;
2745 }
2746 EXPORT_SYMBOL_GPL(nvme_init_identify);
2747
2748 static int nvme_dev_open(struct inode *inode, struct file *file)
2749 {
2750         struct nvme_ctrl *ctrl =
2751                 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
2752
2753         switch (ctrl->state) {
2754         case NVME_CTRL_LIVE:
2755         case NVME_CTRL_ADMIN_ONLY:
2756                 break;
2757         default:
2758                 return -EWOULDBLOCK;
2759         }
2760
2761         file->private_data = ctrl;
2762         return 0;
2763 }
2764
2765 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
2766 {
2767         struct nvme_ns *ns;
2768         int ret;
2769
2770         down_read(&ctrl->namespaces_rwsem);
2771         if (list_empty(&ctrl->namespaces)) {
2772                 ret = -ENOTTY;
2773                 goto out_unlock;
2774         }
2775
2776         ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
2777         if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
2778                 dev_warn(ctrl->device,
2779                         "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
2780                 ret = -EINVAL;
2781                 goto out_unlock;
2782         }
2783
2784         dev_warn(ctrl->device,
2785                 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
2786         kref_get(&ns->kref);
2787         up_read(&ctrl->namespaces_rwsem);
2788
2789         ret = nvme_user_cmd(ctrl, ns, argp);
2790         nvme_put_ns(ns);
2791         return ret;
2792
2793 out_unlock:
2794         up_read(&ctrl->namespaces_rwsem);
2795         return ret;
2796 }
2797
2798 static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
2799                 unsigned long arg)
2800 {
2801         struct nvme_ctrl *ctrl = file->private_data;
2802         void __user *argp = (void __user *)arg;
2803
2804         switch (cmd) {
2805         case NVME_IOCTL_ADMIN_CMD:
2806                 return nvme_user_cmd(ctrl, NULL, argp);
2807         case NVME_IOCTL_IO_CMD:
2808                 return nvme_dev_user_cmd(ctrl, argp);
2809         case NVME_IOCTL_RESET:
2810                 dev_warn(ctrl->device, "resetting controller\n");
2811                 return nvme_reset_ctrl_sync(ctrl);
2812         case NVME_IOCTL_SUBSYS_RESET:
2813                 return nvme_reset_subsystem(ctrl);
2814         case NVME_IOCTL_RESCAN:
2815                 nvme_queue_scan(ctrl);
2816                 return 0;
2817         default:
2818                 return -ENOTTY;
2819         }
2820 }
2821
2822 static const struct file_operations nvme_dev_fops = {
2823         .owner          = THIS_MODULE,
2824         .open           = nvme_dev_open,
2825         .unlocked_ioctl = nvme_dev_ioctl,
2826         .compat_ioctl   = nvme_dev_ioctl,
2827 };
2828
2829 static ssize_t nvme_sysfs_reset(struct device *dev,
2830                                 struct device_attribute *attr, const char *buf,
2831                                 size_t count)
2832 {
2833         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2834         int ret;
2835
2836         ret = nvme_reset_ctrl_sync(ctrl);
2837         if (ret < 0)
2838                 return ret;
2839         return count;
2840 }
2841 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
2842
2843 static ssize_t nvme_sysfs_rescan(struct device *dev,
2844                                 struct device_attribute *attr, const char *buf,
2845                                 size_t count)
2846 {
2847         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2848
2849         nvme_queue_scan(ctrl);
2850         return count;
2851 }
2852 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
2853
2854 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
2855 {
2856         struct gendisk *disk = dev_to_disk(dev);
2857
2858         if (disk->fops == &nvme_fops)
2859                 return nvme_get_ns_from_dev(dev)->head;
2860         else
2861                 return disk->private_data;
2862 }
2863
2864 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
2865                 char *buf)
2866 {
2867         struct nvme_ns_head *head = dev_to_ns_head(dev);
2868         struct nvme_ns_ids *ids = &head->ids;
2869         struct nvme_subsystem *subsys = head->subsys;
2870         int serial_len = sizeof(subsys->serial);
2871         int model_len = sizeof(subsys->model);
2872
2873         if (!uuid_is_null(&ids->uuid))
2874                 return sprintf(buf, "uuid.%pU\n", &ids->uuid);
2875
2876         if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2877                 return sprintf(buf, "eui.%16phN\n", ids->nguid);
2878
2879         if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
2880                 return sprintf(buf, "eui.%8phN\n", ids->eui64);
2881
2882         while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
2883                                   subsys->serial[serial_len - 1] == '\0'))
2884                 serial_len--;
2885         while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
2886                                  subsys->model[model_len - 1] == '\0'))
2887                 model_len--;
2888
2889         return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
2890                 serial_len, subsys->serial, model_len, subsys->model,
2891                 head->ns_id);
2892 }
2893 static DEVICE_ATTR_RO(wwid);
2894
2895 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
2896                 char *buf)
2897 {
2898         return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
2899 }
2900 static DEVICE_ATTR_RO(nguid);
2901
2902 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
2903                 char *buf)
2904 {
2905         struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
2906
2907         /* For backward compatibility, expose the NGUID to userspace if
2908          * we have no UUID set.
2909          */
2910         if (uuid_is_null(&ids->uuid)) {
2911                 printk_ratelimited(KERN_WARNING
2912                                    "No UUID available, providing old NGUID\n");
2913                 return sprintf(buf, "%pU\n", ids->nguid);
2914         }
2915         return sprintf(buf, "%pU\n", &ids->uuid);
2916 }
2917 static DEVICE_ATTR_RO(uuid);
2918
2919 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
2920                 char *buf)
2921 {
2922         return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
2923 }
2924 static DEVICE_ATTR_RO(eui);
2925
2926 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
2927                 char *buf)
2928 {
2929         return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
2930 }
2931 static DEVICE_ATTR_RO(nsid);
2932
2933 static struct attribute *nvme_ns_id_attrs[] = {
2934         &dev_attr_wwid.attr,
2935         &dev_attr_uuid.attr,
2936         &dev_attr_nguid.attr,
2937         &dev_attr_eui.attr,
2938         &dev_attr_nsid.attr,
2939 #ifdef CONFIG_NVME_MULTIPATH
2940         &dev_attr_ana_grpid.attr,
2941         &dev_attr_ana_state.attr,
2942 #endif
2943         NULL,
2944 };
2945
2946 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
2947                 struct attribute *a, int n)
2948 {
2949         struct device *dev = container_of(kobj, struct device, kobj);
2950         struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
2951
2952         if (a == &dev_attr_uuid.attr) {
2953                 if (uuid_is_null(&ids->uuid) &&
2954                     !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2955                         return 0;
2956         }
2957         if (a == &dev_attr_nguid.attr) {
2958                 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
2959                         return 0;
2960         }
2961         if (a == &dev_attr_eui.attr) {
2962                 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
2963                         return 0;
2964         }
2965 #ifdef CONFIG_NVME_MULTIPATH
2966         if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
2967                 if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
2968                         return 0;
2969                 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
2970                         return 0;
2971         }
2972 #endif
2973         return a->mode;
2974 }
2975
2976 static const struct attribute_group nvme_ns_id_attr_group = {
2977         .attrs          = nvme_ns_id_attrs,
2978         .is_visible     = nvme_ns_id_attrs_are_visible,
2979 };
2980
2981 const struct attribute_group *nvme_ns_id_attr_groups[] = {
2982         &nvme_ns_id_attr_group,
2983 #ifdef CONFIG_NVM
2984         &nvme_nvm_attr_group,
2985 #endif
2986         NULL,
2987 };
2988
2989 #define nvme_show_str_function(field)                                           \
2990 static ssize_t  field##_show(struct device *dev,                                \
2991                             struct device_attribute *attr, char *buf)           \
2992 {                                                                               \
2993         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);                          \
2994         return sprintf(buf, "%.*s\n",                                           \
2995                 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field);         \
2996 }                                                                               \
2997 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
2998
2999 nvme_show_str_function(model);
3000 nvme_show_str_function(serial);
3001 nvme_show_str_function(firmware_rev);
3002
3003 #define nvme_show_int_function(field)                                           \
3004 static ssize_t  field##_show(struct device *dev,                                \
3005                             struct device_attribute *attr, char *buf)           \
3006 {                                                                               \
3007         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);                          \
3008         return sprintf(buf, "%d\n", ctrl->field);                               \
3009 }                                                                               \
3010 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3011
3012 nvme_show_int_function(cntlid);
3013 nvme_show_int_function(numa_node);
3014
3015 static ssize_t nvme_sysfs_delete(struct device *dev,
3016                                 struct device_attribute *attr, const char *buf,
3017                                 size_t count)
3018 {
3019         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3020
3021         if (device_remove_file_self(dev, attr))
3022                 nvme_delete_ctrl_sync(ctrl);
3023         return count;
3024 }
3025 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
3026
3027 static ssize_t nvme_sysfs_show_transport(struct device *dev,
3028                                          struct device_attribute *attr,
3029                                          char *buf)
3030 {
3031         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3032
3033         return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
3034 }
3035 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
3036
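/*
 * Report the current controller state as a human-readable string;
 * userspace typically reads this as e.g. /sys/class/nvme/nvme0/state.
 */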
3037 static ssize_t nvme_sysfs_show_state(struct device *dev,
3038                                      struct device_attribute *attr,
3039                                      char *buf)
3040 {
3041         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3042         static const char *const state_name[] = {
3043                 [NVME_CTRL_NEW]         = "new",
3044                 [NVME_CTRL_LIVE]        = "live",
3045                 [NVME_CTRL_ADMIN_ONLY]  = "only-admin",
3046                 [NVME_CTRL_RESETTING]   = "resetting",
3047                 [NVME_CTRL_CONNECTING]  = "connecting",
3048                 [NVME_CTRL_DELETING]    = "deleting",
3049                 [NVME_CTRL_DEAD]        = "dead",
3050         };
3051
3052         if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
3053             state_name[ctrl->state])
3054                 return sprintf(buf, "%s\n", state_name[ctrl->state]);
3055
3056         return sprintf(buf, "unknown state\n");
3057 }
3058
3059 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
3060
3061 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
3062                                          struct device_attribute *attr,
3063                                          char *buf)
3064 {
3065         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3066
3067         return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
3068 }
3069 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
3070
3071 static ssize_t nvme_sysfs_show_address(struct device *dev,
3072                                          struct device_attribute *attr,
3073                                          char *buf)
3074 {
3075         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3076
3077         return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3078 }
3079 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3080
3081 static struct attribute *nvme_dev_attrs[] = {
3082         &dev_attr_reset_controller.attr,
3083         &dev_attr_rescan_controller.attr,
3084         &dev_attr_model.attr,
3085         &dev_attr_serial.attr,
3086         &dev_attr_firmware_rev.attr,
3087         &dev_attr_cntlid.attr,
3088         &dev_attr_delete_controller.attr,
3089         &dev_attr_transport.attr,
3090         &dev_attr_subsysnqn.attr,
3091         &dev_attr_address.attr,
3092         &dev_attr_state.attr,
3093         &dev_attr_numa_node.attr,
3094         NULL
3095 };
3096
3097 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3098                 struct attribute *a, int n)
3099 {
3100         struct device *dev = container_of(kobj, struct device, kobj);
3101         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3102
3103         if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3104                 return 0;
3105         if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3106                 return 0;
3107
3108         return a->mode;
3109 }
3110
3111 static struct attribute_group nvme_dev_attrs_group = {
3112         .attrs          = nvme_dev_attrs,
3113         .is_visible     = nvme_dev_attrs_are_visible,
3114 };
3115
3116 static const struct attribute_group *nvme_dev_attr_groups[] = {
3117         &nvme_dev_attrs_group,
3118         NULL,
3119 };
3120
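/*
 * Look up an existing namespace head for @nsid in the subsystem and take
 * a reference on it.  Returns NULL if none exists.  The caller must hold
 * subsys->lock.
 */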
3121 static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
3122                 unsigned nsid)
3123 {
3124         struct nvme_ns_head *h;
3125
3126         lockdep_assert_held(&subsys->lock);
3127
3128         list_for_each_entry(h, &subsys->nsheads, entry) {
3129                 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
3130                         return h;
3131         }
3132
3133         return NULL;
3134 }
3135
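/*
 * Verify that the IDs of @new do not collide with those of any live
 * namespace head already registered in the subsystem; duplicate IDs
 * across distinct namespaces are invalid and rejected with -EINVAL.
 */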
3136 static int __nvme_check_ids(struct nvme_subsystem *subsys,
3137                 struct nvme_ns_head *new)
3138 {
3139         struct nvme_ns_head *h;
3140
3141         lockdep_assert_held(&subsys->lock);
3142
3143         list_for_each_entry(h, &subsys->nsheads, entry) {
3144                 if (nvme_ns_ids_valid(&new->ids) &&
3145                     !list_empty(&h->list) &&
3146                     nvme_ns_ids_equal(&new->ids, &h->ids))
3147                         return -EINVAL;
3148         }
3149
3150         return 0;
3151 }
3152
3153 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3154                 unsigned nsid, struct nvme_id_ns *id)
3155 {
3156         struct nvme_ns_head *head;
3157         size_t size = sizeof(*head);
3158         int ret = -ENOMEM;
3159
3160 #ifdef CONFIG_NVME_MULTIPATH
3161         size += num_possible_nodes() * sizeof(struct nvme_ns *);
3162 #endif
3163
3164         head = kzalloc(size, GFP_KERNEL);
3165         if (!head)
3166                 goto out;
3167         ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3168         if (ret < 0)
3169                 goto out_free_head;
3170         head->instance = ret;
3171         INIT_LIST_HEAD(&head->list);
3172         ret = init_srcu_struct(&head->srcu);
3173         if (ret)
3174                 goto out_ida_remove;
3175         head->subsys = ctrl->subsys;
3176         head->ns_id = nsid;
3177         kref_init(&head->ref);
3178
3179         nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
3180
3181         ret = __nvme_check_ids(ctrl->subsys, head);
3182         if (ret) {
3183                 dev_err(ctrl->device,
3184                         "duplicate IDs for nsid %d\n", nsid);
3185                 goto out_cleanup_srcu;
3186         }
3187
3188         ret = nvme_mpath_alloc_disk(ctrl, head);
3189         if (ret)
3190                 goto out_cleanup_srcu;
3191
3192         list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3193
3194         kref_get(&ctrl->subsys->ref);
3195
3196         return head;
3197 out_cleanup_srcu:
3198         cleanup_srcu_struct(&head->srcu);
3199 out_ida_remove:
3200         ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3201 out_free_head:
3202         kfree(head);
3203 out:
3204         return ERR_PTR(ret);
3205 }
3206
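/*
 * Attach @ns to a namespace head: if the namespace is shared (NMIC bit 0
 * set) reuse an existing head for @nsid where possible, otherwise
 * allocate a new one.  When reusing a head, verify that the IDs reported
 * by this controller match the ones already recorded there.
 */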
3207 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3208                 struct nvme_id_ns *id)
3209 {
3210         struct nvme_ctrl *ctrl = ns->ctrl;
3211         bool is_shared = id->nmic & (1 << 0);
3212         struct nvme_ns_head *head = NULL;
3213         int ret = 0;
3214
3215         mutex_lock(&ctrl->subsys->lock);
3216         if (is_shared)
3217                 head = __nvme_find_ns_head(ctrl->subsys, nsid);
3218         if (!head) {
3219                 head = nvme_alloc_ns_head(ctrl, nsid, id);
3220                 if (IS_ERR(head)) {
3221                         ret = PTR_ERR(head);
3222                         goto out_unlock;
3223                 }
3224         } else {
3225                 struct nvme_ns_ids ids;
3226
3227                 nvme_report_ns_ids(ctrl, nsid, id, &ids);
3228                 if (!nvme_ns_ids_equal(&head->ids, &ids)) {
3229                         dev_err(ctrl->device,
3230                                 "IDs don't match for shared namespace %d\n",
3231                                         nsid);
3232                         ret = -EINVAL;
3233                         goto out_unlock;
3234                 }
3235         }
3236
3237         list_add_tail(&ns->siblings, &head->list);
3238         ns->head = head;
3239
3240 out_unlock:
3241         mutex_unlock(&ctrl->subsys->lock);
3242         return ret;
3243 }
3244
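/* list_sort() comparator: order namespaces by ascending namespace ID */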
3245 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
3246 {
3247         struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
3248         struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
3249
3250         return nsa->head->ns_id - nsb->head->ns_id;
3251 }
3252
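/*
 * Find a namespace by ID and take a reference on it; the caller must
 * drop the reference with nvme_put_ns().  The namespace list is kept
 * sorted by ID, so the walk can terminate early.
 */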
3253 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3254 {
3255         struct nvme_ns *ns, *ret = NULL;
3256
3257         down_read(&ctrl->namespaces_rwsem);
3258         list_for_each_entry(ns, &ctrl->namespaces, list) {
3259                 if (ns->head->ns_id == nsid) {
3260                         if (!kref_get_unless_zero(&ns->kref))
3261                                 continue;
3262                         ret = ns;
3263                         break;
3264                 }
3265                 if (ns->head->ns_id > nsid)
3266                         break;
3267         }
3268         up_read(&ctrl->namespaces_rwsem);
3269         return ret;
3270 }
3271
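/*
 * If the controller has streams enabled, derive the I/O size hints
 * (io_min/io_opt) for this namespace from the stream write size (SWS)
 * and stream granularity size (SGS) reported by the device.
 */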
3272 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
3273 {
3274         struct streams_directive_params s;
3275         int ret;
3276
3277         if (!ctrl->nr_streams)
3278                 return 0;
3279
3280         ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
3281         if (ret)
3282                 return ret;
3283
3284         ns->sws = le32_to_cpu(s.sws);
3285         ns->sgs = le16_to_cpu(s.sgs);
3286
3287         if (ns->sws) {
3288                 unsigned int bs = 1 << ns->lba_shift;
3289
3290                 blk_queue_io_min(ns->queue, bs * ns->sws);
3291                 if (ns->sgs)
3292                         blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
3293         }
3294
3295         return 0;
3296 }
3297
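/*
 * Allocate and register a new namespace: set up its request queue,
 * identify the namespace, link it into a namespace head, and publish
 * the gendisk to the block layer.
 */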
3298 static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3299 {
3300         struct nvme_ns *ns;
3301         struct gendisk *disk;
3302         struct nvme_id_ns *id;
3303         char disk_name[DISK_NAME_LEN];
3304         int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
3305
3306         ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3307         if (!ns)
3308                 return -ENOMEM;
3309
3310         ns->queue = blk_mq_init_queue(ctrl->tagset);
3311         if (IS_ERR(ns->queue)) {
3312                 ret = PTR_ERR(ns->queue);
3313                 goto out_free_ns;
3314         }
3315
3316         if (ctrl->opts && ctrl->opts->data_digest)
3317                 ns->queue->backing_dev_info->capabilities
3318                         |= BDI_CAP_STABLE_WRITES;
3319
3320         blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3321         if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3322                 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3323
3324         ns->queue->queuedata = ns;
3325         ns->ctrl = ctrl;
3326
3327         kref_init(&ns->kref);
3328         ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
3329
3330         blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
3331         nvme_set_queue_limits(ctrl, ns->queue);
3332
3333         ret = nvme_identify_ns(ctrl, nsid, &id);
3334         if (ret)
3335                 goto out_free_queue;
3336
3337         if (id->ncap == 0) {
3338                 ret = -EINVAL;
3339                 goto out_free_id;
3340         }
3341
3342         ret = nvme_init_ns_head(ns, nsid, id);
3343         if (ret)
3344                 goto out_free_id;
3345         nvme_setup_streams_ns(ctrl, ns);
3346         nvme_set_disk_name(disk_name, ns, ctrl, &flags);
3347
3348         disk = alloc_disk_node(0, node);
3349         if (!disk) {
3350                 ret = -ENOMEM;
3351                 goto out_unlink_ns;
3352         }
3353
3354         disk->fops = &nvme_fops;
3355         disk->private_data = ns;
3356         disk->queue = ns->queue;
3357         disk->flags = flags;
3358         memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
3359         ns->disk = disk;
3360
3361         __nvme_revalidate_disk(disk, id);
3362
3363         if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3364                 ret = nvme_nvm_register(ns, disk_name, node);
3365                 if (ret) {
3366                         dev_warn(ctrl->device, "LightNVM init failure\n");
3367                         goto out_put_disk;
3368                 }
3369         }
3370
3371         down_write(&ctrl->namespaces_rwsem);
3372         list_add_tail(&ns->list, &ctrl->namespaces);
3373         up_write(&ctrl->namespaces_rwsem);
3374
3375         nvme_get_ctrl(ctrl);
3376
3377         device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3378
3379         nvme_mpath_add_disk(ns, id);
3380         nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3381         kfree(id);
3382
3383         return 0;
3384  out_put_disk:
3385         put_disk(ns->disk);
3386  out_unlink_ns:
3387         mutex_lock(&ctrl->subsys->lock);
3388         list_del_rcu(&ns->siblings);
3389         mutex_unlock(&ctrl->subsys->lock);
3390         nvme_put_ns_head(ns->head);
3391  out_free_id:
3392         kfree(id);
3393  out_free_queue:
3394         blk_cleanup_queue(ns->queue);
3395  out_free_ns:
3396         kfree(ns);
3397         if (ret > 0)
3398                 ret = blk_status_to_errno(nvme_error_status(ret));
3399         return ret;
3400 }
3401
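/*
 * Tear down a namespace: unlink it from its head and from the
 * controller's namespace list, wait out concurrent submissions, and
 * delete the gendisk.  The NVME_NS_REMOVING flag guards against
 * concurrent removal.
 */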
3402 static void nvme_ns_remove(struct nvme_ns *ns)
3403 {
3404         if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3405                 return;
3406
3407         nvme_fault_inject_fini(&ns->fault_inject);
3408
3409         mutex_lock(&ns->ctrl->subsys->lock);
3410         list_del_rcu(&ns->siblings);
3411         mutex_unlock(&ns->ctrl->subsys->lock);
3412         synchronize_rcu(); /* guarantee not available in head->list */
3413         nvme_mpath_clear_current_path(ns);
3414         synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
3415
3416         if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
3417                 del_gendisk(ns->disk);
3418                 blk_cleanup_queue(ns->queue);
3419                 if (blk_get_integrity(ns->disk))
3420                         blk_integrity_unregister(ns->disk);
3421         }
3422
3423         down_write(&ns->ctrl->namespaces_rwsem);
3424         list_del_init(&ns->list);
3425         up_write(&ns->ctrl->namespaces_rwsem);
3426
3427         nvme_mpath_check_last_path(ns);
3428         nvme_put_ns(ns);
3429 }
3430
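/*
 * Revalidate a known namespace, removing it if revalidation fails, or
 * allocate a fresh one if @nsid is not known yet.
 */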
3431 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3432 {
3433         struct nvme_ns *ns;
3434
3435         ns = nvme_find_get_ns(ctrl, nsid);
3436         if (ns) {
3437                 if (ns->disk && revalidate_disk(ns->disk))
3438                         nvme_ns_remove(ns);
3439                 nvme_put_ns(ns);
3440         } else
3441                 nvme_alloc_ns(ctrl, nsid);
3442 }
3443
3444 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3445                                         unsigned nsid)
3446 {
3447         struct nvme_ns *ns, *next;
3448         LIST_HEAD(rm_list);
3449
3450         down_write(&ctrl->namespaces_rwsem);
3451         list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3452                 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
3453                         list_move_tail(&ns->list, &rm_list);
3454         }
3455         up_write(&ctrl->namespaces_rwsem);
3456
3457         list_for_each_entry_safe(ns, next, &rm_list, list)
3458                 nvme_ns_remove(ns);
3459
3460 }
3461
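/*
 * Scan using the Identify active namespace ID list, which returns up to
 * 1024 IDs per command: validate every reported namespace and remove
 * stale namespaces that fall into the gaps between reported IDs.
 */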
3462 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
3463 {
3464         struct nvme_ns *ns;
3465         __le32 *ns_list;
3466         unsigned i, j, nsid, prev = 0;
3467         unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
3468         int ret = 0;
3469
3470         ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3471         if (!ns_list)
3472                 return -ENOMEM;
3473
3474         for (i = 0; i < num_lists; i++) {
3475                 ret = nvme_identify_ns_list(ctrl, prev, ns_list);
3476                 if (ret)
3477                         goto free;
3478
3479                 for (j = 0; j < min(nn, 1024U); j++) {
3480                         nsid = le32_to_cpu(ns_list[j]);
3481                         if (!nsid)
3482                                 goto out;
3483
3484                         nvme_validate_ns(ctrl, nsid);
3485
3486                         while (++prev < nsid) {
3487                                 ns = nvme_find_get_ns(ctrl, prev);
3488                                 if (ns) {
3489                                         nvme_ns_remove(ns);
3490                                         nvme_put_ns(ns);
3491                                 }
3492                         }
3493                 }
3494                 nn -= j;
3495         }
3496  out:
3497         nvme_remove_invalid_namespaces(ctrl, prev);
3498  free:
3499         kfree(ns_list);
3500         return ret;
3501 }
3502
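/*
 * Fallback scan for controllers without namespace list support: probe
 * every namespace ID from 1 to @nn and remove anything above @nn.
 */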
3503 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
3504 {
3505         unsigned i;
3506
3507         for (i = 1; i <= nn; i++)
3508                 nvme_validate_ns(ctrl, i);
3509
3510         nvme_remove_invalid_namespaces(ctrl, nn);
3511 }
3512
3513 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
3514 {
3515         size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
3516         __le32 *log;
3517         int error;
3518
3519         log = kzalloc(log_size, GFP_KERNEL);
3520         if (!log)
3521                 return;
3522
3523         /*
3524          * We need to read the log to clear the AEN, but we don't want to rely
3525          * on it for the changed namespace information as userspace could have
3526          * raced with us in reading the log page, which could cause us to miss
3527          * updates.
3528          */
3529         error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
3530                         log_size, 0);
3531         if (error)
3532                 dev_warn(ctrl->device,
3533                         "reading changed ns log failed: %d\n", error);
3534
3535         kfree(log);
3536 }
3537
3538 static void nvme_scan_work(struct work_struct *work)
3539 {
3540         struct nvme_ctrl *ctrl =
3541                 container_of(work, struct nvme_ctrl, scan_work);
3542         struct nvme_id_ctrl *id;
3543         unsigned nn;
3544
3545         if (ctrl->state != NVME_CTRL_LIVE)
3546                 return;
3547
3548         WARN_ON_ONCE(!ctrl->tagset);
3549
3550         if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3551                 dev_info(ctrl->device, "rescanning namespaces.\n");
3552                 nvme_clear_changed_ns_log(ctrl);
3553         }
3554
3555         if (nvme_identify_ctrl(ctrl, &id))
3556                 return;
3557
3558         mutex_lock(&ctrl->scan_lock);
3559         nn = le32_to_cpu(id->nn);
3560         if (ctrl->vs >= NVME_VS(1, 1, 0) &&
3561             !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
3562                 if (!nvme_scan_ns_list(ctrl, nn))
3563                         goto out_free_id;
3564         }
3565         nvme_scan_ns_sequential(ctrl, nn);
3566 out_free_id:
3567         mutex_unlock(&ctrl->scan_lock);
3568         kfree(id);
3569         down_write(&ctrl->namespaces_rwsem);
3570         list_sort(NULL, &ctrl->namespaces, ns_cmp);
3571         up_write(&ctrl->namespaces_rwsem);
3572 }
3573
3574 /*
3575  * This function iterates the namespace list unlocked to allow recovery from
3576  * controller failure. It is up to the caller to ensure the namespace list is
3577  * not modified by scan work while this function is executing.
3578  */
3579 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3580 {
3581         struct nvme_ns *ns, *next;
3582         LIST_HEAD(ns_list);
3583
3584         /* prevent racing with ns scanning */
3585         flush_work(&ctrl->scan_work);
3586
3587         /*
3588          * The dead state indicates that the controller was not gracefully
3589          * disconnected. In that case, we won't be able to flush any data while
3590          * removing the namespaces' disks; fail all the queues now to avoid
3591          * potentially having to clean up the failed sync later.
3592          */
3593         if (ctrl->state == NVME_CTRL_DEAD)
3594                 nvme_kill_queues(ctrl);
3595
3596         down_write(&ctrl->namespaces_rwsem);
3597         list_splice_init(&ctrl->namespaces, &ns_list);
3598         up_write(&ctrl->namespaces_rwsem);
3599
3600         list_for_each_entry_safe(ns, next, &ns_list, list)
3601                 nvme_ns_remove(ns);
3602 }
3603 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
3604
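/*
 * Forward a latched asynchronous event result to userspace as a change
 * uevent carrying an NVME_AEN=<result> environment variable.
 */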
3605 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
3606 {
3607         char *envp[2] = { NULL, NULL };
3608         u32 aen_result = ctrl->aen_result;
3609
3610         ctrl->aen_result = 0;
3611         if (!aen_result)
3612                 return;
3613
3614         envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
3615         if (!envp[0])
3616                 return;
3617         kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
3618         kfree(envp[0]);
3619 }
3620
3621 static void nvme_async_event_work(struct work_struct *work)
3622 {
3623         struct nvme_ctrl *ctrl =
3624                 container_of(work, struct nvme_ctrl, async_event_work);
3625
3626         nvme_aen_uevent(ctrl);
3627         ctrl->ops->submit_async_event(ctrl);
3628 }
3629
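/*
 * Check whether the controller reports Processing Paused (CSTS.PP)
 * while enabled, i.e. a firmware activation is still in progress.
 */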
3630 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
3631 {
3632
3633         u32 csts;
3634
3635         if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
3636                 return false;
3637
3638         if (csts == ~0)
3639                 return false;
3640
3641         return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
3642 }
3643
3644 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
3645 {
3646         struct nvme_fw_slot_info_log *log;
3647
3648         log = kmalloc(sizeof(*log), GFP_KERNEL);
3649         if (!log)
3650                 return;
3651
3652         if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
3653                         sizeof(*log), 0))
3654                 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
3655         kfree(log);
3656 }
3657
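/*
 * Firmware activation: quiesce I/O and poll CSTS.PP until activation
 * completes or the MTFA-derived timeout (the admin timeout when MTFA is
 * zero) expires, in which case the controller is reset.
 */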
3658 static void nvme_fw_act_work(struct work_struct *work)
3659 {
3660         struct nvme_ctrl *ctrl = container_of(work,
3661                                 struct nvme_ctrl, fw_act_work);
3662         unsigned long fw_act_timeout;
3663
3664         if (ctrl->mtfa)
3665                 fw_act_timeout = jiffies +
3666                                 msecs_to_jiffies(ctrl->mtfa * 100);
3667         else
3668                 fw_act_timeout = jiffies +
3669                                 msecs_to_jiffies(admin_timeout * 1000);
3670
3671         nvme_stop_queues(ctrl);
3672         while (nvme_ctrl_pp_status(ctrl)) {
3673                 if (time_after(jiffies, fw_act_timeout)) {
3674                         dev_warn(ctrl->device,
3675                                 "Fw activation timeout, reset controller\n");
3676                         nvme_reset_ctrl(ctrl);
3677                         break;
3678                 }
3679                 msleep(100);
3680         }
3681
3682         if (ctrl->state != NVME_CTRL_LIVE)
3683                 return;
3684
3685         nvme_start_queues(ctrl);
3686         /* read FW slot information to clear the AER */
3687         nvme_get_fw_slot_info(ctrl);
3688 }
3689
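/*
 * Dispatch an AEN notice by type: kick off a namespace rescan, the
 * firmware activation handler, or an ANA log update as appropriate.
 */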
3690 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
3691 {
3692         u32 aer_notice_type = (result & 0xff00) >> 8;
3693
3694         trace_nvme_async_event(ctrl, aer_notice_type);
3695
3696         switch (aer_notice_type) {
3697         case NVME_AER_NOTICE_NS_CHANGED:
3698                 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
3699                 nvme_queue_scan(ctrl);
3700                 break;
3701         case NVME_AER_NOTICE_FW_ACT_STARTING:
3702                 queue_work(nvme_wq, &ctrl->fw_act_work);
3703                 break;
3704 #ifdef CONFIG_NVME_MULTIPATH
3705         case NVME_AER_NOTICE_ANA:
3706                 if (!ctrl->ana_log_buf)
3707                         break;
3708                 queue_work(nvme_wq, &ctrl->ana_work);
3709                 break;
3710 #endif
3711         default:
3712                 dev_warn(ctrl->device, "async event result %08x\n", result);
3713         }
3714 }
3715
3716 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
3717                 volatile union nvme_result *res)
3718 {
3719         u32 result = le32_to_cpu(res->u32);
3720         u32 aer_type = result & 0x07;
3721
3722         if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
3723                 return;
3724
3725         switch (aer_type) {
3726         case NVME_AER_NOTICE:
3727                 nvme_handle_aen_notice(ctrl, result);
3728                 break;
3729         case NVME_AER_ERROR:
3730         case NVME_AER_SMART:
3731         case NVME_AER_CSS:
3732         case NVME_AER_VS:
3733                 trace_nvme_async_event(ctrl, aer_type);
3734                 ctrl->aen_result = result;
3735                 break;
3736         default:
3737                 break;
3738         }
3739         queue_work(nvme_wq, &ctrl->async_event_work);
3740 }
3741 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
3742
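/*
 * Quiesce the controller's background machinery (multipath, keep-alive,
 * AEN and firmware activation work) ahead of a reset or teardown.
 */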
3743 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
3744 {
3745         nvme_mpath_stop(ctrl);
3746         nvme_stop_keep_alive(ctrl);
3747         flush_work(&ctrl->async_event_work);
3748         cancel_work_sync(&ctrl->fw_act_work);
3749 }
3750 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
3751
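/*
 * (Re)start the controller's background machinery once it is ready:
 * keep-alive if a KATO is set, and for I/O-capable controllers the
 * namespace scan, AEN handling and the namespace queues.
 */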
3752 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
3753 {
3754         if (ctrl->kato)
3755                 nvme_start_keep_alive(ctrl);
3756
3757         if (ctrl->queue_count > 1) {
3758                 nvme_queue_scan(ctrl);
3759                 nvme_enable_aen(ctrl);
3760                 queue_work(nvme_wq, &ctrl->async_event_work);
3761                 nvme_start_queues(ctrl);
3762         }
3763 }
3764 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
3765
3766 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
3767 {
3768         nvme_fault_inject_fini(&ctrl->fault_inject);
3769         dev_pm_qos_hide_latency_tolerance(ctrl->device);
3770         cdev_device_del(&ctrl->cdev, ctrl->device);
3771 }
3772 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
3773
3774 static void nvme_free_ctrl(struct device *dev)
3775 {
3776         struct nvme_ctrl *ctrl =
3777                 container_of(dev, struct nvme_ctrl, ctrl_device);
3778         struct nvme_subsystem *subsys = ctrl->subsys;
3779
3780         ida_simple_remove(&nvme_instance_ida, ctrl->instance);
3781         kfree(ctrl->effects);
3782         nvme_mpath_uninit(ctrl);
3783         __free_page(ctrl->discard_page);
3784
3785         if (subsys) {
3786                 mutex_lock(&nvme_subsystems_lock);
3787                 list_del(&ctrl->subsys_entry);
3788                 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
3789                 mutex_unlock(&nvme_subsystems_lock);
3790         }
3791
3792         ctrl->ops->free_ctrl(ctrl);
3793
3794         if (subsys)
3795                 nvme_put_subsystem(subsys);
3796 }
3797
3798 /*
3799  * Initialize an NVMe controller structure.  This needs to be called during
3800  * the earliest initialization so that we have the initialized structure around
3801  * during probing.
3802  */
3803 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
3804                 const struct nvme_ctrl_ops *ops, unsigned long quirks)
3805 {
3806         int ret;
3807
3808         ctrl->state = NVME_CTRL_NEW;
3809         spin_lock_init(&ctrl->lock);
3810         mutex_init(&ctrl->scan_lock);
3811         INIT_LIST_HEAD(&ctrl->namespaces);
3812         init_rwsem(&ctrl->namespaces_rwsem);
3813         ctrl->dev = dev;
3814         ctrl->ops = ops;
3815         ctrl->quirks = quirks;
3816         INIT_WORK(&ctrl->scan_work, nvme_scan_work);
3817         INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
3818         INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
3819         INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
3820
3821         INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
3822         memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
3823         ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
3824
3825         BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
3826                         PAGE_SIZE);
3827         ctrl->discard_page = alloc_page(GFP_KERNEL);
3828         if (!ctrl->discard_page) {
3829                 ret = -ENOMEM;
3830                 goto out;
3831         }
3832
3833         ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
3834         if (ret < 0)
3835                 goto out;
3836         ctrl->instance = ret;
3837
3838         device_initialize(&ctrl->ctrl_device);
3839         ctrl->device = &ctrl->ctrl_device;
3840         ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
3841         ctrl->device->class = nvme_class;
3842         ctrl->device->parent = ctrl->dev;
3843         ctrl->device->groups = nvme_dev_attr_groups;
3844         ctrl->device->release = nvme_free_ctrl;
3845         dev_set_drvdata(ctrl->device, ctrl);
3846         ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
3847         if (ret)
3848                 goto out_release_instance;
3849
3850         cdev_init(&ctrl->cdev, &nvme_dev_fops);
3851         ctrl->cdev.owner = ops->module;
3852         ret = cdev_device_add(&ctrl->cdev, ctrl->device);
3853         if (ret)
3854                 goto out_free_name;
3855
3856         /*
3857          * Initialize latency tolerance controls.  The sysfs files won't
3858          * be visible to userspace unless the device actually supports APST.
3859          */
3860         ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
3861         dev_pm_qos_update_user_latency_tolerance(ctrl->device,
3862                 min(default_ps_max_latency_us, (unsigned long)S32_MAX));
3863
3864         nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
3865
3866         return 0;
3867 out_free_name:
3868         kfree_const(ctrl->device->kobj.name);
3869 out_release_instance:
3870         ida_simple_remove(&nvme_instance_ida, ctrl->instance);
3871 out:
3872         if (ctrl->discard_page)
3873                 __free_page(ctrl->discard_page);
3874         return ret;
3875 }
3876 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
3877
3878 /**
3879  * nvme_kill_queues - end all namespace queues
3880  * @ctrl: the dead controller whose namespace queues need to be ended
3881  *
3882  * Call this function when the driver determines it is unable to get the
3883  * controller in a state capable of servicing IO.
3884  */
3885 void nvme_kill_queues(struct nvme_ctrl *ctrl)
3886 {
3887         struct nvme_ns *ns;
3888
3889         down_read(&ctrl->namespaces_rwsem);
3890
3891         /* Forcibly unquiesce queues to avoid blocking dispatch */
3892         if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
3893                 blk_mq_unquiesce_queue(ctrl->admin_q);
3894
3895         list_for_each_entry(ns, &ctrl->namespaces, list)
3896                 nvme_set_queue_dying(ns);
3897
3898         up_read(&ctrl->namespaces_rwsem);
3899 }
3900 EXPORT_SYMBOL_GPL(nvme_kill_queues);
3901
3902 void nvme_unfreeze(struct nvme_ctrl *ctrl)
3903 {
3904         struct nvme_ns *ns;
3905
3906         down_read(&ctrl->namespaces_rwsem);
3907         list_for_each_entry(ns, &ctrl->namespaces, list)
3908                 blk_mq_unfreeze_queue(ns->queue);
3909         up_read(&ctrl->namespaces_rwsem);
3910 }
3911 EXPORT_SYMBOL_GPL(nvme_unfreeze);
3912
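/*
 * Wait up to @timeout jiffies for all namespace queues to finish
 * freezing; the remaining budget carries over from one queue to the
 * next, so the bound applies to the controller as a whole.
 */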
3913 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
3914 {
3915         struct nvme_ns *ns;
3916
3917         down_read(&ctrl->namespaces_rwsem);
3918         list_for_each_entry(ns, &ctrl->namespaces, list) {
3919                 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
3920                 if (timeout <= 0)
3921                         break;
3922         }
3923         up_read(&ctrl->namespaces_rwsem);
3924 }
3925 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
3926
3927 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
3928 {
3929         struct nvme_ns *ns;
3930
3931         down_read(&ctrl->namespaces_rwsem);
3932         list_for_each_entry(ns, &ctrl->namespaces, list)
3933                 blk_mq_freeze_queue_wait(ns->queue);
3934         up_read(&ctrl->namespaces_rwsem);
3935 }
3936 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
3937
3938 void nvme_start_freeze(struct nvme_ctrl *ctrl)
3939 {
3940         struct nvme_ns *ns;
3941
3942         down_read(&ctrl->namespaces_rwsem);
3943         list_for_each_entry(ns, &ctrl->namespaces, list)
3944                 blk_freeze_queue_start(ns->queue);
3945         up_read(&ctrl->namespaces_rwsem);
3946 }
3947 EXPORT_SYMBOL_GPL(nvme_start_freeze);
3948
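/*
 * Quiesce all namespace queues: stop blk-mq from dispatching requests
 * to the driver without freezing the queues or waiting for outstanding
 * I/O to complete.
 */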
3949 void nvme_stop_queues(struct nvme_ctrl *ctrl)
3950 {
3951         struct nvme_ns *ns;
3952
3953         down_read(&ctrl->namespaces_rwsem);
3954         list_for_each_entry(ns, &ctrl->namespaces, list)
3955                 blk_mq_quiesce_queue(ns->queue);
3956         up_read(&ctrl->namespaces_rwsem);
3957 }
3958 EXPORT_SYMBOL_GPL(nvme_stop_queues);
3959
3960 void nvme_start_queues(struct nvme_ctrl *ctrl)
3961 {
3962         struct nvme_ns *ns;
3963
3964         down_read(&ctrl->namespaces_rwsem);
3965         list_for_each_entry(ns, &ctrl->namespaces, list)
3966                 blk_mq_unquiesce_queue(ns->queue);
3967         up_read(&ctrl->namespaces_rwsem);
3968 }
3969 EXPORT_SYMBOL_GPL(nvme_start_queues);
3970
3971
3972 void nvme_sync_queues(struct nvme_ctrl *ctrl)
3973 {
3974         struct nvme_ns *ns;
3975
3976         down_read(&ctrl->namespaces_rwsem);
3977         list_for_each_entry(ns, &ctrl->namespaces, list)
3978                 blk_sync_queue(ns->queue);
3979         up_read(&ctrl->namespaces_rwsem);
3980 }
3981 EXPORT_SYMBOL_GPL(nvme_sync_queues);
3982
3983 /*
3984  * Check we didn't inadvertently grow the command structure sizes:
3985  */
3986 static inline void _nvme_check_size(void)
3987 {
3988         BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
3989         BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
3990         BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
3991         BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
3992         BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
3993         BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
3994         BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
3995         BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
3996         BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
3997         BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
3998         BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
3999         BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
4000         BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
4001         BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
4002         BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
4003         BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
4004         BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
4005 }
4006
4007
4008 static int __init nvme_core_init(void)
4009 {
4010         int result = -ENOMEM;
4011
4012         _nvme_check_size();
4013
4014         nvme_wq = alloc_workqueue("nvme-wq",
4015                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4016         if (!nvme_wq)
4017                 goto out;
4018
4019         nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
4020                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4021         if (!nvme_reset_wq)
4022                 goto destroy_wq;
4023
4024         nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
4025                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4026         if (!nvme_delete_wq)
4027                 goto destroy_reset_wq;
4028
4029         result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
4030         if (result < 0)
4031                 goto destroy_delete_wq;
4032
4033         nvme_class = class_create(THIS_MODULE, "nvme");
4034         if (IS_ERR(nvme_class)) {
4035                 result = PTR_ERR(nvme_class);
4036                 goto unregister_chrdev;
4037         }
4038
4039         nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
4040         if (IS_ERR(nvme_subsys_class)) {
4041                 result = PTR_ERR(nvme_subsys_class);
4042                 goto destroy_class;
4043         }
4044         return 0;
4045
4046 destroy_class:
4047         class_destroy(nvme_class);
4048 unregister_chrdev:
4049         unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4050 destroy_delete_wq:
4051         destroy_workqueue(nvme_delete_wq);
4052 destroy_reset_wq:
4053         destroy_workqueue(nvme_reset_wq);
4054 destroy_wq:
4055         destroy_workqueue(nvme_wq);
4056 out:
4057         return result;
4058 }
4059
4060 static void __exit nvme_core_exit(void)
4061 {
4062         ida_destroy(&nvme_subsystems_ida);
4063         class_destroy(nvme_subsys_class);
4064         class_destroy(nvme_class);
4065         unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4066         destroy_workqueue(nvme_delete_wq);
4067         destroy_workqueue(nvme_reset_wq);
4068         destroy_workqueue(nvme_wq);
4069 }
4070
4071 MODULE_LICENSE("GPL");
4072 MODULE_VERSION("1.0");
4073 module_init(nvme_core_init);
4074 module_exit(nvme_core_exit);