1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  *
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
25
26 #include "nvmet.h"
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
29
30
31 /* *************************** Data Structures/Defines ****************** */
32
33
34 #define NVMET_LS_CTX_COUNT              4
35
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE              2048
38
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
41
42 struct nvmet_fc_ls_iod {
43         struct nvmefc_tgt_ls_req        *lsreq;
44         struct nvmefc_tgt_fcp_req       *fcpreq;        /* only if RS */
45
46         struct list_head                ls_list;        /* tgtport->ls_list */
47
48         struct nvmet_fc_tgtport         *tgtport;
49         struct nvmet_fc_tgt_assoc       *assoc;
50
51         u8                              *rqstbuf;
52         u8                              *rspbuf;
53         u16                             rqstdatalen;
54         dma_addr_t                      rspdma;
55
56         struct scatterlist              sg[2];
57
58         struct work_struct              work;
59 } __aligned(sizeof(unsigned long long));
60
61 #define NVMET_FC_MAX_KB_PER_XFR         256
62
63 enum nvmet_fcp_datadir {
64         NVMET_FCP_NODATA,
65         NVMET_FCP_WRITE,
66         NVMET_FCP_READ,
67         NVMET_FCP_ABORTED,
68 };
69
70 struct nvmet_fc_fcp_iod {
71         struct nvmefc_tgt_fcp_req       *fcpreq;
72
73         struct nvme_fc_cmd_iu           cmdiubuf;
74         struct nvme_fc_ersp_iu          rspiubuf;
75         dma_addr_t                      rspdma;
76         struct scatterlist              *data_sg;
77         struct scatterlist              *next_sg;
78         int                             data_sg_cnt;
79         u32                             next_sg_offset;
80         u32                             total_length;
81         u32                             offset;
82         enum nvmet_fcp_datadir          io_dir;
83         bool                            active;
84         bool                            abort;
85         spinlock_t                      flock;
86
87         struct nvmet_req                req;
88         struct work_struct              work;
89
90         struct nvmet_fc_tgtport         *tgtport;
91         struct nvmet_fc_tgt_queue       *queue;
92
93         struct list_head                fcp_list;       /* tgtport->fcp_list */
94 };
95
96 struct nvmet_fc_tgtport {
97
98         struct nvmet_fc_target_port     fc_target_port;
99
100         struct list_head                tgt_list; /* nvmet_fc_target_list */
101         struct device                   *dev;   /* dev for dma mapping */
102         struct nvmet_fc_target_template *ops;
103
104         struct nvmet_fc_ls_iod          *iod;
105         spinlock_t                      lock;
106         struct list_head                ls_list;
107         struct list_head                ls_busylist;
108         struct list_head                assoc_list;
109         struct ida                      assoc_cnt;
110         struct nvmet_port               *port;
111         struct kref                     ref;
112 };
113
114 struct nvmet_fc_tgt_queue {
115         bool                            ninetypercent;
116         u16                             qid;
117         u16                             sqsize;
118         u16                             ersp_ratio;
119         u16                             sqhd;
120         int                             cpu;
121         atomic_t                        connected;
122         atomic_t                        sqtail;
123         atomic_t                        zrspcnt;
124         atomic_t                        rsn;
125         spinlock_t                      qlock;
126         struct nvmet_port               *port;
127         struct nvmet_cq                 nvme_cq;
128         struct nvmet_sq                 nvme_sq;
129         struct nvmet_fc_tgt_assoc       *assoc;
130         struct nvmet_fc_fcp_iod         *fod;           /* array of fcp_iods */
131         struct list_head                fod_list;
132         struct workqueue_struct         *work_q;
133         struct kref                     ref;
134 } __aligned(sizeof(unsigned long long));
135
136 struct nvmet_fc_tgt_assoc {
137         u64                             association_id;
138         u32                             a_id;
139         struct nvmet_fc_tgtport         *tgtport;
140         struct list_head                a_list;
141         struct nvmet_fc_tgt_queue       *queues[NVMET_NR_QUEUES];
142         struct kref                     ref;
143 };
144
145
146 static inline int
147 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
148 {
149         return (iodptr - iodptr->tgtport->iod);
150 }
151
152 static inline int
153 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
154 {
155         return (fodptr - fodptr->queue->fod);
156 }
157
158
159 /*
160  * Association and Connection IDs:
161  *
162  * Association ID will have random number in upper 6 bytes and zero
163  *   in lower 2 bytes
164  *
165  * Connection IDs will be Association ID with QID or'd in lower 2 bytes
166  *
167  * note: Association ID = Connection ID for queue 0
168  */
169 #define BYTES_FOR_QID                   sizeof(u16)
170 #define BYTES_FOR_QID_SHIFT             (BYTES_FOR_QID * 8)
171 #define NVMET_FC_QUEUEID_MASK           ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
172
173 static inline u64
174 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
175 {
176         return (assoc->association_id | qid);
177 }
178
179 static inline u64
180 nvmet_fc_getassociationid(u64 connectionid)
181 {
182         return connectionid & ~NVMET_FC_QUEUEID_MASK;
183 }
184
185 static inline u16
186 nvmet_fc_getqueueid(u64 connectionid)
187 {
188         return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
189 }
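/*
 * Worked example (illustrative only): if an association was assigned the
 * random id 0x0123456789AB0000, then
 *   nvmet_fc_makeconnid(assoc, 5)                 == 0x0123456789AB0005
 *   nvmet_fc_getassociationid(0x0123456789AB0005) == 0x0123456789AB0000
 *   nvmet_fc_getqueueid(0x0123456789AB0005)       == 5
 * i.e. the low BYTES_FOR_QID bytes carry the qid, the upper bytes carry
 * the association id, and the two can always be split back apart.
 */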
190
191 static inline struct nvmet_fc_tgtport *
192 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
193 {
194         return container_of(targetport, struct nvmet_fc_tgtport,
195                                  fc_target_port);
196 }
197
198 static inline struct nvmet_fc_fcp_iod *
199 nvmet_req_to_fod(struct nvmet_req *nvme_req)
200 {
201         return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
202 }
203
204
205 /* *************************** Globals **************************** */
206
207
208 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
209
210 static LIST_HEAD(nvmet_fc_target_list);
211 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
212
213
214 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
215 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
216 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
217 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
218 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
219 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
220 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
221 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
222
223
224 /* *********************** FC-NVME DMA Handling **************************** */
225
226 /*
227  * The fcloop device passes in a NULL device pointer. Real LLDDs will
228  * pass in a valid device pointer. If NULL is passed to the dma mapping
229  * routines, depending on the platform, it may or may not succeed, and
230  * may crash.
231  *
232  * As such:
233  * Wrap all the dma routines and check the dev pointer.
234  *
235  * For simple mappings (those that return just a dma address), we'll
236  * noop them, returning a dma address of 0.
237  *
238  * On more complex mappings (dma_map_sg), a pseudo routine fills
239  * in the scatter list, setting all dma addresses to 0.
240  */
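/*
 * For example (illustrative): when running over fcloop, dev is NULL, so
 * fc_dma_map_single() below simply returns a dma address of 0 and
 * fc_dma_mapping_error() reports success; the LS/FCP setup paths further
 * down therefore run unchanged without touching the real DMA API.
 */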
241
242 static inline dma_addr_t
243 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
244                 enum dma_data_direction dir)
245 {
246         return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
247 }
248
249 static inline int
250 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
251 {
252         return dev ? dma_mapping_error(dev, dma_addr) : 0;
253 }
254
255 static inline void
256 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
257         enum dma_data_direction dir)
258 {
259         if (dev)
260                 dma_unmap_single(dev, addr, size, dir);
261 }
262
263 static inline void
264 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
265                 enum dma_data_direction dir)
266 {
267         if (dev)
268                 dma_sync_single_for_cpu(dev, addr, size, dir);
269 }
270
271 static inline void
272 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
273                 enum dma_data_direction dir)
274 {
275         if (dev)
276                 dma_sync_single_for_device(dev, addr, size, dir);
277 }
278
279 /* pseudo dma_map_sg call */
280 static int
281 fc_map_sg(struct scatterlist *sg, int nents)
282 {
283         struct scatterlist *s;
284         int i;
285
286         WARN_ON(nents == 0 || sg[0].length == 0);
287
288         for_each_sg(sg, s, nents, i) {
289                 s->dma_address = 0L;
290 #ifdef CONFIG_NEED_SG_DMA_LENGTH
291                 s->dma_length = s->length;
292 #endif
293         }
294         return nents;
295 }
296
297 static inline int
298 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
299                 enum dma_data_direction dir)
300 {
301         return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
302 }
303
304 static inline void
305 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
306                 enum dma_data_direction dir)
307 {
308         if (dev)
309                 dma_unmap_sg(dev, sg, nents, dir);
310 }
311
312
313 /* *********************** FC-NVME Port Management ************************ */
314
315
316 static int
317 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
318 {
319         struct nvmet_fc_ls_iod *iod;
320         int i;
321
322         iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
323                         GFP_KERNEL);
324         if (!iod)
325                 return -ENOMEM;
326
327         tgtport->iod = iod;
328
329         for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
330                 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
331                 iod->tgtport = tgtport;
332                 list_add_tail(&iod->ls_list, &tgtport->ls_list);
333
334                 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
335                         GFP_KERNEL);
336                 if (!iod->rqstbuf)
337                         goto out_fail;
338
339                 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
340
341                 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
342                                                 NVME_FC_MAX_LS_BUFFER_SIZE,
343                                                 DMA_TO_DEVICE);
344                 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
345                         goto out_fail;
346         }
347
348         return 0;
349
350 out_fail:
351         kfree(iod->rqstbuf);
352         list_del(&iod->ls_list);
353         for (iod--, i--; i >= 0; iod--, i--) {
354                 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
355                                 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
356                 kfree(iod->rqstbuf);
357                 list_del(&iod->ls_list);
358         }
359
360         kfree(iod);
361
362         return -EFAULT;
363 }
364
365 static void
366 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
367 {
368         struct nvmet_fc_ls_iod *iod = tgtport->iod;
369         int i;
370
371         for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
372                 fc_dma_unmap_single(tgtport->dev,
373                                 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
374                                 DMA_TO_DEVICE);
375                 kfree(iod->rqstbuf);
376                 list_del(&iod->ls_list);
377         }
378         kfree(tgtport->iod);
379 }
380
381 static struct nvmet_fc_ls_iod *
382 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
383 {
384         struct nvmet_fc_ls_iod *iod;
385         unsigned long flags;
386
387         spin_lock_irqsave(&tgtport->lock, flags);
388         iod = list_first_entry_or_null(&tgtport->ls_list,
389                                         struct nvmet_fc_ls_iod, ls_list);
390         if (iod)
391                 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
392         spin_unlock_irqrestore(&tgtport->lock, flags);
393         return iod;
394 }
395
396
397 static void
398 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
399                         struct nvmet_fc_ls_iod *iod)
400 {
401         unsigned long flags;
402
403         spin_lock_irqsave(&tgtport->lock, flags);
404         list_move(&iod->ls_list, &tgtport->ls_list);
405         spin_unlock_irqrestore(&tgtport->lock, flags);
406 }
407
408 static void
409 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
410                                 struct nvmet_fc_tgt_queue *queue)
411 {
412         struct nvmet_fc_fcp_iod *fod = queue->fod;
413         int i;
414
415         for (i = 0; i < queue->sqsize; fod++, i++) {
416                 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
417                 fod->tgtport = tgtport;
418                 fod->queue = queue;
419                 fod->active = false;
420                 list_add_tail(&fod->fcp_list, &queue->fod_list);
421                 spin_lock_init(&fod->flock);
422
423                 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
424                                         sizeof(fod->rspiubuf), DMA_TO_DEVICE);
425                 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
426                         list_del(&fod->fcp_list);
427                         for (fod--, i--; i >= 0; fod--, i--) {
428                                 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
429                                                 sizeof(fod->rspiubuf),
430                                                 DMA_TO_DEVICE);
431                                 fod->rspdma = 0L;
432                                 list_del(&fod->fcp_list);
433                         }
434
435                         return;
436                 }
437         }
438 }
439
440 static void
441 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
442                                 struct nvmet_fc_tgt_queue *queue)
443 {
444         struct nvmet_fc_fcp_iod *fod = queue->fod;
445         int i;
446
447         for (i = 0; i < queue->sqsize; fod++, i++) {
448                 if (fod->rspdma)
449                         fc_dma_unmap_single(tgtport->dev, fod->rspdma,
450                                 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
451         }
452 }
453
454 static struct nvmet_fc_fcp_iod *
455 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
456 {
457         struct nvmet_fc_fcp_iod *fod;
458         unsigned long flags;
459
460         spin_lock_irqsave(&queue->qlock, flags);
461         fod = list_first_entry_or_null(&queue->fod_list,
462                                         struct nvmet_fc_fcp_iod, fcp_list);
463         if (fod) {
464                 list_del(&fod->fcp_list);
465                 fod->active = true;
466                 fod->abort = false;
467                 /*
468                  * no queue reference is taken, as it was taken by the
469                  * queue lookup just prior to the allocation. The iod
470                  * will "inherit" that reference.
471                  */
472         }
473         spin_unlock_irqrestore(&queue->qlock, flags);
474         return fod;
475 }
476
477
478 static void
479 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
480                         struct nvmet_fc_fcp_iod *fod)
481 {
482         unsigned long flags;
483
484         spin_lock_irqsave(&queue->qlock, flags);
485         list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
486         fod->active = false;
487         spin_unlock_irqrestore(&queue->qlock, flags);
488
489         /*
490          * release the reference taken at queue lookup and fod allocation
491          */
492         nvmet_fc_tgt_q_put(queue);
493 }
494
495 static int
496 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
497 {
498         int cpu, idx, cnt;
499
500         if (!(tgtport->ops->target_features &
501                         NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
502             tgtport->ops->max_hw_queues == 1)
503                 return WORK_CPU_UNBOUND;
504
505         /* Simple cpu selection based on qid modulo active cpu count */
506         idx = !qid ? 0 : (qid - 1) % num_active_cpus();
507
508         /* find the n'th active cpu */
509         for (cpu = 0, cnt = 0; ; ) {
510                 if (cpu_active(cpu)) {
511                         if (cnt == idx)
512                                 break;
513                         cnt++;
514                 }
515                 cpu = (cpu + 1) % num_possible_cpus();
516         }
517
518         return cpu;
519 }
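/*
 * Worked example (illustrative): with 4 active cpus and the
 * NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED feature set, qids 1..4 map to
 * active-cpu index (qid - 1) % 4 = 0..3 and qid 5 wraps back to index 0;
 * qid 0 (the admin queue) always selects index 0. Without the feature
 * (or with a single hardware queue) WORK_CPU_UNBOUND is returned instead.
 */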
520
521 static struct nvmet_fc_tgt_queue *
522 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
523                         u16 qid, u16 sqsize)
524 {
525         struct nvmet_fc_tgt_queue *queue;
526         unsigned long flags;
527         int ret;
528
529         if (qid >= NVMET_NR_QUEUES)
530                 return NULL;
531
532         queue = kzalloc((sizeof(*queue) +
533                                 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
534                                 GFP_KERNEL);
535         if (!queue)
536                 return NULL;
537
538         if (!nvmet_fc_tgt_a_get(assoc))
539                 goto out_free_queue;
540
541         queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
542                                 assoc->tgtport->fc_target_port.port_num,
543                                 assoc->a_id, qid);
544         if (!queue->work_q)
545                 goto out_a_put;
546
547         queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
548         queue->qid = qid;
549         queue->sqsize = sqsize;
550         queue->assoc = assoc;
551         queue->port = assoc->tgtport->port;
552         queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
553         INIT_LIST_HEAD(&queue->fod_list);
554         atomic_set(&queue->connected, 0);
555         atomic_set(&queue->sqtail, 0);
556         atomic_set(&queue->rsn, 1);
557         atomic_set(&queue->zrspcnt, 0);
558         spin_lock_init(&queue->qlock);
559         kref_init(&queue->ref);
560
561         nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
562
563         ret = nvmet_sq_init(&queue->nvme_sq);
564         if (ret)
565                 goto out_fail_iodlist;
566
567         WARN_ON(assoc->queues[qid]);
568         spin_lock_irqsave(&assoc->tgtport->lock, flags);
569         assoc->queues[qid] = queue;
570         spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
571
572         return queue;
573
574 out_fail_iodlist:
575         nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
576         destroy_workqueue(queue->work_q);
577 out_a_put:
578         nvmet_fc_tgt_a_put(assoc);
579 out_free_queue:
580         kfree(queue);
581         return NULL;
582 }
583
584
585 static void
586 nvmet_fc_tgt_queue_free(struct kref *ref)
587 {
588         struct nvmet_fc_tgt_queue *queue =
589                 container_of(ref, struct nvmet_fc_tgt_queue, ref);
590         unsigned long flags;
591
592         spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
593         queue->assoc->queues[queue->qid] = NULL;
594         spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
595
596         nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
597
598         nvmet_fc_tgt_a_put(queue->assoc);
599
600         destroy_workqueue(queue->work_q);
601
602         kfree(queue);
603 }
604
605 static void
606 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
607 {
608         kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
609 }
610
611 static int
612 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
613 {
614         return kref_get_unless_zero(&queue->ref);
615 }
616
617
618 static void
619 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
620                                 struct nvmefc_tgt_fcp_req *fcpreq)
621 {
622         int ret;
623
624         fcpreq->op = NVMET_FCOP_ABORT;
625         fcpreq->offset = 0;
626         fcpreq->timeout = 0;
627         fcpreq->transfer_length = 0;
628         fcpreq->transferred_length = 0;
629         fcpreq->fcp_error = 0;
630         fcpreq->sg_cnt = 0;
631
632         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
633         if (ret)
634                 /* should never reach here !! */
635                 WARN_ON(1);
636 }
637
638
639 static void
640 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
641 {
642         struct nvmet_fc_fcp_iod *fod = queue->fod;
643         unsigned long flags;
644         int i;
645         bool disconnect;
646
647         disconnect = atomic_xchg(&queue->connected, 0);
648
649         spin_lock_irqsave(&queue->qlock, flags);
650         /* abort outstanding io's */
651         for (i = 0; i < queue->sqsize; fod++, i++) {
652                 if (fod->active) {
653                         spin_lock(&fod->flock);
654                         fod->abort = true;
655                         spin_unlock(&fod->flock);
656                 }
657         }
658         spin_unlock_irqrestore(&queue->qlock, flags);
659
660         flush_workqueue(queue->work_q);
661
662         if (disconnect)
663                 nvmet_sq_destroy(&queue->nvme_sq);
664
665         nvmet_fc_tgt_q_put(queue);
666 }
667
668 static struct nvmet_fc_tgt_queue *
669 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
670                                 u64 connection_id)
671 {
672         struct nvmet_fc_tgt_assoc *assoc;
673         struct nvmet_fc_tgt_queue *queue;
674         u64 association_id = nvmet_fc_getassociationid(connection_id);
675         u16 qid = nvmet_fc_getqueueid(connection_id);
676         unsigned long flags;
677
678         spin_lock_irqsave(&tgtport->lock, flags);
679         list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
680                 if (association_id == assoc->association_id) {
681                         queue = assoc->queues[qid];
682                         if (queue &&
683                             (!atomic_read(&queue->connected) ||
684                              !nvmet_fc_tgt_q_get(queue)))
685                                 queue = NULL;
686                         spin_unlock_irqrestore(&tgtport->lock, flags);
687                         return queue;
688                 }
689         }
690         spin_unlock_irqrestore(&tgtport->lock, flags);
691         return NULL;
692 }
693
694 static struct nvmet_fc_tgt_assoc *
695 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
696 {
697         struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
698         unsigned long flags;
699         u64 ran;
700         int idx;
701         bool needrandom = true;
702
703         assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
704         if (!assoc)
705                 return NULL;
706
707         idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
708         if (idx < 0)
709                 goto out_free_assoc;
710
711         if (!nvmet_fc_tgtport_get(tgtport))
712                 goto out_ida_put;
713
714         assoc->tgtport = tgtport;
715         assoc->a_id = idx;
716         INIT_LIST_HEAD(&assoc->a_list);
717         kref_init(&assoc->ref);
718
719         while (needrandom) {
720                 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
721                 ran = ran << BYTES_FOR_QID_SHIFT;
722
723                 spin_lock_irqsave(&tgtport->lock, flags);
724                 needrandom = false;
725                 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
726                         if (ran == tmpassoc->association_id) {
727                                 needrandom = true;
728                                 break;
729                         }
730                 if (!needrandom) {
731                         assoc->association_id = ran;
732                         list_add_tail(&assoc->a_list, &tgtport->assoc_list);
733                 }
734                 spin_unlock_irqrestore(&tgtport->lock, flags);
735         }
736
737         return assoc;
738
739 out_ida_put:
740         ida_simple_remove(&tgtport->assoc_cnt, idx);
741 out_free_assoc:
742         kfree(assoc);
743         return NULL;
744 }
745
746 static void
747 nvmet_fc_target_assoc_free(struct kref *ref)
748 {
749         struct nvmet_fc_tgt_assoc *assoc =
750                 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
751         struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
752         unsigned long flags;
753
754         spin_lock_irqsave(&tgtport->lock, flags);
755         list_del(&assoc->a_list);
756         spin_unlock_irqrestore(&tgtport->lock, flags);
757         ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
758         kfree(assoc);
759         nvmet_fc_tgtport_put(tgtport);
760 }
761
762 static void
763 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
764 {
765         kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
766 }
767
768 static int
769 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
770 {
771         return kref_get_unless_zero(&assoc->ref);
772 }
773
774 static void
775 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
776 {
777         struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
778         struct nvmet_fc_tgt_queue *queue;
779         unsigned long flags;
780         int i;
781
782         spin_lock_irqsave(&tgtport->lock, flags);
783         for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
784                 queue = assoc->queues[i];
785                 if (queue) {
786                         if (!nvmet_fc_tgt_q_get(queue))
787                                 continue;
788                         spin_unlock_irqrestore(&tgtport->lock, flags);
789                         nvmet_fc_delete_target_queue(queue);
790                         nvmet_fc_tgt_q_put(queue);
791                         spin_lock_irqsave(&tgtport->lock, flags);
792                 }
793         }
794         spin_unlock_irqrestore(&tgtport->lock, flags);
795
796         nvmet_fc_tgt_a_put(assoc);
797 }
798
799 static struct nvmet_fc_tgt_assoc *
800 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
801                                 u64 association_id)
802 {
803         struct nvmet_fc_tgt_assoc *assoc;
804         struct nvmet_fc_tgt_assoc *ret = NULL;
805         unsigned long flags;
806
807         spin_lock_irqsave(&tgtport->lock, flags);
808         list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
809                 if (association_id == assoc->association_id) {
810                         ret = assoc;
811                         nvmet_fc_tgt_a_get(assoc);
812                         break;
813                 }
814         }
815         spin_unlock_irqrestore(&tgtport->lock, flags);
816
817         return ret;
818 }
819
820
821 /**
822  * nvmet_fc_register_targetport - transport entry point called by an
823  *                              LLDD to register the existence of a local
824  *                              NVME subsystem FC port.
825  * @pinfo:     pointer to information about the port to be registered
826  * @template:  LLDD entrypoints and operational parameters for the port
827  * @dev:       physical hardware device node port corresponds to. Will be
828  *             used for DMA mappings
829  * @portptr:   pointer to a target port pointer. Upon success, the routine
830  *             will allocate a nvmet_fc_target_port structure and place its
831  *             address in the target port pointer. Upon failure, the target
832  *             port pointer will be set to NULL.
833  *
834  * Returns:
835  * a completion status. Must be 0 upon success; a negative errno
836  * (ex: -ENXIO) upon failure.
837  */
838 int
839 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
840                         struct nvmet_fc_target_template *template,
841                         struct device *dev,
842                         struct nvmet_fc_target_port **portptr)
843 {
844         struct nvmet_fc_tgtport *newrec;
845         unsigned long flags;
846         int ret, idx;
847
848         if (!template->xmt_ls_rsp || !template->fcp_op ||
849             !template->targetport_delete ||
850             !template->max_hw_queues || !template->max_sgl_segments ||
851             !template->max_dif_sgl_segments || !template->dma_boundary) {
852                 ret = -EINVAL;
853                 goto out_regtgt_failed;
854         }
855
856         newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
857                          GFP_KERNEL);
858         if (!newrec) {
859                 ret = -ENOMEM;
860                 goto out_regtgt_failed;
861         }
862
863         idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
864         if (idx < 0) {
865                 ret = -ENOSPC;
866                 goto out_fail_kfree;
867         }
868
869         if (!get_device(dev) && dev) {
870                 ret = -ENODEV;
871                 goto out_ida_put;
872         }
873
874         newrec->fc_target_port.node_name = pinfo->node_name;
875         newrec->fc_target_port.port_name = pinfo->port_name;
876         newrec->fc_target_port.private = &newrec[1];
877         newrec->fc_target_port.port_id = pinfo->port_id;
878         newrec->fc_target_port.port_num = idx;
879         INIT_LIST_HEAD(&newrec->tgt_list);
880         newrec->dev = dev;
881         newrec->ops = template;
882         spin_lock_init(&newrec->lock);
883         INIT_LIST_HEAD(&newrec->ls_list);
884         INIT_LIST_HEAD(&newrec->ls_busylist);
885         INIT_LIST_HEAD(&newrec->assoc_list);
886         kref_init(&newrec->ref);
887         ida_init(&newrec->assoc_cnt);
888
889         ret = nvmet_fc_alloc_ls_iodlist(newrec);
890         if (ret) {
891                 ret = -ENOMEM;
892                 goto out_free_newrec;
893         }
894
895         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
896         list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
897         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
898
899         *portptr = &newrec->fc_target_port;
900         return 0;
901
902 out_free_newrec:
903         put_device(dev);
904 out_ida_put:
905         ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
906 out_fail_kfree:
907         kfree(newrec);
908 out_regtgt_failed:
909         *portptr = NULL;
910         return ret;
911 }
912 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
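/*
 * Minimal registration sketch (illustrative only; the "examplefc" LLDD,
 * its examplefc_tgt_template, and the lport_* / pdev variables are
 * assumptions, not part of this driver). The template must provide
 * xmt_ls_rsp, fcp_op, targetport_delete and the sizing fields checked
 * above:
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = lport_wwnn,
 *		.port_name = lport_wwpn,
 *		.port_id   = lport_d_id,
 *	};
 *	struct nvmet_fc_target_port *tgtport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &examplefc_tgt_template,
 *					   &pdev->dev, &tgtport);
 *	if (ret)
 *		return ret;
 *
 * On success, tgtport->private points at target_priv_sz bytes of
 * LLDD-private space.
 */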
913
914
915 static void
916 nvmet_fc_free_tgtport(struct kref *ref)
917 {
918         struct nvmet_fc_tgtport *tgtport =
919                 container_of(ref, struct nvmet_fc_tgtport, ref);
920         struct device *dev = tgtport->dev;
921         unsigned long flags;
922
923         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
924         list_del(&tgtport->tgt_list);
925         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
926
927         nvmet_fc_free_ls_iodlist(tgtport);
928
929         /* let the LLDD know we've finished tearing it down */
930         tgtport->ops->targetport_delete(&tgtport->fc_target_port);
931
932         ida_simple_remove(&nvmet_fc_tgtport_cnt,
933                         tgtport->fc_target_port.port_num);
934
935         ida_destroy(&tgtport->assoc_cnt);
936
937         kfree(tgtport);
938
939         put_device(dev);
940 }
941
942 static void
943 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
944 {
945         kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
946 }
947
948 static int
949 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
950 {
951         return kref_get_unless_zero(&tgtport->ref);
952 }
953
954 static void
955 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
956 {
957         struct nvmet_fc_tgt_assoc *assoc, *next;
958         unsigned long flags;
959
960         spin_lock_irqsave(&tgtport->lock, flags);
961         list_for_each_entry_safe(assoc, next,
962                                 &tgtport->assoc_list, a_list) {
963                 if (!nvmet_fc_tgt_a_get(assoc))
964                         continue;
965                 spin_unlock_irqrestore(&tgtport->lock, flags);
966                 nvmet_fc_delete_target_assoc(assoc);
967                 nvmet_fc_tgt_a_put(assoc);
968                 spin_lock_irqsave(&tgtport->lock, flags);
969         }
970         spin_unlock_irqrestore(&tgtport->lock, flags);
971 }
972
973 /*
974  * nvmet layer has called to terminate an association
975  */
976 static void
977 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
978 {
979         struct nvmet_fc_tgtport *tgtport, *next;
980         struct nvmet_fc_tgt_assoc *assoc;
981         struct nvmet_fc_tgt_queue *queue;
982         unsigned long flags;
983         bool found_ctrl = false;
984
985         /* this is a bit ugly, but don't want to make locks layered */
986         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
987         list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
988                         tgt_list) {
989                 if (!nvmet_fc_tgtport_get(tgtport))
990                         continue;
991                 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
992
993                 spin_lock_irqsave(&tgtport->lock, flags);
994                 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
995                         queue = assoc->queues[0];
996                         if (queue && queue->nvme_sq.ctrl == ctrl) {
997                                 if (nvmet_fc_tgt_a_get(assoc))
998                                         found_ctrl = true;
999                                 break;
1000                         }
1001                 }
1002                 spin_unlock_irqrestore(&tgtport->lock, flags);
1003
1004                 nvmet_fc_tgtport_put(tgtport);
1005
1006                 if (found_ctrl) {
1007                         nvmet_fc_delete_target_assoc(assoc);
1008                         nvmet_fc_tgt_a_put(assoc);
1009                         return;
1010                 }
1011
1012                 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1013         }
1014         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1015 }
1016
1017 /**
1018  * nvmet_fc_unregister_targetport - transport entry point called by an
1019  *                              LLDD to deregister/remove a previously
1020  *                              registered local NVME subsystem FC port.
1021  * @target_port: pointer to the (registered) target port that is to be
1022  *           deregistered.
1023  *
1024  * Returns:
1025  * a completion status. Must be 0 upon success; a negative errno
1026  * (ex: -ENXIO) upon failure.
1027  */
1028 int
1029 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1030 {
1031         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1032
1033         /* terminate any outstanding associations */
1034         __nvmet_fc_free_assocs(tgtport);
1035
1036         nvmet_fc_tgtport_put(tgtport);
1037
1038         return 0;
1039 }
1040 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1041
1042
1043 /* *********************** FC-NVME LS Handling **************************** */
1044
1045
1046 static void
1047 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
1048 {
1049         struct fcnvme_ls_acc_hdr *acc = buf;
1050
1051         acc->w0.ls_cmd = ls_cmd;
1052         acc->desc_list_len = desc_len;
1053         acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1054         acc->rqst.desc_len =
1055                         fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1056         acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1057 }
1058
1059 static int
1060 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1061                         u8 reason, u8 explanation, u8 vendor)
1062 {
1063         struct fcnvme_ls_rjt *rjt = buf;
1064
1065         nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1066                         fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1067                         ls_cmd);
1068         rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1069         rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1070         rjt->rjt.reason_code = reason;
1071         rjt->rjt.reason_explanation = explanation;
1072         rjt->rjt.vendor = vendor;
1073
1074         return sizeof(struct fcnvme_ls_rjt);
1075 }
1076
1077 /* Validation Error indexes into the string table below */
1078 enum {
1079         VERR_NO_ERROR           = 0,
1080         VERR_CR_ASSOC_LEN       = 1,
1081         VERR_CR_ASSOC_RQST_LEN  = 2,
1082         VERR_CR_ASSOC_CMD       = 3,
1083         VERR_CR_ASSOC_CMD_LEN   = 4,
1084         VERR_ERSP_RATIO         = 5,
1085         VERR_ASSOC_ALLOC_FAIL   = 6,
1086         VERR_QUEUE_ALLOC_FAIL   = 7,
1087         VERR_CR_CONN_LEN        = 8,
1088         VERR_CR_CONN_RQST_LEN   = 9,
1089         VERR_ASSOC_ID           = 10,
1090         VERR_ASSOC_ID_LEN       = 11,
1091         VERR_NO_ASSOC           = 12,
1092         VERR_CONN_ID            = 13,
1093         VERR_CONN_ID_LEN        = 14,
1094         VERR_NO_CONN            = 15,
1095         VERR_CR_CONN_CMD        = 16,
1096         VERR_CR_CONN_CMD_LEN    = 17,
1097         VERR_DISCONN_LEN        = 18,
1098         VERR_DISCONN_RQST_LEN   = 19,
1099         VERR_DISCONN_CMD        = 20,
1100         VERR_DISCONN_CMD_LEN    = 21,
1101         VERR_DISCONN_SCOPE      = 22,
1102         VERR_RS_LEN             = 23,
1103         VERR_RS_RQST_LEN        = 24,
1104         VERR_RS_CMD             = 25,
1105         VERR_RS_CMD_LEN         = 26,
1106         VERR_RS_RCTL            = 27,
1107         VERR_RS_RO              = 28,
1108 };
1109
1110 static char *validation_errors[] = {
1111         "OK",
1112         "Bad CR_ASSOC Length",
1113         "Bad CR_ASSOC Rqst Length",
1114         "Not CR_ASSOC Cmd",
1115         "Bad CR_ASSOC Cmd Length",
1116         "Bad Ersp Ratio",
1117         "Association Allocation Failed",
1118         "Queue Allocation Failed",
1119         "Bad CR_CONN Length",
1120         "Bad CR_CONN Rqst Length",
1121         "Not Association ID",
1122         "Bad Association ID Length",
1123         "No Association",
1124         "Not Connection ID",
1125         "Bad Connection ID Length",
1126         "No Connection",
1127         "Not CR_CONN Cmd",
1128         "Bad CR_CONN Cmd Length",
1129         "Bad DISCONN Length",
1130         "Bad DISCONN Rqst Length",
1131         "Not DISCONN Cmd",
1132         "Bad DISCONN Cmd Length",
1133         "Bad Disconnect Scope",
1134         "Bad RS Length",
1135         "Bad RS Rqst Length",
1136         "Not RS Cmd",
1137         "Bad RS Cmd Length",
1138         "Bad RS R_CTL",
1139         "Bad RS Relative Offset",
1140 };
1141
1142 static void
1143 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1144                         struct nvmet_fc_ls_iod *iod)
1145 {
1146         struct fcnvme_ls_cr_assoc_rqst *rqst =
1147                                 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1148         struct fcnvme_ls_cr_assoc_acc *acc =
1149                                 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1150         struct nvmet_fc_tgt_queue *queue;
1151         int ret = 0;
1152
1153         memset(acc, 0, sizeof(*acc));
1154
1155         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
1156                 ret = VERR_CR_ASSOC_LEN;
1157         else if (rqst->desc_list_len !=
1158                         fcnvme_lsdesc_len(
1159                                 sizeof(struct fcnvme_ls_cr_assoc_rqst)))
1160                 ret = VERR_CR_ASSOC_RQST_LEN;
1161         else if (rqst->assoc_cmd.desc_tag !=
1162                         cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1163                 ret = VERR_CR_ASSOC_CMD;
1164         else if (rqst->assoc_cmd.desc_len !=
1165                         fcnvme_lsdesc_len(
1166                                 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
1167                 ret = VERR_CR_ASSOC_CMD_LEN;
1168         else if (!rqst->assoc_cmd.ersp_ratio ||
1169                  (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1170                                 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1171                 ret = VERR_ERSP_RATIO;
1172
1173         else {
1174                 /* new association w/ admin queue */
1175                 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1176                 if (!iod->assoc)
1177                         ret = VERR_ASSOC_ALLOC_FAIL;
1178                 else {
1179                         queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1180                                         be16_to_cpu(rqst->assoc_cmd.sqsize));
1181                         if (!queue)
1182                                 ret = VERR_QUEUE_ALLOC_FAIL;
1183                 }
1184         }
1185
1186         if (ret) {
1187                 dev_err(tgtport->dev,
1188                         "Create Association LS failed: %s\n",
1189                         validation_errors[ret]);
1190                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1191                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1192                                 ELS_RJT_LOGIC,
1193                                 ELS_EXPL_NONE, 0);
1194                 return;
1195         }
1196
1197         queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1198         atomic_set(&queue->connected, 1);
1199         queue->sqhd = 0;        /* best place to init value */
1200
1201         /* format a response */
1202
1203         iod->lsreq->rsplen = sizeof(*acc);
1204
1205         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1206                         fcnvme_lsdesc_len(
1207                                 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1208                         FCNVME_LS_CREATE_ASSOCIATION);
1209         acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1210         acc->associd.desc_len =
1211                         fcnvme_lsdesc_len(
1212                                 sizeof(struct fcnvme_lsdesc_assoc_id));
1213         acc->associd.association_id =
1214                         cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1215         acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1216         acc->connectid.desc_len =
1217                         fcnvme_lsdesc_len(
1218                                 sizeof(struct fcnvme_lsdesc_conn_id));
1219         acc->connectid.connection_id = acc->associd.association_id;
1220 }
1221
1222 static void
1223 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1224                         struct nvmet_fc_ls_iod *iod)
1225 {
1226         struct fcnvme_ls_cr_conn_rqst *rqst =
1227                                 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1228         struct fcnvme_ls_cr_conn_acc *acc =
1229                                 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1230         struct nvmet_fc_tgt_queue *queue;
1231         int ret = 0;
1232
1233         memset(acc, 0, sizeof(*acc));
1234
1235         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1236                 ret = VERR_CR_CONN_LEN;
1237         else if (rqst->desc_list_len !=
1238                         fcnvme_lsdesc_len(
1239                                 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1240                 ret = VERR_CR_CONN_RQST_LEN;
1241         else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1242                 ret = VERR_ASSOC_ID;
1243         else if (rqst->associd.desc_len !=
1244                         fcnvme_lsdesc_len(
1245                                 sizeof(struct fcnvme_lsdesc_assoc_id)))
1246                 ret = VERR_ASSOC_ID_LEN;
1247         else if (rqst->connect_cmd.desc_tag !=
1248                         cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1249                 ret = VERR_CR_CONN_CMD;
1250         else if (rqst->connect_cmd.desc_len !=
1251                         fcnvme_lsdesc_len(
1252                                 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1253                 ret = VERR_CR_CONN_CMD_LEN;
1254         else if (!rqst->connect_cmd.ersp_ratio ||
1255                  (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1256                                 be16_to_cpu(rqst->connect_cmd.sqsize)))
1257                 ret = VERR_ERSP_RATIO;
1258
1259         else {
1260                 /* new io queue */
1261                 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1262                                 be64_to_cpu(rqst->associd.association_id));
1263                 if (!iod->assoc)
1264                         ret = VERR_NO_ASSOC;
1265                 else {
1266                         queue = nvmet_fc_alloc_target_queue(iod->assoc,
1267                                         be16_to_cpu(rqst->connect_cmd.qid),
1268                                         be16_to_cpu(rqst->connect_cmd.sqsize));
1269                         if (!queue)
1270                                 ret = VERR_QUEUE_ALLOC_FAIL;
1271
1272                         /* release get taken in nvmet_fc_find_target_assoc */
1273                         nvmet_fc_tgt_a_put(iod->assoc);
1274                 }
1275         }
1276
1277         if (ret) {
1278                 dev_err(tgtport->dev,
1279                         "Create Connection LS failed: %s\n",
1280                         validation_errors[ret]);
1281                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1282                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1283                                 (ret == VERR_NO_ASSOC) ?
1284                                                 ELS_RJT_PROT : ELS_RJT_LOGIC,
1285                                 ELS_EXPL_NONE, 0);
1286                 return;
1287         }
1288
1289         queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1290         atomic_set(&queue->connected, 1);
1291         queue->sqhd = 0;        /* best place to init value */
1292
1293         /* format a response */
1294
1295         iod->lsreq->rsplen = sizeof(*acc);
1296
1297         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1298                         fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1299                         FCNVME_LS_CREATE_CONNECTION);
1300         acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1301         acc->connectid.desc_len =
1302                         fcnvme_lsdesc_len(
1303                                 sizeof(struct fcnvme_lsdesc_conn_id));
1304         acc->connectid.connection_id =
1305                         cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1306                                 be16_to_cpu(rqst->connect_cmd.qid)));
1307 }
1308
1309 static void
1310 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1311                         struct nvmet_fc_ls_iod *iod)
1312 {
1313         struct fcnvme_ls_disconnect_rqst *rqst =
1314                         (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1315         struct fcnvme_ls_disconnect_acc *acc =
1316                         (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1317         struct nvmet_fc_tgt_queue *queue;
1318         struct nvmet_fc_tgt_assoc *assoc;
1319         int ret = 0;
1320         bool del_assoc = false;
1321
1322         memset(acc, 0, sizeof(*acc));
1323
1324         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1325                 ret = VERR_DISCONN_LEN;
1326         else if (rqst->desc_list_len !=
1327                         fcnvme_lsdesc_len(
1328                                 sizeof(struct fcnvme_ls_disconnect_rqst)))
1329                 ret = VERR_DISCONN_RQST_LEN;
1330         else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1331                 ret = VERR_ASSOC_ID;
1332         else if (rqst->associd.desc_len !=
1333                         fcnvme_lsdesc_len(
1334                                 sizeof(struct fcnvme_lsdesc_assoc_id)))
1335                 ret = VERR_ASSOC_ID_LEN;
1336         else if (rqst->discon_cmd.desc_tag !=
1337                         cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1338                 ret = VERR_DISCONN_CMD;
1339         else if (rqst->discon_cmd.desc_len !=
1340                         fcnvme_lsdesc_len(
1341                                 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1342                 ret = VERR_DISCONN_CMD_LEN;
1343         else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1344                         (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1345                 ret = VERR_DISCONN_SCOPE;
1346         else {
1347                 /* match an active association */
1348                 assoc = nvmet_fc_find_target_assoc(tgtport,
1349                                 be64_to_cpu(rqst->associd.association_id));
1350                 iod->assoc = assoc;
1351                 if (!assoc)
1352                         ret = VERR_NO_ASSOC;
1353         }
1354
1355         if (ret) {
1356                 dev_err(tgtport->dev,
1357                         "Disconnect LS failed: %s\n",
1358                         validation_errors[ret]);
1359                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1360                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1361                                 (ret == VERR_NO_ASSOC) ? ELS_RJT_PROT : ELS_RJT_LOGIC,
1362                                 ELS_EXPL_NONE, 0);
1363                 return;
1364         }
1365
1366         /* format a response */
1367
1368         iod->lsreq->rsplen = sizeof(*acc);
1369
1370         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1371                         fcnvme_lsdesc_len(
1372                                 sizeof(struct fcnvme_ls_disconnect_acc)),
1373                         FCNVME_LS_DISCONNECT);
1374
1375
1376         if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
1377                 queue = nvmet_fc_find_target_queue(tgtport,
1378                                         be64_to_cpu(rqst->discon_cmd.id));
1379                 if (queue) {
1380                         int qid = queue->qid;
1381
1382                         nvmet_fc_delete_target_queue(queue);
1383
1384                         /* release the get taken by find_target_queue */
1385                         nvmet_fc_tgt_q_put(queue);
1386
1387                         /* tear association down if admin queue terminated */
1388                         if (!qid)
1389                                 del_assoc = true;
1390                 }
1391         }
1392
1393         /* release get taken in nvmet_fc_find_target_assoc */
1394         nvmet_fc_tgt_a_put(iod->assoc);
1395
1396         if (del_assoc)
1397                 nvmet_fc_delete_target_assoc(iod->assoc);
1398 }
1399
1400
1401 /* *********************** NVME Ctrl Routines **************************** */
1402
1403
1404 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1405
1406 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1407
1408 static void
1409 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1410 {
1411         struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1412         struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1413
1414         fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1415                                 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1416         nvmet_fc_free_ls_iod(tgtport, iod);
1417         nvmet_fc_tgtport_put(tgtport);
1418 }
1419
1420 static void
1421 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1422                                 struct nvmet_fc_ls_iod *iod)
1423 {
1424         int ret;
1425
1426         fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1427                                   NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1428
1429         ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1430         if (ret)
1431                 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1432 }
1433
1434 /*
1435  * Actual processing routine for received FC-NVME LS Requests from the LLD
1436  */
1437 static void
1438 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1439                         struct nvmet_fc_ls_iod *iod)
1440 {
1441         struct fcnvme_ls_rqst_w0 *w0 =
1442                         (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1443
1444         iod->lsreq->nvmet_fc_private = iod;
1445         iod->lsreq->rspbuf = iod->rspbuf;
1446         iod->lsreq->rspdma = iod->rspdma;
1447         iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1448         /* Be preventative: handlers will later set it to a valid length */
1449         iod->lsreq->rsplen = 0;
1450
1451         iod->assoc = NULL;
1452
1453         /*
1454          * handlers:
1455          *   parse request input, execute the request, and format the
1456          *   LS response
1457          */
1458         switch (w0->ls_cmd) {
1459         case FCNVME_LS_CREATE_ASSOCIATION:
1460                 /* Creates Association and initial Admin Queue/Connection */
1461                 nvmet_fc_ls_create_association(tgtport, iod);
1462                 break;
1463         case FCNVME_LS_CREATE_CONNECTION:
1464                 /* Creates an IO Queue/Connection */
1465                 nvmet_fc_ls_create_connection(tgtport, iod);
1466                 break;
1467         case FCNVME_LS_DISCONNECT:
1468                 /* Terminate a Queue/Connection or the Association */
1469                 nvmet_fc_ls_disconnect(tgtport, iod);
1470                 break;
1471         default:
1472                 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1473                                 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1474                                 ELS_RJT_INVAL, ELS_EXPL_NONE, 0);
1475         }
1476
1477         nvmet_fc_xmt_ls_rsp(tgtport, iod);
1478 }
1479
1480 /*
1481  * Work-queue context wrapper around nvmet_fc_handle_ls_rqst()
1482  */
1483 static void
1484 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1485 {
1486         struct nvmet_fc_ls_iod *iod =
1487                 container_of(work, struct nvmet_fc_ls_iod, work);
1488         struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1489
1490         nvmet_fc_handle_ls_rqst(tgtport, iod);
1491 }
1492
1493
1494 /**
1495  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1496  *                       upon the reception of an NVME LS request.
1497  *
1498  * The nvmet-fc layer will copy the payload to an internal structure for
1499  * processing.  As such, upon completion of the routine, the LLDD may
1500  * immediately free/reuse the LS request buffer passed in the call.
1501  *
1502  * If this routine returns an error, the LLDD should abort the exchange.
1503  *
1504  * @target_port: pointer to the (registered) target port the LS was
1505  *              received on.
1506  * @lsreq:      pointer to an lsreq request structure to be used to reference
1507  *              the exchange corresponding to the LS.
1508  * @lsreqbuf:   pointer to the buffer containing the LS Request
1509  * @lsreqbuf_len: length, in bytes, of the received LS request
1510  */
1511 int
1512 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1513                         struct nvmefc_tgt_ls_req *lsreq,
1514                         void *lsreqbuf, u32 lsreqbuf_len)
1515 {
1516         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1517         struct nvmet_fc_ls_iod *iod;
1518
1519         if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1520                 return -E2BIG;
1521
1522         if (!nvmet_fc_tgtport_get(tgtport))
1523                 return -ESHUTDOWN;
1524
1525         iod = nvmet_fc_alloc_ls_iod(tgtport);
1526         if (!iod) {
1527                 nvmet_fc_tgtport_put(tgtport);
1528                 return -ENOENT;
1529         }
1530
1531         iod->lsreq = lsreq;
1532         iod->fcpreq = NULL;
1533         memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1534         iod->rqstdatalen = lsreqbuf_len;
1535
1536         schedule_work(&iod->work);
1537
1538         return 0;
1539 }
1540 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
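/*
 * Hedged usage sketch, not taken from this file: one plausible way an LLDD
 * could hand a received LS frame to nvmet-fc.  The exchange type and the
 * example_* names are hypothetical; only nvmet_fc_rcv_ls_req() and
 * struct nvmefc_tgt_ls_req are part of this API.  Because nvmet-fc copies
 * the payload, "buf" may be reused as soon as the call returns, and a
 * non-zero return means the LLDD should abort the exchange.
 *
 *	static void example_lldd_recv_ls(struct example_exchange *exch,
 *					 void *buf, u32 len)
 *	{
 *		struct nvmefc_tgt_ls_req *lsreq = &exch->nvmet_lsreq;
 *
 *		if (nvmet_fc_rcv_ls_req(exch->lport->nvmet_targetport,
 *					lsreq, buf, len))
 *			example_abort_exchange(exch);
 *	}
 */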
1541
1542
1543 /*
1544  * **********************
1545  * Start of FCP handling
1546  * **********************
1547  */
1548
1549 static int
1550 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1551 {
1552         struct scatterlist *sg;
1553         struct page *page;
1554         unsigned int nent;
1555         u32 page_len, length;
1556         int i = 0;
1557
1558         length = fod->total_length;
1559         nent = DIV_ROUND_UP(length, PAGE_SIZE);
1560         sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1561         if (!sg)
1562                 goto out;
1563
1564         sg_init_table(sg, nent);
1565
1566         while (length) {
1567                 page_len = min_t(u32, length, PAGE_SIZE);
1568
1569                 page = alloc_page(GFP_KERNEL);
1570                 if (!page)
1571                         goto out_free_pages;
1572
1573                 sg_set_page(&sg[i], page, page_len, 0);
1574                 length -= page_len;
1575                 i++;
1576         }
1577
1578         fod->data_sg = sg;
1579         fod->data_sg_cnt = nent;
1580         fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1581                                 ((fod->io_dir == NVMET_FCP_WRITE) ?
1582                                         DMA_FROM_DEVICE : DMA_TO_DEVICE));
1583                                 /* note: write from initiator perspective */
1584
1585         return 0;
1586
1587 out_free_pages:
1588         while (i > 0) {
1589                 i--;
1590                 __free_page(sg_page(&sg[i]));
1591         }
1592         kfree(sg);
1593         fod->data_sg = NULL;
1594         fod->data_sg_cnt = 0;
1595 out:
1596         return NVME_SC_INTERNAL;
1597 }
1598
1599 static void
1600 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1601 {
1602         struct scatterlist *sg;
1603         int count;
1604
1605         if (!fod->data_sg || !fod->data_sg_cnt)
1606                 return;
1607
1608         fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1609                                 ((fod->io_dir == NVMET_FCP_WRITE) ?
1610                                         DMA_FROM_DEVICE : DMA_TO_DEVICE));
1611         for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1612                 __free_page(sg_page(sg));
1613         kfree(fod->data_sg);
             /* clear so a second call (e.g. from an abort path) is a no-op */
             fod->data_sg = NULL;
             fod->data_sg_cnt = 0;
1614 }
1615
1616
1617 static bool
1618 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1619 {
1620         u32 sqtail, used;
1621
1622         /* egad, this is ugly. And sqtail is just a best guess */
1623         sqtail = atomic_read(&q->sqtail) % q->sqsize;
1624
1625         used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1626         return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1627 }
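/*
 * Worked example (illustrative numbers): with sqsize == 32, sqhd == 2 and a
 * guessed sqtail of 30, used == 28 and 28 * 10 (280) >= 31 * 9 (279), so the
 * queue is treated as 90% full; with sqhd == 5 it is not (250 < 279).
 */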
1628
1629 /*
1630  * Prep RSP payload.
1631  * May be an NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1632  */
1633 static void
1634 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1635                                 struct nvmet_fc_fcp_iod *fod)
1636 {
1637         struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1638         struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1639         struct nvme_completion *cqe = &ersp->cqe;
1640         u32 *cqewd = (u32 *)cqe;
1641         bool send_ersp = false;
1642         u32 rsn, rspcnt, xfr_length;
1643
1644         if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1645                 xfr_length = fod->total_length;
1646         else
1647                 xfr_length = fod->offset;
1648
1649         /*
1650          * Check whether we can send an all-zeros response.
1651          *   Note: to send an all-zeros response, the NVME-FC host transport will
1652          *   recreate the CQE. The host transport knows: sq id, SQHD (last
1653          *   seen in an ersp), and command_id. Thus it will create a
1654          *   zero-filled CQE with those known fields filled in. Transport
1655          *   must send an ersp for any condition where the cqe won't match
1656          *   this.
1657          *
1658          * Here are the FC-NVME mandated cases where we must send an ersp:
1659          *  every N responses, where N=ersp_ratio
1660          *  force fabric commands to send ersp's (not in FC-NVME but good
1661          *    practice)
1662          *  normal cmds: any time status is non-zero, or status is zero
1663          *     but words 0 or 1 are non-zero.
1664          *  the SQ is 90% or more full
1665          *  the cmd is a fused command
1666          *  transferred data length not equal to cmd iu length
1667          */
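         /*
          * Illustrative case: a successful, non-fused read that transferred
          * exactly total_length bytes, with status 0, CQE words 0/1 zero,
          * the SQ under 90% full and rspcnt not a multiple of ersp_ratio,
          * takes the zeroed-response path below; anything else sends a
          * full ersp.
          */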
1668         rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1669         if (!(rspcnt % fod->queue->ersp_ratio) ||
1670             sqe->opcode == nvme_fabrics_command ||
1671             xfr_length != fod->total_length ||
1672             (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1673             (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1674             queue_90percent_full(fod->queue, cqe->sq_head))
1675                 send_ersp = true;
1676
1677         /* re-set the fields */
1678         fod->fcpreq->rspaddr = ersp;
1679         fod->fcpreq->rspdma = fod->rspdma;
1680
1681         if (!send_ersp) {
1682                 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1683                 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1684         } else {
1685                 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1686                 rsn = atomic_inc_return(&fod->queue->rsn);
1687                 ersp->rsn = cpu_to_be32(rsn);
1688                 ersp->xfrd_len = cpu_to_be32(xfr_length);
1689                 fod->fcpreq->rsplen = sizeof(*ersp);
1690         }
1691
1692         fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1693                                   sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1694 }
1695
1696 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1697
1698 static void
1699 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1700                                 struct nvmet_fc_fcp_iod *fod)
1701 {
1702         int ret;
1703
1704         fod->fcpreq->op = NVMET_FCOP_RSP;
1705         fod->fcpreq->timeout = 0;
1706
1707         nvmet_fc_prep_fcp_rsp(tgtport, fod);
1708
1709         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1710         if (ret)
1711                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1712 }
1713
1714 static void
1715 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1716                                 struct nvmet_fc_fcp_iod *fod, u8 op)
1717 {
1718         struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1719         struct scatterlist *sg, *datasg;
1720         u32 tlen, sg_off;
1721         int ret;
1722
1723         fcpreq->op = op;
1724         fcpreq->offset = fod->offset;
1725         fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1726         tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1727                         (fod->total_length - fod->offset));
1728         tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1729         tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1730                                         * PAGE_SIZE);
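         /*
          * Illustrative sizing (assumed values): with total_length == 1 MB,
          * 4 KB pages and both segment limits at or above 64, tlen is capped
          * by NVMET_FC_MAX_KB_PER_XFR at 256 KB, so this payload moves as
          * four 256 KB data operations.
          */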
1731         fcpreq->transfer_length = tlen;
1732         fcpreq->transferred_length = 0;
1733         fcpreq->fcp_error = 0;
1734         fcpreq->rsplen = 0;
1735
1736         fcpreq->sg_cnt = 0;
1737
1738         datasg = fod->next_sg;
1739         sg_off = fod->next_sg_offset;
1740
1741         for (sg = fcpreq->sg ; tlen; sg++) {
1742                 *sg = *datasg;
1743                 if (sg_off) {
1744                         sg->offset += sg_off;
1745                         sg->length -= sg_off;
1746                         sg->dma_address += sg_off;
1747                         sg_off = 0;
1748                 }
1749                 if (tlen < sg->length) {
1750                         sg->length = tlen;
1751                         fod->next_sg = datasg;
1752                         fod->next_sg_offset += tlen;
1753                 } else if (tlen == sg->length) {
1754                         fod->next_sg_offset = 0;
1755                         fod->next_sg = sg_next(datasg);
1756                 } else {
1757                         fod->next_sg_offset = 0;
1758                         datasg = sg_next(datasg);
1759                 }
1760                 tlen -= sg->length;
1761                 fcpreq->sg_cnt++;
1762         }
1763
1764         /*
1765          * If the last READDATA request: check if LLDD supports
1766          * combined xfr with response.
1767          */
1768         if ((op == NVMET_FCOP_READDATA) &&
1769             ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1770             (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1771                 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1772                 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1773         }
1774
1775         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1776         if (ret) {
1777                 /*
1778                  * should be ok to set w/o lock as it's in the thread of
1779                  * execution (not an async timer routine) and doesn't
1780                  * contend with any clearing action
1781                  */
1782                 fod->abort = true;
1783
1784                 if (op == NVMET_FCOP_WRITEDATA)
1785                         nvmet_req_complete(&fod->req,
1786                                         NVME_SC_FC_TRANSPORT_ERROR);
1787                 else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1788                         fcpreq->fcp_error = ret;
1789                         fcpreq->transferred_length = 0;
1790                         nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1791                 }
1792         }
1793 }
1794
1795 static void
1796 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
1797 {
1798         struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
1799         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1800         unsigned long flags;
1801         bool abort;
1802
1803         spin_lock_irqsave(&fod->flock, flags);
1804         abort = fod->abort;
1805         spin_unlock_irqrestore(&fod->flock, flags);
1806
1807         /* if in the middle of an io and we need to tear down */
1808         if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
1809                 /* data no longer needed */
1810                 nvmet_fc_free_tgt_pgs(fod);
1811
1812                 if (fcpreq->fcp_error || abort)
1813                         nvmet_req_complete(&fod->req, fcpreq->fcp_error);
1814
1815                 return;
1816         }
1817
1818         switch (fcpreq->op) {
1819
1820         case NVMET_FCOP_WRITEDATA:
1821                 if (abort || fcpreq->fcp_error ||
1822                     fcpreq->transferred_length != fcpreq->transfer_length) {
1823                         nvmet_req_complete(&fod->req,
1824                                         NVME_SC_FC_TRANSPORT_ERROR);
1825                         return;
1826                 }
1827
1828                 fod->offset += fcpreq->transferred_length;
1829                 if (fod->offset != fod->total_length) {
1830                         /* transfer the next chunk */
1831                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1832                                                 NVMET_FCOP_WRITEDATA);
1833                         return;
1834                 }
1835
1836                 /* data transfer complete, resume with nvmet layer */
1837
1838                 fod->req.execute(&fod->req);
1839
1840                 break;
1841
1842         case NVMET_FCOP_READDATA:
1843         case NVMET_FCOP_READDATA_RSP:
1844                 if (abort || fcpreq->fcp_error ||
1845                     fcpreq->transferred_length != fcpreq->transfer_length) {
1846                         /* data no longer needed */
1847                         nvmet_fc_free_tgt_pgs(fod);
1848
1849                         nvmet_fc_abort_op(tgtport, fod->fcpreq);
1850                         return;
1851                 }
1852
1853                 /* success */
1854
1855                 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1856                         /* data no longer needed */
1857                         nvmet_fc_free_tgt_pgs(fod);
1858                         fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1859                                         sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1860                         nvmet_fc_free_fcp_iod(fod->queue, fod);
1861                         return;
1862                 }
1863
1864                 fod->offset += fcpreq->transferred_length;
1865                 if (fod->offset != fod->total_length) {
1866                         /* transfer the next chunk */
1867                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1868                                                 NVMET_FCOP_READDATA);
1869                         return;
1870                 }
1871
1872                 /* data transfer complete, send response */
1873
1874                 /* data no longer needed */
1875                 nvmet_fc_free_tgt_pgs(fod);
1876
1877                 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1878
1879                 break;
1880
1881         case NVMET_FCOP_RSP:
1882         case NVMET_FCOP_ABORT:
1883                 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1884                                 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1885                 nvmet_fc_free_fcp_iod(fod->queue, fod);
1886                 break;
1887
1888         default:
1889                 nvmet_fc_free_tgt_pgs(fod);
1890                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1891                 break;
1892         }
1893 }
1894
1895 /*
1896  * actual completion handler after execution by the nvmet layer
1897  */
1898 static void
1899 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
1900                         struct nvmet_fc_fcp_iod *fod, int status)
1901 {
1902         struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1903         struct nvme_completion *cqe = &fod->rspiubuf.cqe;
1904         unsigned long flags;
1905         bool abort;
1906
1907         spin_lock_irqsave(&fod->flock, flags);
1908         abort = fod->abort;
1909         spin_unlock_irqrestore(&fod->flock, flags);
1910
1911         /* if we have a CQE, snoop the last sq_head value */
1912         if (!status)
1913                 fod->queue->sqhd = cqe->sq_head;
1914
1915         if (abort) {
1916                 /* data no longer needed */
1917                 nvmet_fc_free_tgt_pgs(fod);
1918
1919                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1920                 return;
1921         }
1922
1923         /* if an error handling the cmd post initial parsing */
1924         if (status) {
1925                 /* fudge up a failed CQE status for our transport error */
1926                 memset(cqe, 0, sizeof(*cqe));
1927                 cqe->sq_head = fod->queue->sqhd;        /* echo last cqe sqhd */
1928                 cqe->sq_id = cpu_to_le16(fod->queue->qid);
1929                 cqe->command_id = sqe->command_id;
1930                 cqe->status = cpu_to_le16(status);
1931         } else {
1932
1933                 /*
1934                  * try to push the data even if the SQE status is non-zero.
1935                  * There may be a status where data still was intended to
1936                  * be moved
1937                  */
1938                 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
1939                         /* push the data over before sending rsp */
1940                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1941                                                 NVMET_FCOP_READDATA);
1942                         return;
1943                 }
1944
1945                 /* writes & no data - fall thru */
1946         }
1947
1948         /* data no longer needed */
1949         nvmet_fc_free_tgt_pgs(fod);
1950
1951         nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1952 }
1953
1954
1955 static void
1956 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
1957 {
1958         struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
1959         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1960
1961         __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
1962 }
1963
1964
1965 /*
1966  * Actual processing routine for a received FC-NVME FCP command from the LLD
1967  */
1968 void
1969 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
1970                         struct nvmet_fc_fcp_iod *fod)
1971 {
1972         struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
1973         int ret;
1974
1975         /*
1976          * Fused commands are currently not supported in the Linux
1977          * implementation.
1978          *
1979          * As such, the FC transport implementation does not inspect
1980          * fused commands, nor does it hold them and order delivery to
1981          * the upper layer (based on CSN) until both halves arrive.
1982          */
1983
1984         fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
1985
1986         fod->total_length = be32_to_cpu(cmdiu->data_len);
1987         if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
1988                 fod->io_dir = NVMET_FCP_WRITE;
1989                 if (!nvme_is_write(&cmdiu->sqe))
1990                         goto transport_error;
1991         } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
1992                 fod->io_dir = NVMET_FCP_READ;
1993                 if (nvme_is_write(&cmdiu->sqe))
1994                         goto transport_error;
1995         } else {
1996                 fod->io_dir = NVMET_FCP_NODATA;
1997                 if (fod->total_length)
1998                         goto transport_error;
1999         }
2000
2001         fod->req.cmd = &fod->cmdiubuf.sqe;
2002         fod->req.rsp = &fod->rspiubuf.cqe;
2003         fod->req.port = fod->queue->port;
2004
2005         /* ensure nvmet handlers will set cmd handler callback */
2006         fod->req.execute = NULL;
2007
2008         /* clear any response payload */
2009         memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2010
2011         ret = nvmet_req_init(&fod->req,
2012                                 &fod->queue->nvme_cq,
2013                                 &fod->queue->nvme_sq,
2014                                 &nvmet_fc_tgt_fcp_ops);
2015         if (!ret) {     /* bad SQE content */
2016                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
2017                 return;
2018         }
2019
2020         /* keep a running counter of tail position */
2021         atomic_inc(&fod->queue->sqtail);
2022
2023         fod->data_sg = NULL;
2024         fod->data_sg_cnt = 0;
2025         if (fod->total_length) {
2026                 ret = nvmet_fc_alloc_tgt_pgs(fod);
2027                 if (ret) {
2028                         nvmet_req_complete(&fod->req, ret);
2029                         return;
2030                 }
2031         }
2032         fod->req.sg = fod->data_sg;
2033         fod->req.sg_cnt = fod->data_sg_cnt;
2034         fod->offset = 0;
2035         fod->next_sg = fod->data_sg;
2036         fod->next_sg_offset = 0;
2037
2038         if (fod->io_dir == NVMET_FCP_WRITE) {
2039                 /* pull the data over before invoking nvmet layer */
2040                 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2041                 return;
2042         }
2043
2044         /*
2045          * Reads or no data:
2046          *
2047          * can invoke the nvmet_layer now. If read data, cmd completion will
2048          * push the data
2049          */
2050
2051         fod->req.execute(&fod->req);
2052
2053         return;
2054
2055 transport_error:
2056         nvmet_fc_abort_op(tgtport, fod->fcpreq);
2057 }
2058
2059 /*
2060  * Work-queue entry point that invokes the FCP command handler above
2061  */
2062 static void
2063 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2064 {
2065         struct nvmet_fc_fcp_iod *fod =
2066                 container_of(work, struct nvmet_fc_fcp_iod, work);
2067         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2068
2069         nvmet_fc_handle_fcp_rqst(tgtport, fod);
2070 }
2071
2072 /**
2073  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2074  *                       upon the reception of an NVME FCP CMD IU.
2075  *
2076  * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2077  * layer for processing.
2078  *
2079  * The nvmet-fc layer will copy the cmd payload to an internal structure for
2080  * processing.  As such, upon completion of the routine, the LLDD may
2081  * immediately free/reuse the CMD IU buffer passed in the call.
2082  *
2083  * If this routine returns an error, the LLDD should abort the exchange.
2084  *
2085  * @target_port: pointer to the (registered) target port the FCP CMD IU
2086  *              was received on.
2087  * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2088  *              the exchange corresponding to the FCP CMD IU.
2089  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2090  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2091  */
2092 int
2093 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2094                         struct nvmefc_tgt_fcp_req *fcpreq,
2095                         void *cmdiubuf, u32 cmdiubuf_len)
2096 {
2097         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2098         struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2099         struct nvmet_fc_tgt_queue *queue;
2100         struct nvmet_fc_fcp_iod *fod;
2101
2102         /* validate iu, so the connection id can be used to find the queue */
2103         if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2104                         (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2105                         (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2106                         (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2107                 return -EIO;
2108
2109
2110         queue = nvmet_fc_find_target_queue(tgtport,
2111                                 be64_to_cpu(cmdiu->connection_id));
2112         if (!queue)
2113                 return -ENOTCONN;
2114
2115         /*
2116          * Note: a reference was taken by find_target_queue.
2117          * After successful fod allocation, the fod inherits the
2118          * ownership of that reference and releases the reference
2119          * when the fod is freed.
2120          */
2121
2122         fod = nvmet_fc_alloc_fcp_iod(queue);
2123         if (!fod) {
2124                 /* release the queue lookup reference */
2125                 nvmet_fc_tgt_q_put(queue);
2126                 return -ENOENT;
2127         }
2128
2129         fcpreq->nvmet_fc_private = fod;
2130         fod->fcpreq = fcpreq;
2131         /*
2132          * Put all admin cmds on hw queue id 0. All io commands go to
2133          * their respective hw queue, selected on a modulo basis.
2134          */
2135         fcpreq->hwqid = queue->qid ?
2136                         ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
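         /*
          * Illustrative mapping (example values): with max_hw_queues == 4,
          * io queue ids 1..8 select hwqid 0,1,2,3,0,1,2,3, while the admin
          * queue (qid 0) always uses hwqid 0.
          */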
2137         memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2138
2139         queue_work_on(queue->cpu, queue->work_q, &fod->work);
2140
2141         return 0;
2142 }
2143 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
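/*
 * Hedged usage sketch, not taken from this file: one plausible way an LLDD
 * could hand a received FCP CMD IU to nvmet-fc.  The exchange type and the
 * example_* names are hypothetical; only nvmet_fc_rcv_fcp_req() and
 * struct nvmefc_tgt_fcp_req are part of this API.  The CMD IU buffer may be
 * reused once the call returns, and on a non-zero return (e.g. -EIO for a
 * malformed IU or -ENOTCONN for an unknown connection id) the LLDD should
 * abort the exchange.
 *
 *	static void example_lldd_recv_cmd(struct example_exchange *exch,
 *					  void *cmdiu, u32 cmdiu_len)
 *	{
 *		struct nvmefc_tgt_fcp_req *fcpreq = &exch->nvmet_fcpreq;
 *
 *		if (nvmet_fc_rcv_fcp_req(exch->lport->nvmet_targetport,
 *					 fcpreq, cmdiu, cmdiu_len))
 *			example_abort_exchange(exch);
 *	}
 */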
2144
2145 enum {
2146         FCT_TRADDR_ERR          = 0,
2147         FCT_TRADDR_WWNN         = 1 << 0,
2148         FCT_TRADDR_WWPN         = 1 << 1,
2149 };
2150
2151 struct nvmet_fc_traddr {
2152         u64     nn;
2153         u64     pn;
2154 };
2155
2156 static const match_table_t traddr_opt_tokens = {
2157         { FCT_TRADDR_WWNN,      "nn-%s"         },
2158         { FCT_TRADDR_WWPN,      "pn-%s"         },
2159         { FCT_TRADDR_ERR,       NULL            }
2160 };
2161
2162 static int
2163 nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
2164 {
2165         substring_t args[MAX_OPT_ARGS];
2166         char *options, *o, *p;
2167         int token, ret = 0;
2168         u64 token64;
2169
2170         options = o = kstrdup(buf, GFP_KERNEL);
2171         if (!options)
2172                 return -ENOMEM;
2173
2174         while ((p = strsep(&o, ",\n")) != NULL) {
2175                 if (!*p)
2176                         continue;
2177
2178                 token = match_token(p, traddr_opt_tokens, args);
2179                 switch (token) {
2180                 case FCT_TRADDR_WWNN:
2181                         if (match_u64(args, &token64)) {
2182                                 ret = -EINVAL;
2183                                 goto out;
2184                         }
2185                         traddr->nn = token64;
2186                         break;
2187                 case FCT_TRADDR_WWPN:
2188                         if (match_u64(args, &token64)) {
2189                                 ret = -EINVAL;
2190                                 goto out;
2191                         }
2192                         traddr->pn = token64;
2193                         break;
2194                 default:
2195                         pr_warn("unknown traddr token or missing value '%s'\n",
2196                                         p);
2197                         ret = -EINVAL;
2198                         goto out;
2199                 }
2200         }
2201
2202 out:
2203         kfree(options);
2204         return ret;
2205 }
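/*
 * Hedged example, assumed rather than documented here: with the token table
 * above and the ",\n" separators used by strsep(), a transport address such
 * as
 *
 *	nn-0x20000090fac7e5cc,pn-0x10000090fac7e5cc
 *
 * would parse into traddr->nn == 0x20000090fac7e5cc (WWNN) and
 * traddr->pn == 0x10000090fac7e5cc (WWPN).  The WWN values are illustrative
 * only.
 */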
2206
2207 static int
2208 nvmet_fc_add_port(struct nvmet_port *port)
2209 {
2210         struct nvmet_fc_tgtport *tgtport;
2211         struct nvmet_fc_traddr traddr = { 0L, 0L };
2212         unsigned long flags;
2213         int ret;
2214
2215         /* validate the address info */
2216         if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2217             (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2218                 return -EINVAL;
2219
2220         /* map the traddr address info to a target port */
2221
2222         ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
2223         if (ret)
2224                 return ret;
2225
2226         ret = -ENXIO;
2227         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2228         list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2229                 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2230                     (tgtport->fc_target_port.port_name == traddr.pn)) {
2231                         /* an FC port can map to only one nvmet port id */
2232                         if (!tgtport->port) {
2233                                 tgtport->port = port;
2234                                 port->priv = tgtport;
2235                                 ret = 0;
2236                         } else
2237                                 ret = -EALREADY;
2238                         break;
2239                 }
2240         }
2241         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2242         return ret;
2243 }
2244
2245 static void
2246 nvmet_fc_remove_port(struct nvmet_port *port)
2247 {
2248         struct nvmet_fc_tgtport *tgtport = port->priv;
2249         unsigned long flags;
2250
2251         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2252         if (tgtport->port == port) {
2253                 nvmet_fc_tgtport_put(tgtport);
2254                 tgtport->port = NULL;
2255         }
2256         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2257 }
2258
2259 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2260         .owner                  = THIS_MODULE,
2261         .type                   = NVMF_TRTYPE_FC,
2262         .msdbd                  = 1,
2263         .add_port               = nvmet_fc_add_port,
2264         .remove_port            = nvmet_fc_remove_port,
2265         .queue_response         = nvmet_fc_fcp_nvme_cmd_done,
2266         .delete_ctrl            = nvmet_fc_delete_ctrl,
2267 };
2268
2269 static int __init nvmet_fc_init_module(void)
2270 {
2271         return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2272 }
2273
2274 static void __exit nvmet_fc_exit_module(void)
2275 {
2276         /* sanity check - all targetports should have been removed */
2277         if (!list_empty(&nvmet_fc_target_list))
2278                 pr_warn("%s: targetport list not empty\n", __func__);
2279
2280         nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2281
2282         ida_destroy(&nvmet_fc_tgtport_cnt);
2283 }
2284
2285 module_init(nvmet_fc_init_module);
2286 module_exit(nvmet_fc_exit_module);
2287
2288 MODULE_LICENSE("GPL v2");