// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 *  BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

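/*
 * Map an I-T nexus tag to its itnim. Masking with (num_itnims - 1)
 * always yields an in-range index, but it is a true modulo of the tag
 * only when num_itnims is a power of two.
 */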
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)       do {                            \
        WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);                        \
        list_del(&(__itnim)->qe);                                       \
        WARN_ON(!list_empty(&(__itnim)->io_q));                         \
        WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
        WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
} while (0)

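/*
 * The three callback macros below share one dispatch pattern: when FCS
 * runs in the same context (bfa->fcs is set), the driver callback is
 * invoked synchronously; otherwise it is deferred through the BFA
 * callback queue and later runs from the __bfa_cb_itnim_*() handlers
 * defined further down.
 */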
#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);                   \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));                      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);                  \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));                     \
        }                                                               \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);                     \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));                        \
        }                                                               \
} while (0)

enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
};

/*
 *  itnim state machine event
 */
enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /*  itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /*  itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /*  itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /*  firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /*  deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /*  IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /*  second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /*  IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /*  queue space available */
};
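
/*
 * Typical itnim lifecycle, as implemented by the state machine below:
 *
 *   uninit --CREATE--> created --ONLINE--> fwcreate --FWRSP--> online
 *   online --OFFLINE--> cleanup_offline --CLEANUP--> fwdelete
 *   fwdelete --FWRSP--> offline --DELETE--> uninit
 *
 * The *_qfull states are entered when the request queue is full and
 * are left on QRESUME; HWFAIL drops into iocdisable from most states.
 */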

/*
 *  BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START       = 1,    /*  io start request from host */
        BFA_IOIM_SM_COMP_GOOD   = 2,    /*  io good comp, resource free */
        BFA_IOIM_SM_COMP        = 3,    /*  io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG   = 4,    /*  io comp with unknown tag, resource is free */
        BFA_IOIM_SM_DONE        = 5,    /*  io comp, resource not free */
        BFA_IOIM_SM_FREE        = 6,    /*  io resource is freed */
        BFA_IOIM_SM_ABORT       = 7,    /*  abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP  = 8,    /*  abort from f/w */
        BFA_IOIM_SM_ABORT_DONE  = 9,    /*  abort completion from f/w */
        BFA_IOIM_SM_QRESUME     = 10,   /*  CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED   = 11,   /*  SG page allocation successful */
        BFA_IOIM_SM_SQRETRY     = 12,   /*  sequence recovery retry */
        BFA_IOIM_SM_HCB         = 13,   /*  bfa callback complete */
        BFA_IOIM_SM_CLEANUP     = 14,   /*  IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART     = 15,   /*  IO cleanup start from tskim */
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup done from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
};
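
/*
 * Normal IO flow: uninit --START--> active --COMP_GOOD/COMP--> hcb
 * --HCB--> uninit. ABORT moves an active IO through the abort state,
 * and a full request queue parks the IO in one of the *_qfull states
 * until QRESUME retries the send.
 */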


/*
 *  BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);                                 \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);                    \
} while (0)


enum bfa_tskim_event {
        BFA_TSKIM_SM_START      = 1,    /*  TM command start            */
        BFA_TSKIM_SM_DONE       = 2,    /*  TM completion               */
        BFA_TSKIM_SM_QRESUME    = 3,    /*  resume after qfull          */
        BFA_TSKIM_SM_HWFAIL     = 5,    /*  IOC h/w failure event       */
        BFA_TSKIM_SM_HCB        = 6,    /*  BFA callback completion     */
        BFA_TSKIM_SM_IOS_DONE   = 7,    /*  IO and sub TM completions   */
        BFA_TSKIM_SM_CLEANUP    = 8,    /*  TM cleanup on ITN offline   */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion         */
        BFA_TSKIM_SM_UTAG       = 10,   /*  TM completion unknown tag   */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov(void *itnim_arg);
static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t    bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void             bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                                        struct scsi_lun lun);
static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
/*
 *  BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        bfa_itnim_meminfo(cfg, km_len);

        /*
         * IO memory
         */
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
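
/*
 * Worked example (illustrative numbers only): with num_ioim_reqs = 2000
 * and num_tskim_reqs configured below BFA_TSKIM_MIN, km_len grows by
 * 2000 * (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s))
 * plus BFA_TSKIM_MIN * sizeof(struct bfa_tskim_s), on top of the itnim
 * memory added by bfa_itnim_meminfo().
 */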


static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
                struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_s *bfa = fcp->bfa;

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->fcp              = fcp;
        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
        fcpim->num_tskim_reqs   = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp     = NULL;
        fcpim->profile_start    = NULL;

        bfa_itnim_attach(fcpim);
        bfa_tskim_attach(fcpim);
        bfa_ioim_attach(fcpim);
}

void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;
        /* Enqueue unused tskim resources to free_q */
        list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}

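/*
 * Set the path timeout. The caller passes seconds; the value is stored
 * internally in milliseconds, clamped to BFA_FCPIM_PATHTOV_MAX.
 * bfa_fcpim_path_tov_get() converts it back to seconds.
 */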
void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->path_tov / 1000;
}

#define bfa_fcpim_add_iostats(__l, __r, __stats)        \
        (__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
                struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
        struct bfa_itnim_latency_s *io_lat =
                        &(ioim->itnim->ioprofile.io_latency);
        u32 val, idx;

        val = (u32)(jiffies - ioim->start_time);
        idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
        bfa_itnim_ioprofile_update(ioim->itnim, idx);

        io_lat->count[idx]++;
        io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
        io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
        io_lat->avg[idx] += val;
}
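
/*
 * Note: avg[idx] accumulates the raw sum of latency samples (in
 * jiffies); a consumer is expected to divide by count[idx] when
 * reporting the average.
 */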

void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
        ioim->start_time = jiffies;
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
        struct bfa_itnim_s *itnim;
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;

        /* clear IO stats in all itnims */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        fcpim->io_profile = BFA_TRUE;
        fcpim->io_profile_start_time = time;
        fcpim->profile_comp = bfa_ioim_profile_comp;
        fcpim->profile_start = bfa_ioim_profile_start;
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        fcpim->io_profile = BFA_FALSE;
        fcpim->io_profile_start_time = 0;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
        return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->q_depth;
}

/*
 *  BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - only the create event is expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                 enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head        *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /*
         * For IO requests in the pending queue, we pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s  *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head        *qe, *qen;

        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /*
                 * Move IO to a cleanup queue from active queue so that a later
                 * TM will not pickup this IO.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}
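
/*
 * The wc (waiting counter) pattern above: bfa_wc_init() registers
 * bfa_itnim_cleanp_comp() as the completion callback, every IO/TM
 * being cleaned up takes a reference via bfa_wc_up(), and
 * bfa_wc_wait() drops the initial reference so that the callback
 * fires once each outstanding IO/TM has reported back through
 * bfa_itnim_iodone()/bfa_itnim_tskdone() below.
 */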

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Called to resume any I/O requests waiting for room in the request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 *  bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
        struct bfa_s    *bfa = fcpim->bfa;
        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int     i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_create_req_s *m;

        itnim->msg_no++;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}
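
/*
 * The request queue pattern above recurs throughout this file:
 * bfa_reqq_next() returns NULL when the queue is full, in which case
 * the wait element is queued and bfa_itnim_qresume() later raises
 * BFA_ITNIM_SM_QRESUME so the *_qfull state can retry the send.
 */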

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

/*
 * Clean up all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}

/*
 * Stop IO TOV timer and fail back any pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
        bfa_boolean_t pathtov_active = BFA_FALSE;

        if (itnim->iotov_active)
                pathtov_active = BFA_TRUE;

        bfa_itnim_iotov_stop(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov(itnim->ditn);
}

static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
                itnim->stats.iocomp_timedout;
        fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
                itnim->stats.iocom_sqer_needed;
        fcpim->del_itn_stats.del_itn_iocom_res_free +=
                itnim->stats.iocom_res_free;
        fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
                itnim->stats.iocom_hostabrts;
        fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
        fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;

        bfa_itn_create(bfa, rport, bfa_itnim_isr);

        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);

        itnim->ditn = ditn;

        bfa_stats(itnim, creates);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

        return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, deletes);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
        itnim->seq_rec = seq_rec;
        bfa_stats(itnim, onlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, offlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO request.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
        return itnim->fcpim->path_tov && itnim->iotov_active &&
                (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}

#define bfa_io_lat_clock_res_div        HZ
#define bfa_io_lat_clock_res_mul        1000
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
                        struct bfa_itnim_ioprofile_s *ioprofile)
{
        struct bfa_fcpim_s *fcpim;

        if (!itnim)
                return BFA_STATUS_NO_FCPIM_NEXUS;

        fcpim = BFA_FCPIM(itnim->bfa);

        if (!fcpim->io_profile)
                return BFA_STATUS_IOPROFILE_OFF;

        itnim->ioprofile.index = BFA_IOBUCKET_MAX;
        /* unsigned 32-bit time_t overflow here in y2106 */
        itnim->ioprofile.io_profile_start_time =
                                bfa_io_profile_start_time(itnim->bfa);
        itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
        itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
        *ioprofile = itnim->ioprofile;

        return BFA_STATUS_OK;
}
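
/*
 * Latency samples are collected in jiffies (see bfa_ioim_profile_comp()
 * above); clock_res_mul/clock_res_div let a consumer convert a sample
 * to milliseconds as val * 1000 / HZ.
 */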

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
        int j;

        if (!itnim)
                return;

        memset(&itnim->stats, 0, sizeof(itnim->stats));
        memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
        for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                itnim->ioprofile.io_latency.min[j] = ~0;
}
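
/*
 * min[] is seeded with ~0 (all ones) above so that the first latency
 * sample recorded by bfa_ioim_profile_comp() always replaces it.
 */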

/*
 *  BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_START:
                if (!bfa_itnim_is_online(ioim->itnim)) {
                        if (!bfa_itnim_hold_io(ioim->itnim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->fcpim->ioim_comp_q);
                                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                                __bfa_cb_ioim_pathtov, ioim);
                        } else {
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->itnim->pending_q);
                        }
                        break;
                }

                if (ioim->nsges > BFI_SGE_INLINE) {
                        if (!bfa_ioim_sgpg_alloc(ioim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
                                return;
                        }
                }

                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }

                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_IOTOV:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_pathtov, ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * IO in pending queue can get abort requests. Complete abort
                 * requests immediately.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                        __bfa_cb_ioim_abort, ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_SGALLOCED:
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_CLEANUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1602                               ioim);
1603                 break;
1604
1605         default:
1606                 bfa_sm_fault(ioim->bfa, event);
1607         }
1608 }
1609
1610 /*
1611  * IO is active.
1612  */
1613 static void
1614 bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1615 {
1616         switch (event) {
1617         case BFA_IOIM_SM_COMP_GOOD:
1618                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1619                 bfa_ioim_move_to_comp_q(ioim);
1620                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1621                               __bfa_cb_ioim_good_comp, ioim);
1622                 break;
1623
1624         case BFA_IOIM_SM_COMP:
1625                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1626                 bfa_ioim_move_to_comp_q(ioim);
1627                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1628                               ioim);
1629                 break;
1630
1631         case BFA_IOIM_SM_DONE:
1632                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1633                 bfa_ioim_move_to_comp_q(ioim);
1634                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1635                               ioim);
1636                 break;
1637
1638         case BFA_IOIM_SM_ABORT:
1639                 ioim->iosp->abort_explicit = BFA_TRUE;
1640                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1641
1642                 if (bfa_ioim_send_abort(ioim))
1643                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1644                 else {
1645                         bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
1646                         bfa_stats(ioim->itnim, qwait);
1647                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1648                                           &ioim->iosp->reqq_wait);
1649                 }
1650                 break;
1651
1652         case BFA_IOIM_SM_CLEANUP:
1653                 ioim->iosp->abort_explicit = BFA_FALSE;
1654                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1655
1656                 if (bfa_ioim_send_abort(ioim))
1657                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1658                 else {
1659                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1660                         bfa_stats(ioim->itnim, qwait);
1661                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1662                                           &ioim->iosp->reqq_wait);
1663                 }
1664                 break;
1665
1666         case BFA_IOIM_SM_HWFAIL:
1667                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1668                 bfa_ioim_move_to_comp_q(ioim);
1669                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1670                               ioim);
1671                 break;
1672
1673         case BFA_IOIM_SM_SQRETRY:
1674                 if (bfa_ioim_maxretry_reached(ioim)) {
1675                         /* max retry reached, free IO */
1676                         bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1677                         bfa_ioim_move_to_comp_q(ioim);
1678                         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1679                                         __bfa_cb_ioim_failed, ioim);
1680                         break;
1681                 }
1682                 /* waiting for the IO tag resource to be freed */
1683                 bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1684                 break;
1685
1686         default:
1687                 bfa_sm_fault(ioim->bfa, event);
1688         }
1689 }
1690
1691 /*
1692  * IO is being retried with a new tag.
1693  */
1694 static void
1695 bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1696 {
1697         switch (event) {
1698         case BFA_IOIM_SM_FREE:
1699                 /* ABTS and RRQ are done. Now retry the IO with a new tag. */
1700                 bfa_ioim_update_iotag(ioim);
1701                 if (!bfa_ioim_send_ioreq(ioim)) {
1702                         bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1703                         break;
1704                 }
1705                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1706                 break;
1707
1708         case BFA_IOIM_SM_CLEANUP:
1709                 ioim->iosp->abort_explicit = BFA_FALSE;
1710                 ioim->io_cbfn = __bfa_cb_ioim_failed;
1711
1712                 if (bfa_ioim_send_abort(ioim))
1713                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1714                 else {
1715                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1716                         bfa_stats(ioim->itnim, qwait);
1717                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1718                                           &ioim->iosp->reqq_wait);
1719                 }
1720                 break;
1721
1722         case BFA_IOIM_SM_HWFAIL:
1723                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1724                 bfa_ioim_move_to_comp_q(ioim);
1725                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1726                          __bfa_cb_ioim_failed, ioim);
1727                 break;
1728
1729         case BFA_IOIM_SM_ABORT:
1730                 /* In this state the IO abort is already done.
1731                  * Waiting for the IO tag resource to be freed.
1732                  */
1733                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1734                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1735                               ioim);
1736                 break;
1737
1738         default:
1739                 bfa_sm_fault(ioim->bfa, event);
1740         }
1741 }
1742
1743 /*
1744  * IO is being aborted, waiting for completion from firmware.
1745  */
1746 static void
1747 bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1748 {
1749         bfa_trc(ioim->bfa, ioim->iotag);
1750         bfa_trc(ioim->bfa, event);
1751
1752         switch (event) {
1753         case BFA_IOIM_SM_COMP_GOOD:
1754         case BFA_IOIM_SM_COMP:
1755         case BFA_IOIM_SM_DONE:
1756         case BFA_IOIM_SM_FREE:
1757                 break;
1758
1759         case BFA_IOIM_SM_ABORT_DONE:
1760                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1761                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1762                               ioim);
1763                 break;
1764
1765         case BFA_IOIM_SM_ABORT_COMP:
1766                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1767                 bfa_ioim_move_to_comp_q(ioim);
1768                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1769                               ioim);
1770                 break;
1771
1772         case BFA_IOIM_SM_COMP_UTAG:
1773                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1774                 bfa_ioim_move_to_comp_q(ioim);
1775                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1776                               ioim);
1777                 break;
1778
1779         case BFA_IOIM_SM_CLEANUP:
1780                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1781                 ioim->iosp->abort_explicit = BFA_FALSE;
1782
1783                 if (bfa_ioim_send_abort(ioim))
1784                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1785                 else {
1786                         bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1787                         bfa_stats(ioim->itnim, qwait);
1788                         bfa_reqq_wait(ioim->bfa, ioim->reqq,
1789                                           &ioim->iosp->reqq_wait);
1790                 }
1791                 break;
1792
1793         case BFA_IOIM_SM_HWFAIL:
1794                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1795                 bfa_ioim_move_to_comp_q(ioim);
1796                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1797                               ioim);
1798                 break;
1799
1800         default:
1801                 bfa_sm_fault(ioim->bfa, event);
1802         }
1803 }
1804
1805 /*
1806  * IO is being cleaned up (implicit abort), waiting for completion from
1807  * firmware.
1808  */
1809 static void
1810 bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1811 {
1812         bfa_trc(ioim->bfa, ioim->iotag);
1813         bfa_trc(ioim->bfa, event);
1814
1815         switch (event) {
1816         case BFA_IOIM_SM_COMP_GOOD:
1817         case BFA_IOIM_SM_COMP:
1818         case BFA_IOIM_SM_DONE:
1819         case BFA_IOIM_SM_FREE:
1820                 break;
1821
1822         case BFA_IOIM_SM_ABORT:
1823                 /*
1824                  * IO is already being aborted implicitly
1825                  */
1826                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1827                 break;
1828
1829         case BFA_IOIM_SM_ABORT_DONE:
1830                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1831                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1832                 bfa_ioim_notify_cleanup(ioim);
1833                 break;
1834
1835         case BFA_IOIM_SM_ABORT_COMP:
1836                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1837                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1838                 bfa_ioim_notify_cleanup(ioim);
1839                 break;
1840
1841         case BFA_IOIM_SM_COMP_UTAG:
1842                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1843                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1844                 bfa_ioim_notify_cleanup(ioim);
1845                 break;
1846
1847         case BFA_IOIM_SM_HWFAIL:
1848                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1849                 bfa_ioim_move_to_comp_q(ioim);
1850                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1851                               ioim);
1852                 break;
1853
1854         case BFA_IOIM_SM_CLEANUP:
1855                 /*
1856                  * IO can already be in the cleanup state due to a TM command;
1857                  * the 2nd cleanup request comes from the ITN offline event.
1858                  */
1859                 break;
1860
1861         default:
1862                 bfa_sm_fault(ioim->bfa, event);
1863         }
1864 }
1865
1866 /*
1867  * IO is waiting for room in request CQ
1868  */
1869 static void
1870 bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1871 {
1872         bfa_trc(ioim->bfa, ioim->iotag);
1873         bfa_trc(ioim->bfa, event);
1874
1875         switch (event) {
1876         case BFA_IOIM_SM_QRESUME:
1877                 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1878                 bfa_ioim_send_ioreq(ioim);
1879                 break;
1880
1881         case BFA_IOIM_SM_ABORT:
1882                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1883                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1884                 bfa_ioim_move_to_comp_q(ioim);
1885                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1886                               ioim);
1887                 break;
1888
1889         case BFA_IOIM_SM_CLEANUP:
1890                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1891                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1892                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1893                               ioim);
1894                 bfa_ioim_notify_cleanup(ioim);
1895                 break;
1896
1897         case BFA_IOIM_SM_HWFAIL:
1898                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1899                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1900                 bfa_ioim_move_to_comp_q(ioim);
1901                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1902                               ioim);
1903                 break;
1904
1905         default:
1906                 bfa_sm_fault(ioim->bfa, event);
1907         }
1908 }
1909
1910 /*
1911  * Active IO is being aborted, waiting for room in request CQ.
1912  */
1913 static void
1914 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1915 {
1916         bfa_trc(ioim->bfa, ioim->iotag);
1917         bfa_trc(ioim->bfa, event);
1918
1919         switch (event) {
1920         case BFA_IOIM_SM_QRESUME:
1921                 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1922                 bfa_ioim_send_abort(ioim);
1923                 break;
1924
1925         case BFA_IOIM_SM_CLEANUP:
1926                 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1927                 ioim->iosp->abort_explicit = BFA_FALSE;
1928                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1929                 break;
1930
1931         case BFA_IOIM_SM_COMP_GOOD:
1932         case BFA_IOIM_SM_COMP:
1933                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1934                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1935                 bfa_ioim_move_to_comp_q(ioim);
1936                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1937                               ioim);
1938                 break;
1939
1940         case BFA_IOIM_SM_DONE:
1941                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1942                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1943                 bfa_ioim_move_to_comp_q(ioim);
1944                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1945                               ioim);
1946                 break;
1947
1948         case BFA_IOIM_SM_HWFAIL:
1949                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1950                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1951                 bfa_ioim_move_to_comp_q(ioim);
1952                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1953                               ioim);
1954                 break;
1955
1956         default:
1957                 bfa_sm_fault(ioim->bfa, event);
1958         }
1959 }
1960
1961 /*
1962  * Active IO is being cleaned up, waiting for room in request CQ.
1963  */
1964 static void
1965 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1966 {
1967         bfa_trc(ioim->bfa, ioim->iotag);
1968         bfa_trc(ioim->bfa, event);
1969
1970         switch (event) {
1971         case BFA_IOIM_SM_QRESUME:
1972                 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1973                 bfa_ioim_send_abort(ioim);
1974                 break;
1975
1976         case BFA_IOIM_SM_ABORT:
1977                 /*
1978                  * IO is already being cleaned up implicitly
1979                  */
1980                 ioim->io_cbfn = __bfa_cb_ioim_abort;
1981                 break;
1982
1983         case BFA_IOIM_SM_COMP_GOOD:
1984         case BFA_IOIM_SM_COMP:
1985                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1986                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1987                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1988                 bfa_ioim_notify_cleanup(ioim);
1989                 break;
1990
1991         case BFA_IOIM_SM_DONE:
1992                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1993                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1994                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1995                 bfa_ioim_notify_cleanup(ioim);
1996                 break;
1997
1998         case BFA_IOIM_SM_HWFAIL:
1999                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2000                 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2001                 bfa_ioim_move_to_comp_q(ioim);
2002                 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2003                               ioim);
2004                 break;
2005
2006         default:
2007                 bfa_sm_fault(ioim->bfa, event);
2008         }
2009 }
2010
2011 /*
2012  * IO bfa callback is pending.
2013  */
2014 static void
2015 bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2016 {
2017         switch (event) {
2018         case BFA_IOIM_SM_HCB:
2019                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2020                 bfa_ioim_free(ioim);
2021                 break;
2022
2023         case BFA_IOIM_SM_CLEANUP:
2024                 bfa_ioim_notify_cleanup(ioim);
2025                 break;
2026
2027         case BFA_IOIM_SM_HWFAIL:
2028                 break;
2029
2030         default:
2031                 bfa_sm_fault(ioim->bfa, event);
2032         }
2033 }
2034
2035 /*
2036  * IO bfa callback is pending. IO resource cannot be freed.
2037  */
2038 static void
2039 bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2040 {
2041         bfa_trc(ioim->bfa, ioim->iotag);
2042         bfa_trc(ioim->bfa, event);
2043
2044         switch (event) {
2045         case BFA_IOIM_SM_HCB:
2046                 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2047                 list_del(&ioim->qe);
2048                 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2049                 break;
2050
2051         case BFA_IOIM_SM_FREE:
2052                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2053                 break;
2054
2055         case BFA_IOIM_SM_CLEANUP:
2056                 bfa_ioim_notify_cleanup(ioim);
2057                 break;
2058
2059         case BFA_IOIM_SM_HWFAIL:
2060                 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2061                 break;
2062
2063         default:
2064                 bfa_sm_fault(ioim->bfa, event);
2065         }
2066 }
2067
2068 /*
2069  * IO is completed; waiting for the resource to be freed by firmware.
2070  */
2071 static void
2072 bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2073 {
2074         bfa_trc(ioim->bfa, ioim->iotag);
2075         bfa_trc(ioim->bfa, event);
2076
2077         switch (event) {
2078         case BFA_IOIM_SM_FREE:
2079                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2080                 bfa_ioim_free(ioim);
2081                 break;
2082
2083         case BFA_IOIM_SM_CLEANUP:
2084                 bfa_ioim_notify_cleanup(ioim);
2085                 break;
2086
2087         case BFA_IOIM_SM_HWFAIL:
2088                 break;
2089
2090         default:
2091                 bfa_sm_fault(ioim->bfa, event);
2092         }
2093 }
2094
2095 /*
2096  * This is called from bfa_fcpim_start after bfa_init(), once the flash
2097  * read has been completed by the driver. Now invalidate the stale content
2098  * of the lun mask, such as unit attention, rp tag and lp tag.
2099  */
2100 void
2101 bfa_ioim_lm_init(struct bfa_s *bfa)
2102 {
2103         struct bfa_lun_mask_s *lunm_list;
2104         int     i;
2105
2106         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2107                 return;
2108
2109         lunm_list = bfa_get_lun_mask_list(bfa);
2110         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2111                 lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2112                 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2113                 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2114         }
2115 }
2116
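/*
 * hcb callback for good (fast-path) completions. A 'complete' value of
 * BFA_FALSE means the queued callback is being flushed rather than run;
 * in that case only the state machine is advanced with the HCB event.
 */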
2117 static void
2118 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2119 {
2120         struct bfa_ioim_s *ioim = cbarg;
2121
2122         if (!complete) {
2123                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2124                 return;
2125         }
2126
2127         bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2128 }
2129
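/*
 * hcb callback for completions carrying a full firmware response:
 * extracts the SCSI status, sense data for CHECK CONDITION, and a
 * signed residue (positive for underrun, negative for overrun) before
 * completing the IO to the driver.
 */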
2130 static void
2131 __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2132 {
2133         struct bfa_ioim_s       *ioim = cbarg;
2134         struct bfi_ioim_rsp_s *m;
2135         u8      *snsinfo = NULL;
2136         u8      sns_len = 0;
2137         s32     residue = 0;
2138
2139         if (!complete) {
2140                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2141                 return;
2142         }
2143
2144         m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2145         if (m->io_status == BFI_IOIM_STS_OK) {
2146                 /*
2147                  * setup sense information, if present
2148                  */
2149                 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2150                                         m->sns_len) {
2151                         sns_len = m->sns_len;
2152                         snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2153                                                 ioim->iotag);
2154                 }
2155
2156                 /*
2157                  * set up the residue value correctly for normal completions
2158                  */
2159                 if (m->resid_flags == FCP_RESID_UNDER) {
2160                         residue = be32_to_cpu(m->residue);
2161                         bfa_stats(ioim->itnim, iocomp_underrun);
2162                 }
2163                 if (m->resid_flags == FCP_RESID_OVER) {
2164                         residue = be32_to_cpu(m->residue);
2165                         residue = -residue;
2166                         bfa_stats(ioim->itnim, iocomp_overrun);
2167                 }
2168         }
2169
2170         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2171                           m->scsi_status, sns_len, snsinfo, residue);
2172 }
2173
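/*
 * Refresh the cached rport/lport tags of every active LUN mask entry
 * matching the (lp_wwn, rp_wwn) pair, so stale tags left over from a
 * previous session are not used.
 */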
2174 void
2175 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2176                         u16 rp_tag, u8 lp_tag)
2177 {
2178         struct bfa_lun_mask_s *lun_list;
2179         u8      i;
2180
2181         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2182                 return;
2183
2184         lun_list = bfa_get_lun_mask_list(bfa);
2185         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2186                 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2187                         if ((lun_list[i].lp_wwn == lp_wwn) &&
2188                             (lun_list[i].rp_wwn == rp_wwn)) {
2189                                 lun_list[i].rp_tag = rp_tag;
2190                                 lun_list[i].lp_tag = lp_tag;
2191                         }
2192                 }
2193         }
2194 }
2195
2196 /*
2197  * set UA for all active luns in LM DB
2198  */
2199 static void
2200 bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2201 {
2202         struct bfa_lun_mask_s   *lunm_list;
2203         int     i;
2204
2205         lunm_list = bfa_get_lun_mask_list(bfa);
2206         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2207                 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2208                         continue;
2209                 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2210         }
2211 }
2212
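/*
 * Enable or disable LUN masking. On a transition to the enabled state a
 * unit attention condition is set on all active entries (see
 * bfa_ioim_lm_set_ua()), and the new state is persisted through
 * bfa_dconf_update().
 */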
2213 bfa_status_t
2214 bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2215 {
2216         struct bfa_lunmask_cfg_s        *lun_mask;
2217
2218         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2219         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2220                 return BFA_STATUS_FAILED;
2221
2222         if (bfa_get_lun_mask_status(bfa) == update)
2223                 return BFA_STATUS_NO_CHANGE;
2224
2225         lun_mask = bfa_get_lun_mask(bfa);
2226         lun_mask->status = update;
2227
2228         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2229                 bfa_ioim_lm_set_ua(bfa);
2230
2231         return  bfa_dconf_update(bfa);
2232 }
2233
2234 bfa_status_t
2235 bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2236 {
2237         int i;
2238         struct bfa_lun_mask_s   *lunm_list;
2239
2240         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2241         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2242                 return BFA_STATUS_FAILED;
2243
2244         lunm_list = bfa_get_lun_mask_list(bfa);
2245         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2246                 if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2247                         if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2248                                 bfa_rport_unset_lunmask(bfa,
2249                                   BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2250                 }
2251         }
2252
2253         memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2254         return bfa_dconf_update(bfa);
2255 }
2256
2257 bfa_status_t
2258 bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2259 {
2260         struct bfa_lunmask_cfg_s *lun_mask;
2261
2262         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2263         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2264                 return BFA_STATUS_FAILED;
2265
2266         lun_mask = bfa_get_lun_mask(bfa);
2267         memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2268         return BFA_STATUS_OK;
2269 }
2270
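/*
 * Add a LUN mask entry for the (pwwn, rpwwn, lun) triple. If the remote
 * port is currently known, its rport/lport tags are cached in the entry;
 * otherwise the tags stay invalid until bfa_fcpim_lunmask_rp_update()
 * fills them in.
 */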
2271 bfa_status_t
2272 bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2273                       wwn_t rpwwn, struct scsi_lun lun)
2274 {
2275         struct bfa_lun_mask_s *lunm_list;
2276         struct bfa_rport_s *rp = NULL;
2277         int i, free_index = MAX_LUN_MASK_CFG + 1;
2278         struct bfa_fcs_lport_s *port = NULL;
2279         struct bfa_fcs_rport_s *rp_fcs;
2280
2281         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2282         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2283                 return BFA_STATUS_FAILED;
2284
2285         port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2286                                    vf_id, *pwwn);
2287         if (port) {
2288                 *pwwn = port->port_cfg.pwwn;
2289                 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2290                 if (rp_fcs)
2291                         rp = rp_fcs->bfa_rport;
2292         }
2293
2294         lunm_list = bfa_get_lun_mask_list(bfa);
2295         /* track a free slot and check whether the entry already exists */
2296         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2297                 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2298                         free_index = i;
2299                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2300                     (lunm_list[i].rp_wwn == rpwwn) &&
2301                     (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2302                      scsilun_to_int((struct scsi_lun *)&lun)))
2303                         return  BFA_STATUS_ENTRY_EXISTS;
2304         }
2305
2306         if (free_index > MAX_LUN_MASK_CFG)
2307                 return BFA_STATUS_MAX_ENTRY_REACHED;
2308
2309         if (rp) {
2310                 lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2311                                                    rp->rport_info.local_pid);
2312                 lunm_list[free_index].rp_tag = rp->rport_tag;
2313         } else {
2314                 lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2315                 lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2316         }
2317
2318         lunm_list[free_index].lp_wwn = *pwwn;
2319         lunm_list[free_index].rp_wwn = rpwwn;
2320         lunm_list[free_index].lun = lun;
2321         lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2322
2323         /* set UA for all luns of this rport */
2324         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2325                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2326                     (lunm_list[i].rp_wwn == rpwwn))
2327                         lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2328         }
2329
2330         return bfa_dconf_update(bfa);
2331 }
2332
2333 bfa_status_t
2334 bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2335                          wwn_t rpwwn, struct scsi_lun lun)
2336 {
2337         struct bfa_lun_mask_s   *lunm_list;
2338         struct bfa_rport_s      *rp = NULL;
2339         struct bfa_fcs_lport_s *port = NULL;
2340         struct bfa_fcs_rport_s *rp_fcs;
2341         int     i;
2342
2343         /* In min cfg lunm_list could be NULL, but no commands should run. */
2344         if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2345                 return BFA_STATUS_FAILED;
2346
2347         bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2348         bfa_trc(bfa, *pwwn);
2349         bfa_trc(bfa, rpwwn);
2350         bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2351
2352         if (*pwwn == 0) {
2353                 port = bfa_fcs_lookup_port(
2354                                 &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2355                                 vf_id, *pwwn);
2356                 if (port) {
2357                         *pwwn = port->port_cfg.pwwn;
2358                         rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2359                         if (rp_fcs)
2360                                 rp = rp_fcs->bfa_rport;
2361                 }
2362         }
2363
2364         lunm_list = bfa_get_lun_mask_list(bfa);
2365         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2366                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2367                     (lunm_list[i].rp_wwn == rpwwn) &&
2368                     (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2369                      scsilun_to_int((struct scsi_lun *)&lun))) {
2370                         lunm_list[i].lp_wwn = 0;
2371                         lunm_list[i].rp_wwn = 0;
2372                         int_to_scsilun(0, &lunm_list[i].lun);
2373                         lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2374                         if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2375                                 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2376                                 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2377                         }
2378                         return bfa_dconf_update(bfa);
2379                 }
2380         }
2381
2382         /* set UA for all luns of this rport */
2383         for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2384                 if ((lunm_list[i].lp_wwn == *pwwn) &&
2385                     (lunm_list[i].rp_wwn == rpwwn))
2386                         lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2387         }
2388
2389         return BFA_STATUS_ENTRY_NOT_EXISTS;
2390 }
2391
2392 static void
2393 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2394 {
2395         struct bfa_ioim_s *ioim = cbarg;
2396
2397         if (!complete) {
2398                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2399                 return;
2400         }
2401
2402         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2403                           0, 0, NULL, 0);
2404 }
2405
2406 static void
2407 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2408 {
2409         struct bfa_ioim_s *ioim = cbarg;
2410
2411         bfa_stats(ioim->itnim, path_tov_expired);
2412         if (!complete) {
2413                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2414                 return;
2415         }
2416
2417         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2418                           0, 0, NULL, 0);
2419 }
2420
2421 static void
2422 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2423 {
2424         struct bfa_ioim_s *ioim = cbarg;
2425
2426         if (!complete) {
2427                 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2428                 return;
2429         }
2430
2431         bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2432 }
2433
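/*
 * Callback from the SG page allocator: the pages requested earlier are
 * now available, so splice them onto the IO and resume the state
 * machine with the SGALLOCED event.
 */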
2434 static void
2435 bfa_ioim_sgpg_alloced(void *cbarg)
2436 {
2437         struct bfa_ioim_s *ioim = cbarg;
2438
2439         ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2440         list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2441         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2442         bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2443 }
2444
2445 /*
2446  * Send I/O request to firmware.
2447  */
2448 static  bfa_boolean_t
2449 bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2450 {
2451         struct bfa_itnim_s *itnim = ioim->itnim;
2452         struct bfi_ioim_req_s *m;
2453         static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2454         struct bfi_sge_s *sge, *sgpge;
2455         u32     pgdlen = 0;
2456         u32     fcp_dl;
2457         u64 addr;
2458         struct scatterlist *sg;
2459         struct bfa_sgpg_s *sgpg;
2460         struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2461         u32 i, sge_id, pgcumsz;
2462         enum dma_data_direction dmadir;
2463
2464         /*
2465          * check for room in queue to send request now
2466          */
2467         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2468         if (!m) {
2469                 bfa_stats(ioim->itnim, qwait);
2470                 bfa_reqq_wait(ioim->bfa, ioim->reqq,
2471                                   &ioim->iosp->reqq_wait);
2472                 return BFA_FALSE;
2473         }
2474
2475         /*
2476          * build i/o request message next
2477          */
2478         m->io_tag = cpu_to_be16(ioim->iotag);
2479         m->rport_hdl = ioim->itnim->rport->fw_handle;
2480         m->io_timeout = 0;
2481
2482         sge = &m->sges[0];
2483         sgpg = ioim->sgpg;
2484         sge_id = 0;
2485         sgpge = NULL;
2486         pgcumsz = 0;
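        /*
         * SG layout: the first element is built inline in the request
         * message; the rest go into external SG pages, each holding up to
         * BFI_SGPG_DATA_SGES data elements followed by a trailing LINK
         * element (to the next page) or a PGDLEN element on the last page.
         */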
2487         scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2488                 if (i == 0) {
2489                         /* build inline IO SG element */
2490                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2491                         sge->sga = *(union bfi_addr_u *) &addr;
2492                         pgdlen = sg_dma_len(sg);
2493                         sge->sg_len = pgdlen;
2494                         sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2495                                         BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2496                         bfa_sge_to_be(sge);
2497                         sge++;
2498                 } else {
2499                         if (sge_id == 0)
2500                                 sgpge = sgpg->sgpg->sges;
2501
2502                         addr = bfa_sgaddr_le(sg_dma_address(sg));
2503                         sgpge->sga = *(union bfi_addr_u *) &addr;
2504                         sgpge->sg_len = sg_dma_len(sg);
2505                         pgcumsz += sgpge->sg_len;
2506
2507                         /* set flags */
2508                         if (i < (ioim->nsges - 1) &&
2509                                         sge_id < (BFI_SGPG_DATA_SGES - 1))
2510                                 sgpge->flags = BFI_SGE_DATA;
2511                         else if (i < (ioim->nsges - 1))
2512                                 sgpge->flags = BFI_SGE_DATA_CPL;
2513                         else
2514                                 sgpge->flags = BFI_SGE_DATA_LAST;
2515
2516                         bfa_sge_to_le(sgpge);
2517
2518                         sgpge++;
2519                         if (i == (ioim->nsges - 1)) {
2520                                 sgpge->flags = BFI_SGE_PGDLEN;
2521                                 sgpge->sga.a32.addr_lo = 0;
2522                                 sgpge->sga.a32.addr_hi = 0;
2523                                 sgpge->sg_len = pgcumsz;
2524                                 bfa_sge_to_le(sgpge);
2525                         } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2526                                 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2527                                 sgpge->flags = BFI_SGE_LINK;
2528                                 sgpge->sga = sgpg->sgpg_pa;
2529                                 sgpge->sg_len = pgcumsz;
2530                                 bfa_sge_to_le(sgpge);
2531                                 sge_id = 0;
2532                                 pgcumsz = 0;
2533                         }
2534                 }
2535         }
2536
2537         if (ioim->nsges > BFI_SGE_INLINE) {
2538                 sge->sga = ioim->sgpg->sgpg_pa;
2539         } else {
2540                 sge->sga.a32.addr_lo = 0;
2541                 sge->sga.a32.addr_hi = 0;
2542         }
2543         sge->sg_len = pgdlen;
2544         sge->flags = BFI_SGE_PGDLEN;
2545         bfa_sge_to_be(sge);
2546
2547         /*
2548          * set up I/O command parameters
2549          */
2550         m->cmnd = cmnd_z0;
2551         int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2552         dmadir = cmnd->sc_data_direction;
2553         if (dmadir == DMA_TO_DEVICE)
2554                 m->cmnd.iodir = FCP_IODIR_WRITE;
2555         else if (dmadir == DMA_FROM_DEVICE)
2556                 m->cmnd.iodir = FCP_IODIR_READ;
2557         else
2558                 m->cmnd.iodir = FCP_IODIR_NONE;
2559
2560         m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2561         fcp_dl = scsi_bufflen(cmnd);
2562         m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2563
2564         /*
2565          * set up I/O message header
2566          */
2567         switch (m->cmnd.iodir) {
2568         case FCP_IODIR_READ:
2569                 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2570                 bfa_stats(itnim, input_reqs);
2571                 ioim->itnim->stats.rd_throughput += fcp_dl;
2572                 break;
2573         case FCP_IODIR_WRITE:
2574                 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2575                 bfa_stats(itnim, output_reqs);
2576                 ioim->itnim->stats.wr_throughput += fcp_dl;
2577                 break;
2578         case FCP_IODIR_RW:
2579                 bfa_stats(itnim, input_reqs);
2580                 bfa_stats(itnim, output_reqs);
2581                 /* fall through */
2582         default:
2583                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2584         }
2585         if (itnim->seq_rec ||
2586             (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2587                 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2588
2589         /*
2590          * queue I/O message to firmware
2591          */
2592         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2593         return BFA_TRUE;
2594 }
2595
2596 /*
2597  * Set up any additional SG pages needed. The inline SG element is set
2598  * up at queuing time.
2599  */
2600 static bfa_boolean_t
2601 bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2602 {
2603         u16     nsgpgs;
2604
2605         WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2606
2607         /*
2608          * allocate SG pages needed
2609          */
2610         nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2611         if (!nsgpgs)
2612                 return BFA_TRUE;
2613
2614         if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2615             != BFA_STATUS_OK) {
2616                 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2617                 return BFA_FALSE;
2618         }
2619
2620         ioim->nsgpgs = nsgpgs;
2621         ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2622
2623         return BFA_TRUE;
2624 }
2625
2626 /*
2627  * Send I/O abort request to firmware.
2628  */
2629 static  bfa_boolean_t
2630 bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2631 {
2632         struct bfi_ioim_abort_req_s *m;
2633         enum bfi_ioim_h2i       msgop;
2634
2635         /*
2636          * check for room in queue to send request now
2637          */
2638         m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2639         if (!m)
2640                 return BFA_FALSE;
2641
2642         /*
2643          * build i/o request message next
2644          */
2645         if (ioim->iosp->abort_explicit)
2646                 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2647         else
2648                 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2649
2650         bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
2651         m->io_tag    = cpu_to_be16(ioim->iotag);
2652         m->abort_tag = ++ioim->abort_tag;
2653
2654         /*
2655          * queue I/O message to firmware
2656          */
2657         bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2658         return BFA_TRUE;
2659 }
2660
2661 /*
2662  * Called to resume any I/O requests waiting for room in the request queue.
2663  */
2664 static void
2665 bfa_ioim_qresume(void *cbarg)
2666 {
2667         struct bfa_ioim_s *ioim = cbarg;
2668
2669         bfa_stats(ioim->itnim, qresumes);
2670         bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2671 }
2672
2673
2674 static void
2675 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2676 {
2677         /*
2678          * Move IO from itnim queue to fcpim global queue since itnim will be
2679          * freed.
2680          */
2681         list_del(&ioim->qe);
2682         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2683
2684         if (!ioim->iosp->tskim) {
2685                 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2686                         bfa_cb_dequeue(&ioim->hcb_qe);
2687                         list_del(&ioim->qe);
2688                         list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2689                 }
2690                 bfa_itnim_iodone(ioim->itnim);
2691         } else
2692                 bfa_wc_down(&ioim->iosp->tskim->wc);
2693 }
2694
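/*
 * An IO can be aborted only while it is owned by firmware or waiting
 * for resources; IOs that are unstarted (and not pending), already
 * aborting, or in their completion/free path are not abortable.
 */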
2695 static bfa_boolean_t
2696 bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2697 {
2698         if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2699             (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))    ||
2700             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))         ||
2701             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))   ||
2702             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))           ||
2703             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))      ||
2704             (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2705                 return BFA_FALSE;
2706
2707         return BFA_TRUE;
2708 }
2709
2710 void
2711 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2712 {
2713         /*
2714          * If the path tov timer expired, fail back with PATHTOV status;
2715          * these IO requests are not normally retried by the IO stack.
2716          *
2717          * Otherwise the device came back online; fail the IO with the
2718          * normal failed status so that the IO stack retries it.
2719          */
2720         if (iotov)
2721                 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2722         else {
2723                 ioim->io_cbfn = __bfa_cb_ioim_failed;
2724                 bfa_stats(ioim->itnim, iocom_nexus_abort);
2725         }
2726         bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2727
2728         /*
2729          * Move IO to fcpim global queue since itnim will be
2730          * freed.
2731          */
2732         list_del(&ioim->qe);
2733         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2734 }
2735
2736
2737 /*
2738  * Memory allocation and initialization.
2739  */
2740 void
2741 bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2742 {
2743         struct bfa_ioim_s               *ioim;
2744         struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
2745         struct bfa_ioim_sp_s    *iosp;
2746         u16             i;
2747
2748         /*
2749          * claim memory first
2750          */
2751         ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
2752         fcpim->ioim_arr = ioim;
2753         bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
2754
2755         iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
2756         fcpim->ioim_sp_arr = iosp;
2757         bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
2758
2759         /*
2760          * Initialize ioim free queues
2761          */
2762         INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2763         INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2764
2765         for (i = 0; i < fcpim->fcp->num_ioim_reqs;
2766              i++, ioim++, iosp++) {
2767                 /*
2768                  * initialize IOIM
2769                  */
2770                 memset(ioim, 0, sizeof(struct bfa_ioim_s));
2771                 ioim->iotag   = i;
2772                 ioim->bfa     = fcpim->bfa;
2773                 ioim->fcpim   = fcpim;
2774                 ioim->iosp    = iosp;
2775                 INIT_LIST_HEAD(&ioim->sgpg_q);
2776                 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2777                                    bfa_ioim_qresume, ioim);
2778                 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2779                                    bfa_ioim_sgpg_alloced, ioim);
2780                 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2781         }
2782 }
2783
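/*
 * IO completion interrupt handler: map the firmware completion status
 * to a state machine event. A nonzero reuse_io_tag indicates the host
 * may reuse the IO tag right away (SM_COMP); otherwise the tag is held
 * until a separate BFI_IOIM_STS_RES_FREE message arrives (SM_DONE).
 */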
2784 void
2785 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2786 {
2787         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2788         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2789         struct bfa_ioim_s *ioim;
2790         u16     iotag;
2791         enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2792
2793         iotag = be16_to_cpu(rsp->io_tag);
2794
2795         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2796         WARN_ON(ioim->iotag != iotag);
2797
2798         bfa_trc(ioim->bfa, ioim->iotag);
2799         bfa_trc(ioim->bfa, rsp->io_status);
2800         bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2801
2802         if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2803                 ioim->iosp->comp_rspmsg = *m;
2804
2805         switch (rsp->io_status) {
2806         case BFI_IOIM_STS_OK:
2807                 bfa_stats(ioim->itnim, iocomp_ok);
2808                 if (rsp->reuse_io_tag == 0)
2809                         evt = BFA_IOIM_SM_DONE;
2810                 else
2811                         evt = BFA_IOIM_SM_COMP;
2812                 break;
2813
2814         case BFI_IOIM_STS_TIMEDOUT:
2815                 bfa_stats(ioim->itnim, iocomp_timedout);
2816                 /* fall through */
2817         case BFI_IOIM_STS_ABORTED:
2818                 rsp->io_status = BFI_IOIM_STS_ABORTED;
2819                 bfa_stats(ioim->itnim, iocomp_aborted);
2820                 if (rsp->reuse_io_tag == 0)
2821                         evt = BFA_IOIM_SM_DONE;
2822                 else
2823                         evt = BFA_IOIM_SM_COMP;
2824                 break;
2825
2826         case BFI_IOIM_STS_PROTO_ERR:
2827                 bfa_stats(ioim->itnim, iocom_proto_err);
2828                 WARN_ON(!rsp->reuse_io_tag);
2829                 evt = BFA_IOIM_SM_COMP;
2830                 break;
2831
2832         case BFI_IOIM_STS_SQER_NEEDED:
2833                 bfa_stats(ioim->itnim, iocom_sqer_needed);
2834                 WARN_ON(rsp->reuse_io_tag != 0);
2835                 evt = BFA_IOIM_SM_SQRETRY;
2836                 break;
2837
2838         case BFI_IOIM_STS_RES_FREE:
2839                 bfa_stats(ioim->itnim, iocom_res_free);
2840                 evt = BFA_IOIM_SM_FREE;
2841                 break;
2842
2843         case BFI_IOIM_STS_HOST_ABORTED:
2844                 bfa_stats(ioim->itnim, iocom_hostabrts);
2845                 if (rsp->abort_tag != ioim->abort_tag) {
2846                         bfa_trc(ioim->bfa, rsp->abort_tag);
2847                         bfa_trc(ioim->bfa, ioim->abort_tag);
2848                         return;
2849                 }
2850
2851                 if (rsp->reuse_io_tag)
2852                         evt = BFA_IOIM_SM_ABORT_COMP;
2853                 else
2854                         evt = BFA_IOIM_SM_ABORT_DONE;
2855                 break;
2856
2857         case BFI_IOIM_STS_UTAG:
2858                 bfa_stats(ioim->itnim, iocom_utags);
2859                 evt = BFA_IOIM_SM_COMP_UTAG;
2860                 break;
2861
2862         default:
2863                 WARN_ON(1);
2864         }
2865
2866         bfa_sm_send_event(ioim, evt);
2867 }
2868
2869 void
2870 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2871 {
2872         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2873         struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2874         struct bfa_ioim_s *ioim;
2875         u16     iotag;
2876
2877         iotag = be16_to_cpu(rsp->io_tag);
2878
2879         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2880         WARN_ON(ioim->iotag != iotag);
2881
2882         bfa_ioim_cb_profile_comp(fcpim, ioim);
2883
2884         bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2885 }
2886
2887 /*
2888  * Called by itnim to clean up IO while going offline.
2889  */
2890 void
2891 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2892 {
2893         bfa_trc(ioim->bfa, ioim->iotag);
2894         bfa_stats(ioim->itnim, io_cleanups);
2895
2896         ioim->iosp->tskim = NULL;
2897         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2898 }
2899
2900 void
2901 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2902 {
2903         bfa_trc(ioim->bfa, ioim->iotag);
2904         bfa_stats(ioim->itnim, io_tmaborts);
2905
2906         ioim->iosp->tskim = tskim;
2907         bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2908 }
2909
2910 /*
2911  * IOC failure handling.
2912  */
2913 void
2914 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2915 {
2916         bfa_trc(ioim->bfa, ioim->iotag);
2917         bfa_stats(ioim->itnim, io_iocdowns);
2918         bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2919 }
2920
2921 /*
2922  * IO offline TOV popped. Fail the pending IO.
2923  */
2924 void
2925 bfa_ioim_tov(struct bfa_ioim_s *ioim)
2926 {
2927         bfa_trc(ioim->bfa, ioim->iotag);
2928         bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2929 }
2930
2931
2932 /*
2933  * Allocate IOIM resource for initiator mode I/O request.
2934  */
2935 struct bfa_ioim_s *
2936 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2937                 struct bfa_itnim_s *itnim, u16 nsges)
2938 {
2939         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2940         struct bfa_ioim_s *ioim;
2941         struct bfa_iotag_s *iotag = NULL;
2942
2943         /*
2944          * allocate IOIM resource
2945          */
2946         bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
2947         if (!iotag) {
2948                 bfa_stats(itnim, no_iotags);
2949                 return NULL;
2950         }
2951
2952         ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
2953
2954         ioim->dio = dio;
2955         ioim->itnim = itnim;
2956         ioim->nsges = nsges;
2957         ioim->nsgpgs = 0;
2958
2959         bfa_stats(itnim, total_ios);
2960         fcpim->ios_active++;
2961
2962         list_add_tail(&ioim->qe, &itnim->io_q);
2963
2964         return ioim;
2965 }
2966
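/*
 * Return an IOIM to its free list: release any chained SG pages, mask
 * the iotag down to its base value, and put the tag back on the ioim or
 * fwtio free queue depending on which range it falls in.
 */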
2967 void
2968 bfa_ioim_free(struct bfa_ioim_s *ioim)
2969 {
2970         struct bfa_fcpim_s *fcpim = ioim->fcpim;
2971         struct bfa_iotag_s *iotag;
2972
2973         if (ioim->nsgpgs > 0)
2974                 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2975
2976         bfa_stats(ioim->itnim, io_comps);
2977         fcpim->ios_active--;
2978
2979         ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2980
2981         WARN_ON(!(ioim->iotag <
2982                 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
2983         iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
2984
2985         if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
2986                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
2987         else
2988                 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2989
2990         list_del(&ioim->qe);
2991 }
2992
2993 void
2994 bfa_ioim_start(struct bfa_ioim_s *ioim)
2995 {
2996         bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2997
2998         /*
2999          * Obtain the queue over which this request has to be issued
3000          * (queue 0 when IO redirection is enabled; BFA_FALSE == 0)
3000          */
3001         ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
3002                         BFA_FALSE : bfa_itnim_get_reqq(ioim);
3003
3004         bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
3005 }
3006
3007 /*
3008  * Driver I/O abort request.
3009  */
3010 bfa_status_t
3011 bfa_ioim_abort(struct bfa_ioim_s *ioim)
3012 {
3013
3014         bfa_trc(ioim->bfa, ioim->iotag);
3015
3016         if (!bfa_ioim_is_abortable(ioim))
3017                 return BFA_STATUS_FAILED;
3018
3019         bfa_stats(ioim->itnim, io_aborts);
3020         bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
3021
3022         return BFA_STATUS_OK;
3023 }
3024
3025 /*
3026  *  BFA TSKIM state machine functions
3027  */
3028
3029 /*
3030  * Task management command beginning state.
3031  */
3032 static void
3033 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3034 {
3035         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3036
3037         switch (event) {
3038         case BFA_TSKIM_SM_START:
3039                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3040                 bfa_tskim_gather_ios(tskim);
3041
3042                 /*
3043                  * If device is offline, do not send TM on wire. Just cleanup
3044                  * any pending IO requests and complete TM request.
3045                  */
3046                 if (!bfa_itnim_is_online(tskim->itnim)) {
3047                         bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3048                         tskim->tsk_status = BFI_TSKIM_STS_OK;
3049                         bfa_tskim_cleanup_ios(tskim);
3050                         return;
3051                 }
3052
3053                 if (!bfa_tskim_send(tskim)) {
3054                         bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3055                         bfa_stats(tskim->itnim, tm_qwait);
3056                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3057                                           &tskim->reqq_wait);
3058                 }
3059                 break;
3060
3061         default:
3062                 bfa_sm_fault(tskim->bfa, event);
3063         }
3064 }
3065
3066 /*
 * TM command is active, awaiting completion from firmware to
 * clean up IO requests in the TM scope.
3069  */
3070 static void
3071 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3072 {
3073         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3074
3075         switch (event) {
3076         case BFA_TSKIM_SM_DONE:
3077                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3078                 bfa_tskim_cleanup_ios(tskim);
3079                 break;
3080
3081         case BFA_TSKIM_SM_CLEANUP:
3082                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3083                 if (!bfa_tskim_send_abort(tskim)) {
3084                         bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3085                         bfa_stats(tskim->itnim, tm_qwait);
3086                         bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3087                                 &tskim->reqq_wait);
3088                 }
3089                 break;
3090
3091         case BFA_TSKIM_SM_HWFAIL:
3092                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3093                 bfa_tskim_iocdisable_ios(tskim);
3094                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3095                 break;
3096
3097         default:
3098                 bfa_sm_fault(tskim->bfa, event);
3099         }
3100 }
3101
3102 /*
3103  * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3104  * completion event from firmware.
3105  */
3106 static void
3107 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3108 {
3109         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3110
3111         switch (event) {
3112         case BFA_TSKIM_SM_DONE:
3113                 /*
3114                  * Ignore and wait for ABORT completion from firmware.
3115                  */
3116                 break;
3117
3118         case BFA_TSKIM_SM_UTAG:
3119         case BFA_TSKIM_SM_CLEANUP_DONE:
3120                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3121                 bfa_tskim_cleanup_ios(tskim);
3122                 break;
3123
3124         case BFA_TSKIM_SM_HWFAIL:
3125                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3126                 bfa_tskim_iocdisable_ios(tskim);
3127                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3128                 break;
3129
3130         default:
3131                 bfa_sm_fault(tskim->bfa, event);
3132         }
3133 }
3134
3135 static void
3136 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3137 {
3138         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3139
3140         switch (event) {
3141         case BFA_TSKIM_SM_IOS_DONE:
3142                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3143                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3144                 break;
3145
3146         case BFA_TSKIM_SM_CLEANUP:
3147                 /*
3148                  * Ignore, TM command completed on wire.
                 * Notify TM completion on IO cleanup completion.
3150                  */
3151                 break;
3152
3153         case BFA_TSKIM_SM_HWFAIL:
3154                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3155                 bfa_tskim_iocdisable_ios(tskim);
3156                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3157                 break;
3158
3159         default:
3160                 bfa_sm_fault(tskim->bfa, event);
3161         }
3162 }
3163
3164 /*
3165  * Task management command is waiting for room in request CQ
3166  */
3167 static void
3168 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3169 {
3170         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3171
3172         switch (event) {
3173         case BFA_TSKIM_SM_QRESUME:
3174                 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3175                 bfa_tskim_send(tskim);
3176                 break;
3177
3178         case BFA_TSKIM_SM_CLEANUP:
3179                 /*
3180                  * No need to send TM on wire since ITN is offline.
3181                  */
3182                 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3183                 bfa_reqq_wcancel(&tskim->reqq_wait);
3184                 bfa_tskim_cleanup_ios(tskim);
3185                 break;
3186
3187         case BFA_TSKIM_SM_HWFAIL:
3188                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3189                 bfa_reqq_wcancel(&tskim->reqq_wait);
3190                 bfa_tskim_iocdisable_ios(tskim);
3191                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3192                 break;
3193
3194         default:
3195                 bfa_sm_fault(tskim->bfa, event);
3196         }
3197 }
3198
3199 /*
 * Task management command is active, awaiting room in the request CQ
 * to send the cleanup request.
3202  */
3203 static void
3204 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3205                 enum bfa_tskim_event event)
3206 {
3207         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3208
3209         switch (event) {
3210         case BFA_TSKIM_SM_DONE:
3211                 bfa_reqq_wcancel(&tskim->reqq_wait);
3212                 /* fall through */
3213         case BFA_TSKIM_SM_QRESUME:
3214                 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3215                 bfa_tskim_send_abort(tskim);
3216                 break;
3217
3218         case BFA_TSKIM_SM_HWFAIL:
3219                 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3220                 bfa_reqq_wcancel(&tskim->reqq_wait);
3221                 bfa_tskim_iocdisable_ios(tskim);
3222                 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3223                 break;
3224
3225         default:
3226                 bfa_sm_fault(tskim->bfa, event);
3227         }
3228 }
3229
3230 /*
3231  * BFA callback is pending
3232  */
3233 static void
3234 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3235 {
3236         bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3237
3238         switch (event) {
3239         case BFA_TSKIM_SM_HCB:
3240                 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3241                 bfa_tskim_free(tskim);
3242                 break;
3243
3244         case BFA_TSKIM_SM_CLEANUP:
3245                 bfa_tskim_notify_comp(tskim);
3246                 break;
3247
3248         case BFA_TSKIM_SM_HWFAIL:
3249                 break;
3250
3251         default:
3252                 bfa_sm_fault(tskim->bfa, event);
3253         }
3254 }
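
/*
 * Summary of the TSKIM state machine above, for readability:
 *
 *   uninit --START--> active      (straight to iocleanup if the ITN is
 *                                  offline, or to qfull on a full CQ)
 *   active --DONE--> iocleanup
 *   active --CLEANUP--> cleanup   (or cleanup_qfull on a full CQ)
 *   cleanup --UTAG/CLEANUP_DONE--> iocleanup
 *   qfull --QRESUME--> active
 *   qfull --CLEANUP--> iocleanup
 *   cleanup_qfull --QRESUME/DONE--> cleanup
 *   iocleanup --IOS_DONE--> hcb
 *   hcb --HCB--> uninit
 *
 * Every state past uninit handles HWFAIL by disabling the affected IOs
 * and completing the TM through __bfa_cb_tskim_failed(); hcb itself
 * simply ignores it.
 */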
3255
3256 static void
3257 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3258 {
3259         struct bfa_tskim_s *tskim = cbarg;
3260
3261         if (!complete) {
3262                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3263                 return;
3264         }
3265
3266         bfa_stats(tskim->itnim, tm_success);
3267         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3268 }
3269
3270 static void
3271 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3272 {
3273         struct bfa_tskim_s *tskim = cbarg;
3274
3275         if (!complete) {
3276                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3277                 return;
3278         }
3279
3280         bfa_stats(tskim->itnim, tm_failures);
3281         bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3282                                 BFI_TSKIM_STS_FAILED);
3283 }
3284
3285 static bfa_boolean_t
3286 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3287 {
3288         switch (tskim->tm_cmnd) {
3289         case FCP_TM_TARGET_RESET:
3290                 return BFA_TRUE;
3291
3292         case FCP_TM_ABORT_TASK_SET:
3293         case FCP_TM_CLEAR_TASK_SET:
3294         case FCP_TM_LUN_RESET:
3295         case FCP_TM_CLEAR_ACA:
3296                 return !memcmp(&tskim->lun, &lun, sizeof(lun));
3297
3298         default:
3299                 WARN_ON(1);
3300         }
3301
3302         return BFA_FALSE;
3303 }
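
/*
 * Illustrative note: a target reset matches every I/O on the nexus,
 * while the LUN-scoped commands compare the full 8-byte encoded LUN.
 * For example (hypothetical values), after int_to_scsilun(3, &l) an
 * ABORT_TASK_SET carrying LUN 3 matches an I/O addressed to LUN 3 but
 * not one addressed to LUN 0.
 */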
3304
3305 /*
 * Gather the IO requests affected by the task management command.
3307  */
3308 static void
3309 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3310 {
3311         struct bfa_itnim_s *itnim = tskim->itnim;
3312         struct bfa_ioim_s *ioim;
3313         struct list_head *qe, *qen;
3314         struct scsi_cmnd *cmnd;
3315         struct scsi_lun scsilun;
3316
3317         INIT_LIST_HEAD(&tskim->io_q);
3318
3319         /*
3320          * Gather any active IO requests first.
3321          */
3322         list_for_each_safe(qe, qen, &itnim->io_q) {
3323                 ioim = (struct bfa_ioim_s *) qe;
3324                 cmnd = (struct scsi_cmnd *) ioim->dio;
3325                 int_to_scsilun(cmnd->device->lun, &scsilun);
3326                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3327                         list_del(&ioim->qe);
3328                         list_add_tail(&ioim->qe, &tskim->io_q);
3329                 }
3330         }
3331
3332         /*
         * Fail back any pending IO requests immediately.
3334          */
3335         list_for_each_safe(qe, qen, &itnim->pending_q) {
3336                 ioim = (struct bfa_ioim_s *) qe;
3337                 cmnd = (struct scsi_cmnd *) ioim->dio;
3338                 int_to_scsilun(cmnd->device->lun, &scsilun);
3339                 if (bfa_tskim_match_scope(tskim, scsilun)) {
3340                         list_del(&ioim->qe);
3341                         list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3342                         bfa_ioim_tov(ioim);
3343                 }
3344         }
3345 }
3346
3347 /*
3348  * IO cleanup completion
3349  */
3350 static void
3351 bfa_tskim_cleanp_comp(void *tskim_cbarg)
3352 {
3353         struct bfa_tskim_s *tskim = tskim_cbarg;
3354
3355         bfa_stats(tskim->itnim, tm_io_comps);
3356         bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3357 }
3358
3359 /*
 * Clean up the IO requests gathered under this TM command's scope.
3361  */
3362 static void
3363 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3364 {
3365         struct bfa_ioim_s *ioim;
3366         struct list_head        *qe, *qen;
3367
3368         bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3369
3370         list_for_each_safe(qe, qen, &tskim->io_q) {
3371                 ioim = (struct bfa_ioim_s *) qe;
3372                 bfa_wc_up(&tskim->wc);
3373                 bfa_ioim_cleanup_tm(ioim, tskim);
3374         }
3375
3376         bfa_wc_wait(&tskim->wc);
3377 }
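
/*
 * Note on the wait-counter protocol used above: bfa_wc_init() arms
 * bfa_tskim_cleanp_comp() as the resume callback and takes an initial
 * reference, bfa_wc_up() takes one reference per gathered I/O, and
 * bfa_wc_wait() drops the initial one. Each bfa_tskim_iodone() call
 * (further below) drops another, so IOS_DONE fires exactly once, after
 * the last I/O in the TM scope completes its cleanup.
 */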
3378
3379 /*
3380  * Send task management request to firmware.
3381  */
3382 static bfa_boolean_t
3383 bfa_tskim_send(struct bfa_tskim_s *tskim)
3384 {
3385         struct bfa_itnim_s *itnim = tskim->itnim;
3386         struct bfi_tskim_req_s *m;
3387
3388         /*
3389          * check for room in queue to send request now
3390          */
3391         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3392         if (!m)
3393                 return BFA_FALSE;
3394
3395         /*
3396          * build i/o request message next
3397          */
3398         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3399                         bfa_fn_lpu(tskim->bfa));
3400
3401         m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3402         m->itn_fhdl = tskim->itnim->rport->fw_handle;
3403         m->t_secs = tskim->tsecs;
3404         m->lun = tskim->lun;
3405         m->tm_flags = tskim->tm_cmnd;
3406
3407         /*
3408          * queue I/O message to firmware
3409          */
3410         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3411         return BFA_TRUE;
3412 }
3413
3414 /*
3415  * Send abort request to cleanup an active TM to firmware.
3416  */
3417 static bfa_boolean_t
3418 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3419 {
3420         struct bfa_itnim_s      *itnim = tskim->itnim;
3421         struct bfi_tskim_abortreq_s     *m;
3422
3423         /*
3424          * check for room in queue to send request now
3425          */
3426         m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3427         if (!m)
3428                 return BFA_FALSE;
3429
3430         /*
3431          * build i/o request message next
3432          */
3433         bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3434                         bfa_fn_lpu(tskim->bfa));
3435
3436         m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
3437
3438         /*
3439          * queue I/O message to firmware
3440          */
3441         bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3442         return BFA_TRUE;
3443 }
3444
3445 /*
 * Resume a task management command waiting for room in the request queue.
3447  */
3448 static void
3449 bfa_tskim_qresume(void *cbarg)
3450 {
3451         struct bfa_tskim_s *tskim = cbarg;
3452
3453         bfa_stats(tskim->itnim, tm_qresumes);
3454         bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3455 }
3456
3457 /*
 * Cleanup IOs associated with a task management command on IOC failures.
3459  */
3460 static void
3461 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3462 {
3463         struct bfa_ioim_s *ioim;
3464         struct list_head        *qe, *qen;
3465
3466         list_for_each_safe(qe, qen, &tskim->io_q) {
3467                 ioim = (struct bfa_ioim_s *) qe;
3468                 bfa_ioim_iocdisable(ioim);
3469         }
3470 }
3471
3472 /*
3473  * Notification on completions from related ioim.
3474  */
3475 void
3476 bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3477 {
3478         bfa_wc_down(&tskim->wc);
3479 }
3480
3481 /*
3482  * Handle IOC h/w failure notification from itnim.
3483  */
3484 void
3485 bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3486 {
3487         tskim->notify = BFA_FALSE;
3488         bfa_stats(tskim->itnim, tm_iocdowns);
3489         bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3490 }
3491
3492 /*
3493  * Cleanup TM command and associated IOs as part of ITNIM offline.
3494  */
3495 void
3496 bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3497 {
3498         tskim->notify = BFA_TRUE;
3499         bfa_stats(tskim->itnim, tm_cleanups);
3500         bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3501 }
3502
3503 /*
3504  * Memory allocation and initialization.
3505  */
3506 void
3507 bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
3508 {
3509         struct bfa_tskim_s *tskim;
3510         struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
3511         u16     i;
3512
3513         INIT_LIST_HEAD(&fcpim->tskim_free_q);
3514         INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3515
3516         tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
3517         fcpim->tskim_arr = tskim;
3518
3519         for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3520                 /*
3521                  * initialize TSKIM
3522                  */
3523                 memset(tskim, 0, sizeof(struct bfa_tskim_s));
3524                 tskim->tsk_tag = i;
3525                 tskim->bfa      = fcpim->bfa;
3526                 tskim->fcpim    = fcpim;
3527                 tskim->notify  = BFA_FALSE;
3528                 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3529                                         tskim);
3530                 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3531
3532                 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3533         }
3534
3535         bfa_mem_kva_curp(fcp) = (u8 *) tskim;
3536 }
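
/*
 * Note: the attach routine above carves the TSKIM array out of the FCP
 * module's kva block and advances the allocation cursor past it, the
 * same pattern bfa_iotag_attach() follows for I/O tags further below.
 */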
3537
3538 void
3539 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3540 {
3541         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3542         struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3543         struct bfa_tskim_s *tskim;
3544         u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
3545
3546         tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3547         WARN_ON(tskim->tsk_tag != tsk_tag);
3548
3549         tskim->tsk_status = rsp->tsk_status;
3550
3551         /*
3552          * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3553          * requests. All other statuses are for normal completions.
3554          */
3555         if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3556                 bfa_stats(tskim->itnim, tm_cleanup_comps);
3557                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3558         } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
3559                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
3560         } else {
3561                 bfa_stats(tskim->itnim, tm_fw_rsps);
3562                 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3563         }
3564 }
3565
3567 struct bfa_tskim_s *
3568 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3569 {
3570         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3571         struct bfa_tskim_s *tskim;
3572
3573         bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3574
3575         if (tskim)
3576                 tskim->dtsk = dtsk;
3577
3578         return tskim;
3579 }
3580
3581 void
3582 bfa_tskim_free(struct bfa_tskim_s *tskim)
3583 {
3584         WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3585         list_del(&tskim->qe);
3586         list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3587 }
3588
/*
 * Start a task management command.
 *
 * @param[in]   tskim   BFA task management command instance
 * @param[in]   itnim   i-t nexus for the task management command
 * @param[in]   lun     LUN, if applicable
 * @param[in]   tm_cmnd task management command code
 * @param[in]   tsecs   timeout in seconds
 *
 * @return None.
 */
3600 void
3601 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3602                         struct scsi_lun lun,
3603                         enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3604 {
3605         tskim->itnim    = itnim;
3606         tskim->lun      = lun;
3607         tskim->tm_cmnd = tm_cmnd;
3608         tskim->tsecs    = tsecs;
3609         tskim->notify  = BFA_FALSE;
3610         bfa_stats(itnim, tm_cmnds);
3611
3612         list_add_tail(&tskim->qe, &itnim->tsk_q);
3613         bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3614 }
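
/*
 * Illustrative sketch, not part of the original driver: issuing a LUN
 * reset through the interface above. The function name, the 10-second
 * timeout and the BFA_STATUS_DEVBUSY error choice are assumptions made
 * for illustration only.
 */
static bfa_status_t __maybe_unused
bfa_tskim_lun_reset_sketch(struct bfa_s *bfa, struct bfad_tskim_s *dtsk,
                           struct bfa_itnim_s *itnim, u64 lun_id)
{
        struct bfa_tskim_s *tskim;
        struct scsi_lun lun;

        /* allocate a TM instance from the free pool */
        tskim = bfa_tskim_alloc(bfa, dtsk);
        if (!tskim)
                return BFA_STATUS_DEVBUSY;

        /* encode the LUN and kick the TSKIM state machine */
        int_to_scsilun(lun_id, &lun);
        bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 10);
        return BFA_STATUS_OK;
}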
3615
3616 void
3617 bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3618 {
3619         struct bfa_fcpim_s      *fcpim = BFA_FCPIM(bfa);
3620         struct list_head        *qe;
3621         int     i;
3622
3623         for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3624                 bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3625                 list_add_tail(qe, &fcpim->tskim_unused_q);
3626         }
3627 }
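
/*
 * Note: when the firmware reports fewer TM resources than were
 * configured, the excess tags are moved to tskim_unused_q and taken
 * out of circulation rather than freed.
 */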
3628
3629 void
3630 bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3631                 struct bfa_s *bfa)
3632 {
3633         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3634         struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
3635         struct bfa_mem_dma_s *seg_ptr;
3636         u16     nsegs, idx, per_seg_ios, num_io_req;
3637         u32     km_len = 0;
3638
        /*
         * Zero is an allowed config value for num_ioim_reqs and
         * num_fwtio_reqs; if the values are non-zero, clamp them to the
         * supported range.
         */
3643         if (cfg->fwcfg.num_ioim_reqs &&
3644             cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
3645                 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
3646         else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
3647                 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3648
3649         if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
3650                 cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3651
3652         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3653         if (num_io_req > BFA_IO_MAX) {
3654                 if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
3655                         cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
3656                         cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
3657                 } else if (cfg->fwcfg.num_fwtio_reqs)
3658                         cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3659                 else
3660                         cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3661         }
3662
3663         bfa_fcpim_meminfo(cfg, &km_len);
3664
3665         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3666         km_len += num_io_req * sizeof(struct bfa_iotag_s);
3667         km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
3668
3669         /* dma memory */
3670         nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3671         per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
3672
3673         bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3674                 if (num_io_req >= per_seg_ios) {
3675                         num_io_req -= per_seg_ios;
3676                         bfa_mem_dma_setup(minfo, seg_ptr,
3677                                 per_seg_ios * BFI_IOIM_SNSLEN);
3678                 } else
3679                         bfa_mem_dma_setup(minfo, seg_ptr,
3680                                 num_io_req * BFI_IOIM_SNSLEN);
3681         }
3682
3683         /* kva memory */
3684         bfa_mem_kva_setup(minfo, fcp_kva, km_len);
3685 }
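
/*
 * Worked example for the DMA sizing loop above (numbers hypothetical):
 * if per_seg_ios were 1024 and num_io_req were 1500, the iterator would
 * populate two segments: 1024 * BFI_IOIM_SNSLEN bytes of sense-buffer
 * space in the first and the remaining 476 * BFI_IOIM_SNSLEN in the
 * second.
 */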
3686
3687 void
3688 bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3689                 struct bfa_pcidev_s *pcidev)
3690 {
3691         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3692         struct bfa_mem_dma_s *seg_ptr;
3693         u16     idx, nsegs, num_io_req;
3694
3695         fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3696         fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3697         fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
3698         fcp->num_itns   = cfg->fwcfg.num_rports;
3699         fcp->bfa = bfa;
3700
        /*
         * Set up the pool of snsbase addresses that is passed to the
         * firmware as part of bfi_iocfc_cfg_s.
         */
3705         num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3706         nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3707
3708         bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3710                 if (!bfa_mem_dma_virt(seg_ptr))
3711                         break;
3712
3713                 fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
3714                 fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
3715                 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3716         }
3717
3718         fcp->throttle_update_required = 1;
3719         bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3720
3721         bfa_iotag_attach(fcp);
3722
3723         fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
3724         bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
3725                         (fcp->num_itns * sizeof(struct bfa_itn_s));
3726         memset(fcp->itn_arr, 0,
3727                         (fcp->num_itns * sizeof(struct bfa_itn_s)));
3728 }
3729
3730 void
3731 bfa_fcp_iocdisable(struct bfa_s *bfa)
3732 {
3733         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3734
3735         bfa_fcpim_iocdisable(fcp);
3736 }
3737
3738 void
3739 bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
3740 {
3741         struct bfa_fcp_mod_s    *mod = BFA_FCP_MOD(bfa);
3742         struct list_head        *qe;
3743         int     i;
3744
3745         /* Update io throttle value only once during driver load time */
3746         if (!mod->throttle_update_required)
3747                 return;
3748
3749         for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3750                 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3751                 list_add_tail(qe, &mod->iotag_unused_q);
3752         }
3753
3754         if (mod->num_ioim_reqs != num_ioim_fw) {
3755                 bfa_trc(bfa, mod->num_ioim_reqs);
3756                 bfa_trc(bfa, num_ioim_fw);
3757         }
3758
3759         mod->max_ioim_reqs = max_ioim_fw;
3760         mod->num_ioim_reqs = num_ioim_fw;
3761         mod->throttle_update_required = 0;
3762 }
3763
3764 void
3765 bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
3766                 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
3767 {
3768         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3769         struct bfa_itn_s *itn;
3770
3771         itn =  BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
3772         itn->isr = isr;
3773 }
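
/*
 * Usage note (illustrative, "my_itn_isr" is a hypothetical handler):
 * a consumer registers its message handler once per remote port,
 *
 *	bfa_itn_create(bfa, rport, my_itn_isr);
 *
 * and bfa_itn_isr() below then dispatches incoming firmware messages
 * for that rport to the registered handler.
 */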
3774
3775 /*
3776  * Itn interrupt processing.
3777  */
3778 void
3779 bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3780 {
3781         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3782         union bfi_itn_i2h_msg_u msg;
3783         struct bfa_itn_s *itn;
3784
3785         msg.msg = m;
3786         itn =  BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3787
3788         if (itn->isr)
3789                 itn->isr(bfa, m);
3790         else
3791                 WARN_ON(1);
3792 }
3793
3794 void
3795 bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3796 {
3797         struct bfa_iotag_s *iotag;
3798         u16     num_io_req, i;
3799
3800         iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
3801         fcp->iotag_arr = iotag;
3802
3803         INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
3804         INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
3805         INIT_LIST_HEAD(&fcp->iotag_unused_q);
3806
3807         num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
3808         for (i = 0; i < num_io_req; i++, iotag++) {
3809                 memset(iotag, 0, sizeof(struct bfa_iotag_s));
3810                 iotag->tag = i;
3811                 if (i < fcp->num_ioim_reqs)
3812                         list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
3813                 else
3814                         list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
3815         }
3816
3817         bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3818 }
3819
/**
 * To send the config request, first try to use the throttle value from
 * flash; if it is zero, fall back to the driver parameter. The result is
 * min(flash_val, drv_val), because memory allocation was done based on
 * the configured value.
 */
3827 u16
3828 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
3829 {
3830         u16 tmp;
3831         struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3832
        /*
         * If the throttle value from flash is already in effect after the
         * driver is loaded, always return the current value (rather than
         * the actual flash value) until the next load.
         */
3838         if (!fcp->throttle_update_required)
3839                 return (u16)fcp->num_ioim_reqs;
3840
3841         tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
3842         if (!tmp || (tmp > drv_cfg_param))
3843                 tmp = drv_cfg_param;
3844
3845         return tmp;
3846 }
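
/*
 * Worked example (values hypothetical): with a valid flash value of 512
 * and a drv_cfg_param of 256 the function returns 256; with a flash
 * value of 0, or no valid dconf data, it returns drv_cfg_param as-is.
 */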
3847
3848 bfa_status_t
3849 bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
3850 {
3851         if (!bfa_dconf_get_min_cfg(bfa)) {
3852                 BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
3853                 BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
3854                 return BFA_STATUS_OK;
3855         }
3856
3857         return BFA_STATUS_FAILED;
3858 }
3859
3860 u16
3861 bfa_fcpim_read_throttle(struct bfa_s *bfa)
3862 {
3863         struct bfa_throttle_cfg_s *throttle_cfg =
3864                         &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
3865
3866         return ((!bfa_dconf_get_min_cfg(bfa)) ?
3867                ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
3868 }
3869
3870 bfa_status_t
3871 bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
3872 {
3873         /* in min cfg no commands should run. */
3874         if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3875             (!bfa_dconf_read_data_valid(bfa)))
3876                 return BFA_STATUS_FAILED;
3877
3878         bfa_fcpim_write_throttle(bfa, value);
3879
3880         return bfa_dconf_update(bfa);
3881 }
3882
3883 bfa_status_t
3884 bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
3885 {
3886         struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3887         struct bfa_defs_fcpim_throttle_s throttle;
3888
3889         if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3890             (!bfa_dconf_read_data_valid(bfa)))
3891                 return BFA_STATUS_FAILED;
3892
3893         memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
3894
3895         throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
3896         throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
3897         if (!throttle.cfg_value)
3898                 throttle.cfg_value = throttle.cur_value;
3899         throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
3900         memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
3901
3902         return BFA_STATUS_OK;
3903 }
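
/*
 * Illustrative sketch, not part of the original driver: persisting a
 * new throttle value and reading back the effective configuration. The
 * function name and the use of pr_info() are assumptions made for
 * illustration only.
 */
static void __maybe_unused
bfa_fcpim_throttle_sketch(struct bfa_s *bfa, u16 new_value)
{
        struct bfa_defs_fcpim_throttle_s throttle;

        /* write the value to dconf and flush it to flash */
        if (bfa_fcpim_throttle_set(bfa, new_value) != BFA_STATUS_OK)
                return;

        /* read back the current/configured/max values */
        if (bfa_fcpim_throttle_get(bfa, &throttle) == BFA_STATUS_OK)
                pr_info("throttle cur=%u cfg=%u max=%u\n",
                        throttle.cur_value, throttle.cfg_value,
                        throttle.max_value);
}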