]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/block/skd_main.c
skd: Remove set-but-not-used local variables
[linux.git] / drivers / block / skd_main.c
1 /*
2  * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
3  * was acquired by Western Digital in 2012.
4  *
5  * Copyright 2012 sTec, Inc.
6  * Copyright (c) 2017 Western Digital Corporation or its affiliates.
7  *
8  * This file is part of the Linux kernel, and is made available under
9  * the terms of the GNU General Public License version 2.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/pci.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/blkdev.h>
19 #include <linux/sched.h>
20 #include <linux/interrupt.h>
21 #include <linux/compiler.h>
22 #include <linux/workqueue.h>
23 #include <linux/delay.h>
24 #include <linux/time.h>
25 #include <linux/hdreg.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/completion.h>
28 #include <linux/scatterlist.h>
29 #include <linux/version.h>
30 #include <linux/err.h>
31 #include <linux/aer.h>
32 #include <linux/wait.h>
33 #include <linux/uio.h>
34 #include <scsi/scsi.h>
35 #include <scsi/sg.h>
36 #include <linux/io.h>
37 #include <linux/uaccess.h>
38 #include <asm/unaligned.h>
39
40 #include "skd_s1120.h"
41
42 static int skd_dbg_level;
43 static int skd_isr_comp_limit = 4;
44
45 enum {
46         STEC_LINK_2_5GTS = 0,
47         STEC_LINK_5GTS = 1,
48         STEC_LINK_8GTS = 2,
49         STEC_LINK_UNKNOWN = 0xFF
50 };
51
52 enum {
53         SKD_FLUSH_INITIALIZER,
54         SKD_FLUSH_ZERO_SIZE_FIRST,
55         SKD_FLUSH_DATA_SECOND,
56 };
57
58 #define SKD_ASSERT(expr) \
59         do { \
60                 if (unlikely(!(expr))) { \
61                         pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
62                                # expr, __FILE__, __func__, __LINE__); \
63                 } \
64         } while (0)
65
66 #define DRV_NAME "skd"
67 #define DRV_VERSION "2.2.1"
68 #define DRV_BUILD_ID "0260"
69 #define PFX DRV_NAME ": "
70 #define DRV_BIN_VERSION 0x100
71 #define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID
72
73 MODULE_LICENSE("GPL");
74
75 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
76 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
77
78 #define PCI_VENDOR_ID_STEC      0x1B39
79 #define PCI_DEVICE_ID_S1120     0x0001
80
81 #define SKD_FUA_NV              (1 << 1)
82 #define SKD_MINORS_PER_DEVICE   16
83
84 #define SKD_MAX_QUEUE_DEPTH     200u
85
86 #define SKD_PAUSE_TIMEOUT       (5 * 1000)
87
88 #define SKD_N_FITMSG_BYTES      (512u)
89
90 #define SKD_N_SPECIAL_CONTEXT   32u
91 #define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
92
93 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
94  * 128KB limit.  That allows 4096*4K = 16M xfer size
95  */
96 #define SKD_N_SG_PER_REQ_DEFAULT 256u
97 #define SKD_N_SG_PER_SPECIAL    256u
98
99 #define SKD_N_COMPLETION_ENTRY  256u
100 #define SKD_N_READ_CAP_BYTES    (8u)
101
102 #define SKD_N_INTERNAL_BYTES    (512u)
103
104 /* 5 bits of uniqifier, 0xF800 */
105 #define SKD_ID_INCR             (0x400)
106 #define SKD_ID_TABLE_MASK       (3u << 8u)
107 #define  SKD_ID_RW_REQUEST      (0u << 8u)
108 #define  SKD_ID_INTERNAL        (1u << 8u)
109 #define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
110 #define  SKD_ID_FIT_MSG         (3u << 8u)
111 #define SKD_ID_SLOT_MASK        0x00FFu
112 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
113
114 #define SKD_N_TIMEOUT_SLOT      4u
115 #define SKD_TIMEOUT_SLOT_MASK   3u
116
117 #define SKD_N_MAX_SECTORS 2048u
118
119 #define SKD_MAX_RETRIES 2u
120
121 #define SKD_TIMER_SECONDS(seconds) (seconds)
122 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
123
124 #define INQ_STD_NBYTES 36
125
126 enum skd_drvr_state {
127         SKD_DRVR_STATE_LOAD,
128         SKD_DRVR_STATE_IDLE,
129         SKD_DRVR_STATE_BUSY,
130         SKD_DRVR_STATE_STARTING,
131         SKD_DRVR_STATE_ONLINE,
132         SKD_DRVR_STATE_PAUSING,
133         SKD_DRVR_STATE_PAUSED,
134         SKD_DRVR_STATE_DRAINING_TIMEOUT,
135         SKD_DRVR_STATE_RESTARTING,
136         SKD_DRVR_STATE_RESUMING,
137         SKD_DRVR_STATE_STOPPING,
138         SKD_DRVR_STATE_FAULT,
139         SKD_DRVR_STATE_DISAPPEARED,
140         SKD_DRVR_STATE_PROTOCOL_MISMATCH,
141         SKD_DRVR_STATE_BUSY_ERASE,
142         SKD_DRVR_STATE_BUSY_SANITIZE,
143         SKD_DRVR_STATE_BUSY_IMMINENT,
144         SKD_DRVR_STATE_WAIT_BOOT,
145         SKD_DRVR_STATE_SYNCING,
146 };
147
148 #define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
149 #define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
150 #define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
151 #define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
152 #define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
153 #define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
154 #define SKD_START_WAIT_SECONDS  90u
155
156 enum skd_req_state {
157         SKD_REQ_STATE_IDLE,
158         SKD_REQ_STATE_SETUP,
159         SKD_REQ_STATE_BUSY,
160         SKD_REQ_STATE_COMPLETED,
161         SKD_REQ_STATE_TIMEOUT,
162         SKD_REQ_STATE_ABORTED,
163 };
164
165 enum skd_fit_msg_state {
166         SKD_MSG_STATE_IDLE,
167         SKD_MSG_STATE_BUSY,
168 };
169
170 enum skd_check_status_action {
171         SKD_CHECK_STATUS_REPORT_GOOD,
172         SKD_CHECK_STATUS_REPORT_SMART_ALERT,
173         SKD_CHECK_STATUS_REQUEUE_REQUEST,
174         SKD_CHECK_STATUS_REPORT_ERROR,
175         SKD_CHECK_STATUS_BUSY_IMMINENT,
176 };
177
178 struct skd_fitmsg_context {
179         enum skd_fit_msg_state state;
180
181         struct skd_fitmsg_context *next;
182
183         u32 id;
184         u16 outstanding;
185
186         u32 length;
187         u32 offset;
188
189         u8 *msg_buf;
190         dma_addr_t mb_dma_address;
191 };
192
193 struct skd_request_context {
194         enum skd_req_state state;
195
196         struct skd_request_context *next;
197
198         u16 id;
199         u32 fitmsg_id;
200
201         struct request *req;
202         u8 flush_cmd;
203
204         u32 timeout_stamp;
205         u8 sg_data_dir;
206         struct scatterlist *sg;
207         u32 n_sg;
208         u32 sg_byte_count;
209
210         struct fit_sg_descriptor *sksg_list;
211         dma_addr_t sksg_dma_address;
212
213         struct fit_completion_entry_v1 completion;
214
215         struct fit_comp_error_info err_info;
216
217 };
218 #define SKD_DATA_DIR_HOST_TO_CARD       1
219 #define SKD_DATA_DIR_CARD_TO_HOST       2
220
221 struct skd_special_context {
222         struct skd_request_context req;
223
224         u8 orphaned;
225
226         void *data_buf;
227         dma_addr_t db_dma_address;
228
229         u8 *msg_buf;
230         dma_addr_t mb_dma_address;
231 };
232
233 struct skd_sg_io {
234         fmode_t mode;
235         void __user *argp;
236
237         struct sg_io_hdr sg;
238
239         u8 cdb[16];
240
241         u32 dxfer_len;
242         u32 iovcnt;
243         struct sg_iovec *iov;
244         struct sg_iovec no_iov_iov;
245
246         struct skd_special_context *skspcl;
247 };
248
249 typedef enum skd_irq_type {
250         SKD_IRQ_LEGACY,
251         SKD_IRQ_MSI,
252         SKD_IRQ_MSIX
253 } skd_irq_type_t;
254
255 #define SKD_MAX_BARS                    2
256
257 struct skd_device {
258         volatile void __iomem *mem_map[SKD_MAX_BARS];
259         resource_size_t mem_phys[SKD_MAX_BARS];
260         u32 mem_size[SKD_MAX_BARS];
261
262         struct skd_msix_entry *msix_entries;
263
264         struct pci_dev *pdev;
265         int pcie_error_reporting_is_enabled;
266
267         spinlock_t lock;
268         struct gendisk *disk;
269         struct request_queue *queue;
270         struct device *class_dev;
271         int gendisk_on;
272         int sync_done;
273
274         atomic_t device_count;
275         u32 devno;
276         u32 major;
277         char name[32];
278         char isr_name[30];
279
280         enum skd_drvr_state state;
281         u32 drive_state;
282
283         u32 in_flight;
284         u32 cur_max_queue_depth;
285         u32 queue_low_water_mark;
286         u32 dev_max_queue_depth;
287
288         u32 num_fitmsg_context;
289         u32 num_req_context;
290
291         u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
292         u32 timeout_stamp;
293         struct skd_fitmsg_context *skmsg_free_list;
294         struct skd_fitmsg_context *skmsg_table;
295
296         struct skd_request_context *skreq_free_list;
297         struct skd_request_context *skreq_table;
298
299         struct skd_special_context *skspcl_free_list;
300         struct skd_special_context *skspcl_table;
301
302         struct skd_special_context internal_skspcl;
303         u32 read_cap_blocksize;
304         u32 read_cap_last_lba;
305         int read_cap_is_valid;
306         int inquiry_is_valid;
307         u8 inq_serial_num[13];  /*12 chars plus null term */
308         u8 id_str[80];          /* holds a composite name (pci + sernum) */
309
310         u8 skcomp_cycle;
311         u32 skcomp_ix;
312         struct fit_completion_entry_v1 *skcomp_table;
313         struct fit_comp_error_info *skerr_table;
314         dma_addr_t cq_dma_address;
315
316         wait_queue_head_t waitq;
317
318         struct timer_list timer;
319         u32 timer_countdown;
320         u32 timer_substate;
321
322         int n_special;
323         int sgs_per_request;
324         u32 last_mtd;
325
326         u32 proto_ver;
327
328         int dbg_level;
329         u32 connect_time_stamp;
330         int connect_retries;
331 #define SKD_MAX_CONNECT_RETRIES 16
332         u32 drive_jiffies;
333
334         u32 timo_slot;
335
336         struct work_struct completion_worker;
337 };
338
339 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
340 #define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
341 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
342
343 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
344 {
345         u32 val;
346
347         if (likely(skdev->dbg_level < 2))
348                 return readl(skdev->mem_map[1] + offset);
349         else {
350                 barrier();
351                 val = readl(skdev->mem_map[1] + offset);
352                 barrier();
353                 pr_debug("%s:%s:%d offset %x = %x\n",
354                          skdev->name, __func__, __LINE__, offset, val);
355                 return val;
356         }
357
358 }
359
360 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
361                                    u32 offset)
362 {
363         if (likely(skdev->dbg_level < 2)) {
364                 writel(val, skdev->mem_map[1] + offset);
365                 barrier();
366         } else {
367                 barrier();
368                 writel(val, skdev->mem_map[1] + offset);
369                 barrier();
370                 pr_debug("%s:%s:%d offset %x = %x\n",
371                          skdev->name, __func__, __LINE__, offset, val);
372         }
373 }
374
375 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
376                                    u32 offset)
377 {
378         if (likely(skdev->dbg_level < 2)) {
379                 writeq(val, skdev->mem_map[1] + offset);
380                 barrier();
381         } else {
382                 barrier();
383                 writeq(val, skdev->mem_map[1] + offset);
384                 barrier();
385                 pr_debug("%s:%s:%d offset %x = %016llx\n",
386                          skdev->name, __func__, __LINE__, offset, val);
387         }
388 }
389
390
391 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
392 static int skd_isr_type = SKD_IRQ_DEFAULT;
393
394 module_param(skd_isr_type, int, 0444);
395 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
396                  " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
397
398 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
399 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
400
401 module_param(skd_max_req_per_msg, int, 0444);
402 MODULE_PARM_DESC(skd_max_req_per_msg,
403                  "Maximum SCSI requests packed in a single message."
404                  " (1-14, default==1)");
405
406 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
407 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
408 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
409
410 module_param(skd_max_queue_depth, int, 0444);
411 MODULE_PARM_DESC(skd_max_queue_depth,
412                  "Maximum SCSI requests issued to s1120."
413                  " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
414
415 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
416 module_param(skd_sgs_per_request, int, 0444);
417 MODULE_PARM_DESC(skd_sgs_per_request,
418                  "Maximum SG elements per block request."
419                  " (1-4096, default==256)");
420
421 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
422 module_param(skd_max_pass_thru, int, 0444);
423 MODULE_PARM_DESC(skd_max_pass_thru,
424                  "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
425
426 module_param(skd_dbg_level, int, 0444);
427 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
428
429 module_param(skd_isr_comp_limit, int, 0444);
430 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
431
432 /* Major device number dynamically assigned. */
433 static u32 skd_major;
434
435 static void skd_destruct(struct skd_device *skdev);
436 static const struct block_device_operations skd_blockdev_ops;
437 static void skd_send_fitmsg(struct skd_device *skdev,
438                             struct skd_fitmsg_context *skmsg);
439 static void skd_send_special_fitmsg(struct skd_device *skdev,
440                                     struct skd_special_context *skspcl);
441 static void skd_request_fn(struct request_queue *rq);
442 static void skd_end_request(struct skd_device *skdev,
443                 struct skd_request_context *skreq, blk_status_t status);
444 static bool skd_preop_sg_list(struct skd_device *skdev,
445                              struct skd_request_context *skreq);
446 static void skd_postop_sg_list(struct skd_device *skdev,
447                                struct skd_request_context *skreq);
448
449 static void skd_restart_device(struct skd_device *skdev);
450 static int skd_quiesce_dev(struct skd_device *skdev);
451 static int skd_unquiesce_dev(struct skd_device *skdev);
452 static void skd_release_special(struct skd_device *skdev,
453                                 struct skd_special_context *skspcl);
454 static void skd_disable_interrupts(struct skd_device *skdev);
455 static void skd_isr_fwstate(struct skd_device *skdev);
456 static void skd_recover_requests(struct skd_device *skdev, int requeue);
457 static void skd_soft_reset(struct skd_device *skdev);
458
459 static const char *skd_name(struct skd_device *skdev);
460 const char *skd_drive_state_to_str(int state);
461 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
462 static void skd_log_skdev(struct skd_device *skdev, const char *event);
463 static void skd_log_skmsg(struct skd_device *skdev,
464                           struct skd_fitmsg_context *skmsg, const char *event);
465 static void skd_log_skreq(struct skd_device *skdev,
466                           struct skd_request_context *skreq, const char *event);
467
468 /*
469  *****************************************************************************
470  * READ/WRITE REQUESTS
471  *****************************************************************************
472  */
473 static void skd_fail_all_pending(struct skd_device *skdev)
474 {
475         struct request_queue *q = skdev->queue;
476         struct request *req;
477
478         for (;; ) {
479                 req = blk_peek_request(q);
480                 if (req == NULL)
481                         break;
482                 blk_start_request(req);
483                 __blk_end_request_all(req, BLK_STS_IOERR);
484         }
485 }
486
487 static void
488 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
489                 int data_dir, unsigned lba,
490                 unsigned count)
491 {
492         if (data_dir == READ)
493                 scsi_req->cdb[0] = 0x28;
494         else
495                 scsi_req->cdb[0] = 0x2a;
496
497         scsi_req->cdb[1] = 0;
498         scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
499         scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
500         scsi_req->cdb[4] = (lba & 0xff00) >> 8;
501         scsi_req->cdb[5] = (lba & 0xff);
502         scsi_req->cdb[6] = 0;
503         scsi_req->cdb[7] = (count & 0xff00) >> 8;
504         scsi_req->cdb[8] = count & 0xff;
505         scsi_req->cdb[9] = 0;
506 }
507
508 static void
509 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
510                             struct skd_request_context *skreq)
511 {
512         skreq->flush_cmd = 1;
513
514         scsi_req->cdb[0] = 0x35;
515         scsi_req->cdb[1] = 0;
516         scsi_req->cdb[2] = 0;
517         scsi_req->cdb[3] = 0;
518         scsi_req->cdb[4] = 0;
519         scsi_req->cdb[5] = 0;
520         scsi_req->cdb[6] = 0;
521         scsi_req->cdb[7] = 0;
522         scsi_req->cdb[8] = 0;
523         scsi_req->cdb[9] = 0;
524 }
525
526 static void skd_request_fn_not_online(struct request_queue *q);
527
528 static void skd_request_fn(struct request_queue *q)
529 {
530         struct skd_device *skdev = q->queuedata;
531         struct skd_fitmsg_context *skmsg = NULL;
532         struct fit_msg_hdr *fmh = NULL;
533         struct skd_request_context *skreq;
534         struct request *req = NULL;
535         struct skd_scsi_request *scsi_req;
536         unsigned long io_flags;
537         u32 lba;
538         u32 count;
539         int data_dir;
540         u64 be_dmaa;
541         u64 cmdctxt;
542         u32 timo_slot;
543         void *cmd_ptr;
544         int flush, fua;
545
546         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
547                 skd_request_fn_not_online(q);
548                 return;
549         }
550
551         if (blk_queue_stopped(skdev->queue)) {
552                 if (skdev->skmsg_free_list == NULL ||
553                     skdev->skreq_free_list == NULL ||
554                     skdev->in_flight >= skdev->queue_low_water_mark)
555                         /* There is still some kind of shortage */
556                         return;
557
558                 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
559         }
560
561         /*
562          * Stop conditions:
563          *  - There are no more native requests
564          *  - There are already the maximum number of requests in progress
565          *  - There are no more skd_request_context entries
566          *  - There are no more FIT msg buffers
567          */
568         for (;; ) {
569
570                 flush = fua = 0;
571
572                 req = blk_peek_request(q);
573
574                 /* Are there any native requests to start? */
575                 if (req == NULL)
576                         break;
577
578                 lba = (u32)blk_rq_pos(req);
579                 count = blk_rq_sectors(req);
580                 data_dir = rq_data_dir(req);
581                 io_flags = req->cmd_flags;
582
583                 if (req_op(req) == REQ_OP_FLUSH)
584                         flush++;
585
586                 if (io_flags & REQ_FUA)
587                         fua++;
588
589                 pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
590                          "count=%u(0x%x) dir=%d\n",
591                          skdev->name, __func__, __LINE__,
592                          req, lba, lba, count, count, data_dir);
593
594                 /* At this point we know there is a request */
595
596                 /* Are too many requets already in progress? */
597                 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
598                         pr_debug("%s:%s:%d qdepth %d, limit %d\n",
599                                  skdev->name, __func__, __LINE__,
600                                  skdev->in_flight, skdev->cur_max_queue_depth);
601                         break;
602                 }
603
604                 /* Is a skd_request_context available? */
605                 skreq = skdev->skreq_free_list;
606                 if (skreq == NULL) {
607                         pr_debug("%s:%s:%d Out of req=%p\n",
608                                  skdev->name, __func__, __LINE__, q);
609                         break;
610                 }
611                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
612                 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
613
614                 /* Now we check to see if we can get a fit msg */
615                 if (skmsg == NULL) {
616                         if (skdev->skmsg_free_list == NULL) {
617                                 pr_debug("%s:%s:%d Out of msg\n",
618                                          skdev->name, __func__, __LINE__);
619                                 break;
620                         }
621                 }
622
623                 skreq->flush_cmd = 0;
624                 skreq->n_sg = 0;
625                 skreq->sg_byte_count = 0;
626
627                 /*
628                  * OK to now dequeue request from q.
629                  *
630                  * At this point we are comitted to either start or reject
631                  * the native request. Note that skd_request_context is
632                  * available but is still at the head of the free list.
633                  */
634                 blk_start_request(req);
635                 skreq->req = req;
636                 skreq->fitmsg_id = 0;
637
638                 /* Either a FIT msg is in progress or we have to start one. */
639                 if (skmsg == NULL) {
640                         /* Are there any FIT msg buffers available? */
641                         skmsg = skdev->skmsg_free_list;
642                         if (skmsg == NULL) {
643                                 pr_debug("%s:%s:%d Out of msg skdev=%p\n",
644                                          skdev->name, __func__, __LINE__,
645                                          skdev);
646                                 break;
647                         }
648                         SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
649                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
650
651                         skdev->skmsg_free_list = skmsg->next;
652
653                         skmsg->state = SKD_MSG_STATE_BUSY;
654                         skmsg->id += SKD_ID_INCR;
655
656                         /* Initialize the FIT msg header */
657                         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
658                         memset(fmh, 0, sizeof(*fmh));
659                         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
660                         skmsg->length = sizeof(*fmh);
661                 }
662
663                 skreq->fitmsg_id = skmsg->id;
664
665                 /*
666                  * Note that a FIT msg may have just been started
667                  * but contains no SoFIT requests yet.
668                  */
669
670                 /*
671                  * Transcode the request, checking as we go. The outcome of
672                  * the transcoding is represented by the error variable.
673                  */
674                 cmd_ptr = &skmsg->msg_buf[skmsg->length];
675                 memset(cmd_ptr, 0, 32);
676
677                 be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
678                 cmdctxt = skreq->id + SKD_ID_INCR;
679
680                 scsi_req = cmd_ptr;
681                 scsi_req->hdr.tag = cmdctxt;
682                 scsi_req->hdr.sg_list_dma_address = be_dmaa;
683
684                 if (data_dir == READ)
685                         skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
686                 else
687                         skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
688
689                 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
690                         skd_prep_zerosize_flush_cdb(scsi_req, skreq);
691                         SKD_ASSERT(skreq->flush_cmd == 1);
692                 } else {
693                         skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
694                 }
695
696                 if (fua)
697                         scsi_req->cdb[1] |= SKD_FUA_NV;
698
699                 if (!req->bio)
700                         goto skip_sg;
701
702                 if (!skd_preop_sg_list(skdev, skreq)) {
703                         /*
704                          * Complete the native request with error.
705                          * Note that the request context is still at the
706                          * head of the free list, and that the SoFIT request
707                          * was encoded into the FIT msg buffer but the FIT
708                          * msg length has not been updated. In short, the
709                          * only resource that has been allocated but might
710                          * not be used is that the FIT msg could be empty.
711                          */
712                         pr_debug("%s:%s:%d error Out\n",
713                                  skdev->name, __func__, __LINE__);
714                         skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
715                         continue;
716                 }
717
718 skip_sg:
719                 scsi_req->hdr.sg_list_len_bytes =
720                         cpu_to_be32(skreq->sg_byte_count);
721
722                 /* Complete resource allocations. */
723                 skdev->skreq_free_list = skreq->next;
724                 skreq->state = SKD_REQ_STATE_BUSY;
725                 skreq->id += SKD_ID_INCR;
726
727                 skmsg->length += sizeof(struct skd_scsi_request);
728                 fmh->num_protocol_cmds_coalesced++;
729
730                 /*
731                  * Update the active request counts.
732                  * Capture the timeout timestamp.
733                  */
734                 skreq->timeout_stamp = skdev->timeout_stamp;
735                 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
736                 skdev->timeout_slot[timo_slot]++;
737                 skdev->in_flight++;
738                 pr_debug("%s:%s:%d req=0x%x busy=%d\n",
739                          skdev->name, __func__, __LINE__,
740                          skreq->id, skdev->in_flight);
741
742                 /*
743                  * If the FIT msg buffer is full send it.
744                  */
745                 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
746                     fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
747                         skd_send_fitmsg(skdev, skmsg);
748                         skmsg = NULL;
749                         fmh = NULL;
750                 }
751         }
752
753         /*
754          * Is a FIT msg in progress? If it is empty put the buffer back
755          * on the free list. If it is non-empty send what we got.
756          * This minimizes latency when there are fewer requests than
757          * what fits in a FIT msg.
758          */
759         if (skmsg != NULL) {
760                 /* Bigger than just a FIT msg header? */
761                 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
762                         pr_debug("%s:%s:%d sending msg=%p, len %d\n",
763                                  skdev->name, __func__, __LINE__,
764                                  skmsg, skmsg->length);
765                         skd_send_fitmsg(skdev, skmsg);
766                 } else {
767                         /*
768                          * The FIT msg is empty. It means we got started
769                          * on the msg, but the requests were rejected.
770                          */
771                         skmsg->state = SKD_MSG_STATE_IDLE;
772                         skmsg->id += SKD_ID_INCR;
773                         skmsg->next = skdev->skmsg_free_list;
774                         skdev->skmsg_free_list = skmsg;
775                 }
776                 skmsg = NULL;
777                 fmh = NULL;
778         }
779
780         /*
781          * If req is non-NULL it means there is something to do but
782          * we are out of a resource.
783          */
784         if (req)
785                 blk_stop_queue(skdev->queue);
786 }
787
788 static void skd_end_request(struct skd_device *skdev,
789                 struct skd_request_context *skreq, blk_status_t error)
790 {
791         if (unlikely(error)) {
792                 struct request *req = skreq->req;
793                 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
794                 u32 lba = (u32)blk_rq_pos(req);
795                 u32 count = blk_rq_sectors(req);
796
797                 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
798                        skd_name(skdev), cmd, lba, count, skreq->id);
799         } else
800                 pr_debug("%s:%s:%d id=0x%x error=%d\n",
801                          skdev->name, __func__, __LINE__, skreq->id, error);
802
803         __blk_end_request_all(skreq->req, error);
804 }
805
806 static bool skd_preop_sg_list(struct skd_device *skdev,
807                              struct skd_request_context *skreq)
808 {
809         struct request *req = skreq->req;
810         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
811         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
812         struct scatterlist *sg = &skreq->sg[0];
813         int n_sg;
814         int i;
815
816         skreq->sg_byte_count = 0;
817
818         /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
819                    skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
820
821         n_sg = blk_rq_map_sg(skdev->queue, req, sg);
822         if (n_sg <= 0)
823                 return false;
824
825         /*
826          * Map scatterlist to PCI bus addresses.
827          * Note PCI might change the number of entries.
828          */
829         n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
830         if (n_sg <= 0)
831                 return false;
832
833         SKD_ASSERT(n_sg <= skdev->sgs_per_request);
834
835         skreq->n_sg = n_sg;
836
837         for (i = 0; i < n_sg; i++) {
838                 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
839                 u32 cnt = sg_dma_len(&sg[i]);
840                 uint64_t dma_addr = sg_dma_address(&sg[i]);
841
842                 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
843                 sgd->byte_count = cnt;
844                 skreq->sg_byte_count += cnt;
845                 sgd->host_side_addr = dma_addr;
846                 sgd->dev_side_addr = 0;
847         }
848
849         skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
850         skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
851
852         if (unlikely(skdev->dbg_level > 1)) {
853                 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
854                          skdev->name, __func__, __LINE__,
855                          skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
856                 for (i = 0; i < n_sg; i++) {
857                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
858                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
859                                  "addr=0x%llx next=0x%llx\n",
860                                  skdev->name, __func__, __LINE__,
861                                  i, sgd->byte_count, sgd->control,
862                                  sgd->host_side_addr, sgd->next_desc_ptr);
863                 }
864         }
865
866         return true;
867 }
868
869 static void skd_postop_sg_list(struct skd_device *skdev,
870                                struct skd_request_context *skreq)
871 {
872         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
873         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
874
875         /*
876          * restore the next ptr for next IO request so we
877          * don't have to set it every time.
878          */
879         skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
880                 skreq->sksg_dma_address +
881                 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
882         pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
883 }
884
885 static void skd_request_fn_not_online(struct request_queue *q)
886 {
887         struct skd_device *skdev = q->queuedata;
888
889         SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
890
891         skd_log_skdev(skdev, "req_not_online");
892         switch (skdev->state) {
893         case SKD_DRVR_STATE_PAUSING:
894         case SKD_DRVR_STATE_PAUSED:
895         case SKD_DRVR_STATE_STARTING:
896         case SKD_DRVR_STATE_RESTARTING:
897         case SKD_DRVR_STATE_WAIT_BOOT:
898         /* In case of starting, we haven't started the queue,
899          * so we can't get here... but requests are
900          * possibly hanging out waiting for us because we
901          * reported the dev/skd0 already.  They'll wait
902          * forever if connect doesn't complete.
903          * What to do??? delay dev/skd0 ??
904          */
905         case SKD_DRVR_STATE_BUSY:
906         case SKD_DRVR_STATE_BUSY_IMMINENT:
907         case SKD_DRVR_STATE_BUSY_ERASE:
908         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
909                 return;
910
911         case SKD_DRVR_STATE_BUSY_SANITIZE:
912         case SKD_DRVR_STATE_STOPPING:
913         case SKD_DRVR_STATE_SYNCING:
914         case SKD_DRVR_STATE_FAULT:
915         case SKD_DRVR_STATE_DISAPPEARED:
916         default:
917                 break;
918         }
919
920         /* If we get here, terminate all pending block requeusts
921          * with EIO and any scsi pass thru with appropriate sense
922          */
923
924         skd_fail_all_pending(skdev);
925 }
926
927 /*
928  *****************************************************************************
929  * TIMER
930  *****************************************************************************
931  */
932
933 static void skd_timer_tick_not_online(struct skd_device *skdev);
934
935 static void skd_timer_tick(ulong arg)
936 {
937         struct skd_device *skdev = (struct skd_device *)arg;
938
939         u32 timo_slot;
940         unsigned long reqflags;
941         u32 state;
942
943         if (skdev->state == SKD_DRVR_STATE_FAULT)
944                 /* The driver has declared fault, and we want it to
945                  * stay that way until driver is reloaded.
946                  */
947                 return;
948
949         spin_lock_irqsave(&skdev->lock, reqflags);
950
951         state = SKD_READL(skdev, FIT_STATUS);
952         state &= FIT_SR_DRIVE_STATE_MASK;
953         if (state != skdev->drive_state)
954                 skd_isr_fwstate(skdev);
955
956         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
957                 skd_timer_tick_not_online(skdev);
958                 goto timer_func_out;
959         }
960         skdev->timeout_stamp++;
961         timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
962
963         /*
964          * All requests that happened during the previous use of
965          * this slot should be done by now. The previous use was
966          * over 7 seconds ago.
967          */
968         if (skdev->timeout_slot[timo_slot] == 0)
969                 goto timer_func_out;
970
971         /* Something is overdue */
972         pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
973                  skdev->name, __func__, __LINE__,
974                  skdev->timeout_slot[timo_slot], skdev->in_flight);
975         pr_err("(%s): Overdue IOs (%d), busy %d\n",
976                skd_name(skdev), skdev->timeout_slot[timo_slot],
977                skdev->in_flight);
978
979         skdev->timer_countdown = SKD_DRAINING_TIMO;
980         skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
981         skdev->timo_slot = timo_slot;
982         blk_stop_queue(skdev->queue);
983
984 timer_func_out:
985         mod_timer(&skdev->timer, (jiffies + HZ));
986
987         spin_unlock_irqrestore(&skdev->lock, reqflags);
988 }
989
990 static void skd_timer_tick_not_online(struct skd_device *skdev)
991 {
992         switch (skdev->state) {
993         case SKD_DRVR_STATE_IDLE:
994         case SKD_DRVR_STATE_LOAD:
995                 break;
996         case SKD_DRVR_STATE_BUSY_SANITIZE:
997                 pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
998                          skdev->name, __func__, __LINE__,
999                          skdev->drive_state, skdev->state);
1000                 /* If we've been in sanitize for 3 seconds, we figure we're not
1001                  * going to get anymore completions, so recover requests now
1002                  */
1003                 if (skdev->timer_countdown > 0) {
1004                         skdev->timer_countdown--;
1005                         return;
1006                 }
1007                 skd_recover_requests(skdev, 0);
1008                 break;
1009
1010         case SKD_DRVR_STATE_BUSY:
1011         case SKD_DRVR_STATE_BUSY_IMMINENT:
1012         case SKD_DRVR_STATE_BUSY_ERASE:
1013                 pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1014                          skdev->name, __func__, __LINE__,
1015                          skdev->state, skdev->timer_countdown);
1016                 if (skdev->timer_countdown > 0) {
1017                         skdev->timer_countdown--;
1018                         return;
1019                 }
1020                 pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
1021                          skdev->name, __func__, __LINE__,
1022                          skdev->state, skdev->timer_countdown);
1023                 skd_restart_device(skdev);
1024                 break;
1025
1026         case SKD_DRVR_STATE_WAIT_BOOT:
1027         case SKD_DRVR_STATE_STARTING:
1028                 if (skdev->timer_countdown > 0) {
1029                         skdev->timer_countdown--;
1030                         return;
1031                 }
1032                 /* For now, we fault the drive.  Could attempt resets to
1033                  * revcover at some point. */
1034                 skdev->state = SKD_DRVR_STATE_FAULT;
1035
1036                 pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1037                        skd_name(skdev), skdev->drive_state);
1038
1039                 /*start the queue so we can respond with error to requests */
1040                 /* wakeup anyone waiting for startup complete */
1041                 blk_start_queue(skdev->queue);
1042                 skdev->gendisk_on = -1;
1043                 wake_up_interruptible(&skdev->waitq);
1044                 break;
1045
1046         case SKD_DRVR_STATE_ONLINE:
1047                 /* shouldn't get here. */
1048                 break;
1049
1050         case SKD_DRVR_STATE_PAUSING:
1051         case SKD_DRVR_STATE_PAUSED:
1052                 break;
1053
1054         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1055                 pr_debug("%s:%s:%d "
1056                          "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1057                          skdev->name, __func__, __LINE__,
1058                          skdev->timo_slot,
1059                          skdev->timer_countdown,
1060                          skdev->in_flight,
1061                          skdev->timeout_slot[skdev->timo_slot]);
1062                 /* if the slot has cleared we can let the I/O continue */
1063                 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1064                         pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1065                                  skdev->name, __func__, __LINE__);
1066                         skdev->state = SKD_DRVR_STATE_ONLINE;
1067                         blk_start_queue(skdev->queue);
1068                         return;
1069                 }
1070                 if (skdev->timer_countdown > 0) {
1071                         skdev->timer_countdown--;
1072                         return;
1073                 }
1074                 skd_restart_device(skdev);
1075                 break;
1076
1077         case SKD_DRVR_STATE_RESTARTING:
1078                 if (skdev->timer_countdown > 0) {
1079                         skdev->timer_countdown--;
1080                         return;
1081                 }
1082                 /* For now, we fault the drive. Could attempt resets to
1083                  * revcover at some point. */
1084                 skdev->state = SKD_DRVR_STATE_FAULT;
1085                 pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1086                        skd_name(skdev), skdev->drive_state);
1087
1088                 /*
1089                  * Recovering does two things:
1090                  * 1. completes IO with error
1091                  * 2. reclaims dma resources
1092                  * When is it safe to recover requests?
1093                  * - if the drive state is faulted
1094                  * - if the state is still soft reset after out timeout
1095                  * - if the drive registers are dead (state = FF)
1096                  * If it is "unsafe", we still need to recover, so we will
1097                  * disable pci bus mastering and disable our interrupts.
1098                  */
1099
1100                 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1101                     (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1102                     (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1103                         /* It never came out of soft reset. Try to
1104                          * recover the requests and then let them
1105                          * fail. This is to mitigate hung processes. */
1106                         skd_recover_requests(skdev, 0);
1107                 else {
1108                         pr_err("(%s): Disable BusMaster (%x)\n",
1109                                skd_name(skdev), skdev->drive_state);
1110                         pci_disable_device(skdev->pdev);
1111                         skd_disable_interrupts(skdev);
1112                         skd_recover_requests(skdev, 0);
1113                 }
1114
1115                 /*start the queue so we can respond with error to requests */
1116                 /* wakeup anyone waiting for startup complete */
1117                 blk_start_queue(skdev->queue);
1118                 skdev->gendisk_on = -1;
1119                 wake_up_interruptible(&skdev->waitq);
1120                 break;
1121
1122         case SKD_DRVR_STATE_RESUMING:
1123         case SKD_DRVR_STATE_STOPPING:
1124         case SKD_DRVR_STATE_SYNCING:
1125         case SKD_DRVR_STATE_FAULT:
1126         case SKD_DRVR_STATE_DISAPPEARED:
1127         default:
1128                 break;
1129         }
1130 }
1131
1132 static int skd_start_timer(struct skd_device *skdev)
1133 {
1134         int rc;
1135
1136         init_timer(&skdev->timer);
1137         setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1138
1139         rc = mod_timer(&skdev->timer, (jiffies + HZ));
1140         if (rc)
1141                 pr_err("%s: failed to start timer %d\n",
1142                        __func__, rc);
1143         return rc;
1144 }
1145
1146 static void skd_kill_timer(struct skd_device *skdev)
1147 {
1148         del_timer_sync(&skdev->timer);
1149 }
1150
1151 /*
1152  *****************************************************************************
1153  * IOCTL
1154  *****************************************************************************
1155  */
1156 static int skd_ioctl_sg_io(struct skd_device *skdev,
1157                            fmode_t mode, void __user *argp);
1158 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1159                                         struct skd_sg_io *sksgio);
1160 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1161                                    struct skd_sg_io *sksgio);
1162 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1163                                     struct skd_sg_io *sksgio);
1164 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1165                                  struct skd_sg_io *sksgio, int dxfer_dir);
1166 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1167                                  struct skd_sg_io *sksgio);
1168 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1169 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1170                                     struct skd_sg_io *sksgio);
1171 static int skd_sg_io_put_status(struct skd_device *skdev,
1172                                 struct skd_sg_io *sksgio);
1173
1174 static void skd_complete_special(struct skd_device *skdev,
1175                                  volatile struct fit_completion_entry_v1
1176                                  *skcomp,
1177                                  volatile struct fit_comp_error_info *skerr,
1178                                  struct skd_special_context *skspcl);
1179
1180 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1181                           uint cmd_in, ulong arg)
1182 {
1183         static const int sg_version_num = 30527;
1184         int rc = 0, timeout;
1185         struct gendisk *disk = bdev->bd_disk;
1186         struct skd_device *skdev = disk->private_data;
1187         int __user *p = (int __user *)arg;
1188
1189         pr_debug("%s:%s:%d %s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
1190                  skdev->name, __func__, __LINE__,
1191                  disk->disk_name, current->comm, mode, cmd_in, arg);
1192
1193         if (!capable(CAP_SYS_ADMIN))
1194                 return -EPERM;
1195
1196         switch (cmd_in) {
1197         case SG_SET_TIMEOUT:
1198                 rc = get_user(timeout, p);
1199                 if (!rc)
1200                         disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
1201                 break;
1202         case SG_GET_TIMEOUT:
1203                 rc = jiffies_to_clock_t(disk->queue->sg_timeout);
1204                 break;
1205         case SG_GET_VERSION_NUM:
1206                 rc = put_user(sg_version_num, p);
1207                 break;
1208         case SG_IO:
1209                 rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
1210                 break;
1211
1212         default:
1213                 rc = -ENOTTY;
1214                 break;
1215         }
1216
1217         pr_debug("%s:%s:%d %s:  completion rc %d\n",
1218                  skdev->name, __func__, __LINE__, disk->disk_name, rc);
1219         return rc;
1220 }
1221
1222 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1223                            void __user *argp)
1224 {
1225         int rc;
1226         struct skd_sg_io sksgio;
1227
1228         memset(&sksgio, 0, sizeof(sksgio));
1229         sksgio.mode = mode;
1230         sksgio.argp = argp;
1231         sksgio.iov = &sksgio.no_iov_iov;
1232
1233         switch (skdev->state) {
1234         case SKD_DRVR_STATE_ONLINE:
1235         case SKD_DRVR_STATE_BUSY_IMMINENT:
1236                 break;
1237
1238         default:
1239                 pr_debug("%s:%s:%d drive not online\n",
1240                          skdev->name, __func__, __LINE__);
1241                 rc = -ENXIO;
1242                 goto out;
1243         }
1244
1245         rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1246         if (rc)
1247                 goto out;
1248
1249         rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1250         if (rc)
1251                 goto out;
1252
1253         rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1254         if (rc)
1255                 goto out;
1256
1257         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1258         if (rc)
1259                 goto out;
1260
1261         rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1262         if (rc)
1263                 goto out;
1264
1265         rc = skd_sg_io_await(skdev, &sksgio);
1266         if (rc)
1267                 goto out;
1268
1269         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1270         if (rc)
1271                 goto out;
1272
1273         rc = skd_sg_io_put_status(skdev, &sksgio);
1274         if (rc)
1275                 goto out;
1276
1277         rc = 0;
1278
1279 out:
1280         skd_sg_io_release_skspcl(skdev, &sksgio);
1281
1282         if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1283                 kfree(sksgio.iov);
1284         return rc;
1285 }
1286
1287 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1288                                         struct skd_sg_io *sksgio)
1289 {
1290         struct sg_io_hdr *sgp = &sksgio->sg;
1291         int i, __maybe_unused acc;
1292
1293         if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1294                 pr_debug("%s:%s:%d access sg failed %p\n",
1295                          skdev->name, __func__, __LINE__, sksgio->argp);
1296                 return -EFAULT;
1297         }
1298
1299         if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1300                 pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1301                          skdev->name, __func__, __LINE__, sksgio->argp);
1302                 return -EFAULT;
1303         }
1304
1305         if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1306                 pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1307                          skdev->name, __func__, __LINE__, sgp->interface_id);
1308                 return -EINVAL;
1309         }
1310
1311         if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1312                 pr_debug("%s:%s:%d cmd_len invalid %d\n",
1313                          skdev->name, __func__, __LINE__, sgp->cmd_len);
1314                 return -EINVAL;
1315         }
1316
1317         if (sgp->iovec_count > 256) {
1318                 pr_debug("%s:%s:%d iovec_count invalid %d\n",
1319                          skdev->name, __func__, __LINE__, sgp->iovec_count);
1320                 return -EINVAL;
1321         }
1322
1323         if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1324                 pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1325                          skdev->name, __func__, __LINE__, sgp->dxfer_len);
1326                 return -EINVAL;
1327         }
1328
1329         switch (sgp->dxfer_direction) {
1330         case SG_DXFER_NONE:
1331                 acc = -1;
1332                 break;
1333
1334         case SG_DXFER_TO_DEV:
1335                 acc = VERIFY_READ;
1336                 break;
1337
1338         case SG_DXFER_FROM_DEV:
1339         case SG_DXFER_TO_FROM_DEV:
1340                 acc = VERIFY_WRITE;
1341                 break;
1342
1343         default:
1344                 pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1345                          skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1346                 return -EINVAL;
1347         }
1348
1349         if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1350                 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1351                          skdev->name, __func__, __LINE__, sgp->cmdp);
1352                 return -EFAULT;
1353         }
1354
1355         if (sgp->mx_sb_len != 0) {
1356                 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1357                         pr_debug("%s:%s:%d access sbp failed %p\n",
1358                                  skdev->name, __func__, __LINE__, sgp->sbp);
1359                         return -EFAULT;
1360                 }
1361         }
1362
1363         if (sgp->iovec_count == 0) {
1364                 sksgio->iov[0].iov_base = sgp->dxferp;
1365                 sksgio->iov[0].iov_len = sgp->dxfer_len;
1366                 sksgio->iovcnt = 1;
1367                 sksgio->dxfer_len = sgp->dxfer_len;
1368         } else {
1369                 struct sg_iovec *iov;
1370                 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1371                 size_t iov_data_len;
1372
1373                 iov = kmalloc(nbytes, GFP_KERNEL);
1374                 if (iov == NULL) {
1375                         pr_debug("%s:%s:%d alloc iovec failed %d\n",
1376                                  skdev->name, __func__, __LINE__,
1377                                  sgp->iovec_count);
1378                         return -ENOMEM;
1379                 }
1380                 sksgio->iov = iov;
1381                 sksgio->iovcnt = sgp->iovec_count;
1382
1383                 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1384                         pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1385                                  skdev->name, __func__, __LINE__, sgp->dxferp);
1386                         return -EFAULT;
1387                 }
1388
1389                 /*
1390                  * Sum up the vecs, making sure they don't overflow
1391                  */
1392                 iov_data_len = 0;
1393                 for (i = 0; i < sgp->iovec_count; i++) {
1394                         if (iov_data_len + iov[i].iov_len < iov_data_len)
1395                                 return -EINVAL;
1396                         iov_data_len += iov[i].iov_len;
1397                 }
1398
1399                 /* SG_IO howto says that the shorter of the two wins */
1400                 if (sgp->dxfer_len < iov_data_len) {
1401                         sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1402                                                      sgp->iovec_count,
1403                                                      sgp->dxfer_len);
1404                         sksgio->dxfer_len = sgp->dxfer_len;
1405                 } else
1406                         sksgio->dxfer_len = iov_data_len;
1407         }
1408
1409         if (sgp->dxfer_direction != SG_DXFER_NONE) {
1410                 struct sg_iovec *iov = sksgio->iov;
1411                 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1412                         if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1413                                 pr_debug("%s:%s:%d access data failed %p/%d\n",
1414                                          skdev->name, __func__, __LINE__,
1415                                          iov->iov_base, (int)iov->iov_len);
1416                                 return -EFAULT;
1417                         }
1418                 }
1419         }
1420
1421         return 0;
1422 }
1423
1424 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1425                                    struct skd_sg_io *sksgio)
1426 {
1427         struct skd_special_context *skspcl = NULL;
1428         int rc;
1429
1430         for (;;) {
1431                 ulong flags;
1432
1433                 spin_lock_irqsave(&skdev->lock, flags);
1434                 skspcl = skdev->skspcl_free_list;
1435                 if (skspcl != NULL) {
1436                         skdev->skspcl_free_list =
1437                                 (struct skd_special_context *)skspcl->req.next;
1438                         skspcl->req.id += SKD_ID_INCR;
1439                         skspcl->req.state = SKD_REQ_STATE_SETUP;
1440                         skspcl->orphaned = 0;
1441                         skspcl->req.n_sg = 0;
1442                 }
1443                 spin_unlock_irqrestore(&skdev->lock, flags);
1444
1445                 if (skspcl != NULL) {
1446                         rc = 0;
1447                         break;
1448                 }
1449
1450                 pr_debug("%s:%s:%d blocking\n",
1451                          skdev->name, __func__, __LINE__);
1452
1453                 rc = wait_event_interruptible_timeout(
1454                                 skdev->waitq,
1455                                 (skdev->skspcl_free_list != NULL),
1456                                 msecs_to_jiffies(sksgio->sg.timeout));
1457
1458                 pr_debug("%s:%s:%d unblocking, rc=%d\n",
1459                          skdev->name, __func__, __LINE__, rc);
1460
1461                 if (rc <= 0) {
1462                         if (rc == 0)
1463                                 rc = -ETIMEDOUT;
1464                         else
1465                                 rc = -EINTR;
1466                         break;
1467                 }
1468                 /*
1469                  * If we get here rc > 0 meaning the timeout to
1470                  * wait_event_interruptible_timeout() had time left, hence the
1471                  * sought event -- non-empty free list -- happened.
1472                  * Retry the allocation.
1473                  */
1474         }
1475         sksgio->skspcl = skspcl;
1476
1477         return rc;
1478 }
1479
1480 static int skd_skreq_prep_buffering(struct skd_device *skdev,
1481                                     struct skd_request_context *skreq,
1482                                     u32 dxfer_len)
1483 {
1484         u32 resid = dxfer_len;
1485
1486         /*
1487          * The DMA engine must have aligned addresses and byte counts.
1488          */
1489         resid += (-resid) & 3;
1490         skreq->sg_byte_count = resid;
1491
1492         skreq->n_sg = 0;
1493
1494         while (resid > 0) {
1495                 u32 nbytes = PAGE_SIZE;
1496                 u32 ix = skreq->n_sg;
1497                 struct scatterlist *sg = &skreq->sg[ix];
1498                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1499                 struct page *page;
1500
1501                 if (nbytes > resid)
1502                         nbytes = resid;
1503
1504                 page = alloc_page(GFP_KERNEL);
1505                 if (page == NULL)
1506                         return -ENOMEM;
1507
1508                 sg_set_page(sg, page, nbytes, 0);
1509
1510                 /* TODO: This should be going through a pci_???()
1511                  * routine to do proper mapping. */
1512                 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1513                 sksg->byte_count = nbytes;
1514
1515                 sksg->host_side_addr = sg_phys(sg);
1516
1517                 sksg->dev_side_addr = 0;
1518                 sksg->next_desc_ptr = skreq->sksg_dma_address +
1519                                       (ix + 1) * sizeof(*sksg);
1520
1521                 skreq->n_sg++;
1522                 resid -= nbytes;
1523         }
1524
1525         if (skreq->n_sg > 0) {
1526                 u32 ix = skreq->n_sg - 1;
1527                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1528
1529                 sksg->control = FIT_SGD_CONTROL_LAST;
1530                 sksg->next_desc_ptr = 0;
1531         }
1532
1533         if (unlikely(skdev->dbg_level > 1)) {
1534                 u32 i;
1535
1536                 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1537                          skdev->name, __func__, __LINE__,
1538                          skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1539                 for (i = 0; i < skreq->n_sg; i++) {
1540                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1541
1542                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
1543                                  "addr=0x%llx next=0x%llx\n",
1544                                  skdev->name, __func__, __LINE__,
1545                                  i, sgd->byte_count, sgd->control,
1546                                  sgd->host_side_addr, sgd->next_desc_ptr);
1547                 }
1548         }
1549
1550         return 0;
1551 }
1552
1553 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1554                                     struct skd_sg_io *sksgio)
1555 {
1556         struct skd_special_context *skspcl = sksgio->skspcl;
1557         struct skd_request_context *skreq = &skspcl->req;
1558         u32 dxfer_len = sksgio->dxfer_len;
1559         int rc;
1560
1561         rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1562         /*
1563          * Eventually, errors or not, skd_release_special() is called
1564          * to recover allocations including partial allocations.
1565          */
1566         return rc;
1567 }
1568
1569 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1570                                  struct skd_sg_io *sksgio, int dxfer_dir)
1571 {
1572         struct skd_special_context *skspcl = sksgio->skspcl;
1573         u32 iov_ix = 0;
1574         struct sg_iovec curiov;
1575         u32 sksg_ix = 0;
1576         u8 *bufp = NULL;
1577         u32 buf_len = 0;
1578         u32 resid = sksgio->dxfer_len;
1579         int rc;
1580
1581         curiov.iov_len = 0;
1582         curiov.iov_base = NULL;
1583
1584         if (dxfer_dir != sksgio->sg.dxfer_direction) {
1585                 if (dxfer_dir != SG_DXFER_TO_DEV ||
1586                     sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1587                         return 0;
1588         }
1589
1590         while (resid > 0) {
1591                 u32 nbytes = PAGE_SIZE;
1592
1593                 if (curiov.iov_len == 0) {
1594                         curiov = sksgio->iov[iov_ix++];
1595                         continue;
1596                 }
1597
1598                 if (buf_len == 0) {
1599                         struct page *page;
1600                         page = sg_page(&skspcl->req.sg[sksg_ix++]);
1601                         bufp = page_address(page);
1602                         buf_len = PAGE_SIZE;
1603                 }
1604
1605                 nbytes = min_t(u32, nbytes, resid);
1606                 nbytes = min_t(u32, nbytes, curiov.iov_len);
1607                 nbytes = min_t(u32, nbytes, buf_len);
1608
1609                 if (dxfer_dir == SG_DXFER_TO_DEV)
1610                         rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1611                 else
1612                         rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1613
1614                 if (rc)
1615                         return -EFAULT;
1616
1617                 resid -= nbytes;
1618                 curiov.iov_len -= nbytes;
1619                 curiov.iov_base += nbytes;
1620                 buf_len -= nbytes;
1621         }
1622
1623         return 0;
1624 }
1625
1626 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1627                                  struct skd_sg_io *sksgio)
1628 {
1629         struct skd_special_context *skspcl = sksgio->skspcl;
1630         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1631         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1632
1633         memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1634
1635         /* Initialize the FIT msg header */
1636         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1637         fmh->num_protocol_cmds_coalesced = 1;
1638
1639         /* Initialize the SCSI request */
1640         if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1641                 scsi_req->hdr.sg_list_dma_address =
1642                         cpu_to_be64(skspcl->req.sksg_dma_address);
1643         scsi_req->hdr.tag = skspcl->req.id;
1644         scsi_req->hdr.sg_list_len_bytes =
1645                 cpu_to_be32(skspcl->req.sg_byte_count);
1646         memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1647
1648         skspcl->req.state = SKD_REQ_STATE_BUSY;
1649         skd_send_special_fitmsg(skdev, skspcl);
1650
1651         return 0;
1652 }
1653
1654 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1655 {
1656         unsigned long flags;
1657         int rc;
1658
1659         rc = wait_event_interruptible_timeout(skdev->waitq,
1660                                               (sksgio->skspcl->req.state !=
1661                                                SKD_REQ_STATE_BUSY),
1662                                               msecs_to_jiffies(sksgio->sg.
1663                                                                timeout));
1664
1665         spin_lock_irqsave(&skdev->lock, flags);
1666
1667         if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1668                 pr_debug("%s:%s:%d skspcl %p aborted\n",
1669                          skdev->name, __func__, __LINE__, sksgio->skspcl);
1670
1671                 /* Build check cond, sense and let command finish. */
1672                 /* For a timeout, we must fabricate completion and sense
1673                  * data to complete the command */
1674                 sksgio->skspcl->req.completion.status =
1675                         SAM_STAT_CHECK_CONDITION;
1676
1677                 memset(&sksgio->skspcl->req.err_info, 0,
1678                        sizeof(sksgio->skspcl->req.err_info));
1679                 sksgio->skspcl->req.err_info.type = 0x70;
1680                 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1681                 sksgio->skspcl->req.err_info.code = 0x44;
1682                 sksgio->skspcl->req.err_info.qual = 0;
1683                 rc = 0;
1684         } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1685                 /* No longer on the adapter. We finish. */
1686                 rc = 0;
1687         else {
1688                 /* Something's gone wrong. Still busy. Timeout or
1689                  * user interrupted (control-C). Mark as an orphan
1690                  * so it will be disposed when completed. */
1691                 sksgio->skspcl->orphaned = 1;
1692                 sksgio->skspcl = NULL;
1693                 if (rc == 0) {
1694                         pr_debug("%s:%s:%d timed out %p (%u ms)\n",
1695                                  skdev->name, __func__, __LINE__,
1696                                  sksgio, sksgio->sg.timeout);
1697                         rc = -ETIMEDOUT;
1698                 } else {
1699                         pr_debug("%s:%s:%d cntlc %p\n",
1700                                  skdev->name, __func__, __LINE__, sksgio);
1701                         rc = -EINTR;
1702                 }
1703         }
1704
1705         spin_unlock_irqrestore(&skdev->lock, flags);
1706
1707         return rc;
1708 }
1709
1710 static int skd_sg_io_put_status(struct skd_device *skdev,
1711                                 struct skd_sg_io *sksgio)
1712 {
1713         struct sg_io_hdr *sgp = &sksgio->sg;
1714         struct skd_special_context *skspcl = sksgio->skspcl;
1715         int resid = 0;
1716
1717         u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1718
1719         sgp->status = skspcl->req.completion.status;
1720         resid = sksgio->dxfer_len - nb;
1721
1722         sgp->masked_status = sgp->status & STATUS_MASK;
1723         sgp->msg_status = 0;
1724         sgp->host_status = 0;
1725         sgp->driver_status = 0;
1726         sgp->resid = resid;
1727         if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1728                 sgp->info |= SG_INFO_CHECK;
1729
1730         pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
1731                  skdev->name, __func__, __LINE__,
1732                  sgp->status, sgp->masked_status, sgp->resid);
1733
1734         if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1735                 if (sgp->mx_sb_len > 0) {
1736                         struct fit_comp_error_info *ei = &skspcl->req.err_info;
1737                         u32 nbytes = sizeof(*ei);
1738
1739                         nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1740
1741                         sgp->sb_len_wr = nbytes;
1742
1743                         if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1744                                 pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
1745                                          skdev->name, __func__, __LINE__,
1746                                          sgp->sbp);
1747                                 return -EFAULT;
1748                         }
1749                 }
1750         }
1751
1752         if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1753                 pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
1754                          skdev->name, __func__, __LINE__, sksgio->argp);
1755                 return -EFAULT;
1756         }
1757
1758         return 0;
1759 }
1760
1761 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1762                                     struct skd_sg_io *sksgio)
1763 {
1764         struct skd_special_context *skspcl = sksgio->skspcl;
1765
1766         if (skspcl != NULL) {
1767                 ulong flags;
1768
1769                 sksgio->skspcl = NULL;
1770
1771                 spin_lock_irqsave(&skdev->lock, flags);
1772                 skd_release_special(skdev, skspcl);
1773                 spin_unlock_irqrestore(&skdev->lock, flags);
1774         }
1775
1776         return 0;
1777 }
1778
1779 /*
1780  *****************************************************************************
1781  * INTERNAL REQUESTS -- generated by driver itself
1782  *****************************************************************************
1783  */
1784
1785 static int skd_format_internal_skspcl(struct skd_device *skdev)
1786 {
1787         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1788         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1789         struct fit_msg_hdr *fmh;
1790         uint64_t dma_address;
1791         struct skd_scsi_request *scsi;
1792
1793         fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1794         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1795         fmh->num_protocol_cmds_coalesced = 1;
1796
1797         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1798         memset(scsi, 0, sizeof(*scsi));
1799         dma_address = skspcl->req.sksg_dma_address;
1800         scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1801         sgd->control = FIT_SGD_CONTROL_LAST;
1802         sgd->byte_count = 0;
1803         sgd->host_side_addr = skspcl->db_dma_address;
1804         sgd->dev_side_addr = 0;
1805         sgd->next_desc_ptr = 0LL;
1806
1807         return 1;
1808 }
1809
1810 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1811
1812 static void skd_send_internal_skspcl(struct skd_device *skdev,
1813                                      struct skd_special_context *skspcl,
1814                                      u8 opcode)
1815 {
1816         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1817         struct skd_scsi_request *scsi;
1818         unsigned char *buf = skspcl->data_buf;
1819         int i;
1820
1821         if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1822                 /*
1823                  * A refresh is already in progress.
1824                  * Just wait for it to finish.
1825                  */
1826                 return;
1827
1828         SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1829         skspcl->req.state = SKD_REQ_STATE_BUSY;
1830         skspcl->req.id += SKD_ID_INCR;
1831
1832         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1833         scsi->hdr.tag = skspcl->req.id;
1834
1835         memset(scsi->cdb, 0, sizeof(scsi->cdb));
1836
1837         switch (opcode) {
1838         case TEST_UNIT_READY:
1839                 scsi->cdb[0] = TEST_UNIT_READY;
1840                 sgd->byte_count = 0;
1841                 scsi->hdr.sg_list_len_bytes = 0;
1842                 break;
1843
1844         case READ_CAPACITY:
1845                 scsi->cdb[0] = READ_CAPACITY;
1846                 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1847                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1848                 break;
1849
1850         case INQUIRY:
1851                 scsi->cdb[0] = INQUIRY;
1852                 scsi->cdb[1] = 0x01;    /* evpd */
1853                 scsi->cdb[2] = 0x80;    /* serial number page */
1854                 scsi->cdb[4] = 0x10;
1855                 sgd->byte_count = 16;
1856                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1857                 break;
1858
1859         case SYNCHRONIZE_CACHE:
1860                 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1861                 sgd->byte_count = 0;
1862                 scsi->hdr.sg_list_len_bytes = 0;
1863                 break;
1864
1865         case WRITE_BUFFER:
1866                 scsi->cdb[0] = WRITE_BUFFER;
1867                 scsi->cdb[1] = 0x02;
1868                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1869                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1870                 sgd->byte_count = WR_BUF_SIZE;
1871                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1872                 /* fill incrementing byte pattern */
1873                 for (i = 0; i < sgd->byte_count; i++)
1874                         buf[i] = i & 0xFF;
1875                 break;
1876
1877         case READ_BUFFER:
1878                 scsi->cdb[0] = READ_BUFFER;
1879                 scsi->cdb[1] = 0x02;
1880                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1881                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1882                 sgd->byte_count = WR_BUF_SIZE;
1883                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1884                 memset(skspcl->data_buf, 0, sgd->byte_count);
1885                 break;
1886
1887         default:
1888                 SKD_ASSERT("Don't know what to send");
1889                 return;
1890
1891         }
1892         skd_send_special_fitmsg(skdev, skspcl);
1893 }
1894
1895 static void skd_refresh_device_data(struct skd_device *skdev)
1896 {
1897         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1898
1899         skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1900 }
1901
1902 static int skd_chk_read_buf(struct skd_device *skdev,
1903                             struct skd_special_context *skspcl)
1904 {
1905         unsigned char *buf = skspcl->data_buf;
1906         int i;
1907
1908         /* check for incrementing byte pattern */
1909         for (i = 0; i < WR_BUF_SIZE; i++)
1910                 if (buf[i] != (i & 0xFF))
1911                         return 1;
1912
1913         return 0;
1914 }
1915
1916 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1917                                  u8 code, u8 qual, u8 fruc)
1918 {
1919         /* If the check condition is of special interest, log a message */
1920         if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1921             && (code == 0x04) && (qual == 0x06)) {
1922                 pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
1923                        "ascq/fruc %02x/%02x/%02x/%02x\n",
1924                        skd_name(skdev), key, code, qual, fruc);
1925         }
1926 }
1927
1928 static void skd_complete_internal(struct skd_device *skdev,
1929                                   volatile struct fit_completion_entry_v1
1930                                   *skcomp,
1931                                   volatile struct fit_comp_error_info *skerr,
1932                                   struct skd_special_context *skspcl)
1933 {
1934         u8 *buf = skspcl->data_buf;
1935         u8 status;
1936         int i;
1937         struct skd_scsi_request *scsi =
1938                 (struct skd_scsi_request *)&skspcl->msg_buf[64];
1939
1940         SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1941
1942         pr_debug("%s:%s:%d complete internal %x\n",
1943                  skdev->name, __func__, __LINE__, scsi->cdb[0]);
1944
1945         skspcl->req.completion = *skcomp;
1946         skspcl->req.state = SKD_REQ_STATE_IDLE;
1947         skspcl->req.id += SKD_ID_INCR;
1948
1949         status = skspcl->req.completion.status;
1950
1951         skd_log_check_status(skdev, status, skerr->key, skerr->code,
1952                              skerr->qual, skerr->fruc);
1953
1954         switch (scsi->cdb[0]) {
1955         case TEST_UNIT_READY:
1956                 if (status == SAM_STAT_GOOD)
1957                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1958                 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1959                          (skerr->key == MEDIUM_ERROR))
1960                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1961                 else {
1962                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1963                                 pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
1964                                          skdev->name, __func__, __LINE__,
1965                                          skdev->state);
1966                                 return;
1967                         }
1968                         pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
1969                                  skdev->name, __func__, __LINE__);
1970                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
1971                 }
1972                 break;
1973
1974         case WRITE_BUFFER:
1975                 if (status == SAM_STAT_GOOD)
1976                         skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1977                 else {
1978                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1979                                 pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
1980                                          skdev->name, __func__, __LINE__,
1981                                          skdev->state);
1982                                 return;
1983                         }
1984                         pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
1985                                  skdev->name, __func__, __LINE__);
1986                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
1987                 }
1988                 break;
1989
1990         case READ_BUFFER:
1991                 if (status == SAM_STAT_GOOD) {
1992                         if (skd_chk_read_buf(skdev, skspcl) == 0)
1993                                 skd_send_internal_skspcl(skdev, skspcl,
1994                                                          READ_CAPACITY);
1995                         else {
1996                                 pr_err("(%s):*** W/R Buffer mismatch %d ***\n",
1997                                        skd_name(skdev), skdev->connect_retries);
1998                                 if (skdev->connect_retries <
1999                                     SKD_MAX_CONNECT_RETRIES) {
2000                                         skdev->connect_retries++;
2001                                         skd_soft_reset(skdev);
2002                                 } else {
2003                                         pr_err("(%s): W/R Buffer Connect Error\n",
2004                                                skd_name(skdev));
2005                                         return;
2006                                 }
2007                         }
2008
2009                 } else {
2010                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2011                                 pr_debug("%s:%s:%d "
2012                                          "read buffer failed, don't send anymore state 0x%x\n",
2013                                          skdev->name, __func__, __LINE__,
2014                                          skdev->state);
2015                                 return;
2016                         }
2017                         pr_debug("%s:%s:%d "
2018                                  "**** read buffer failed, retry skerr\n",
2019                                  skdev->name, __func__, __LINE__);
2020                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
2021                 }
2022                 break;
2023
2024         case READ_CAPACITY:
2025                 skdev->read_cap_is_valid = 0;
2026                 if (status == SAM_STAT_GOOD) {
2027                         skdev->read_cap_last_lba =
2028                                 (buf[0] << 24) | (buf[1] << 16) |
2029                                 (buf[2] << 8) | buf[3];
2030                         skdev->read_cap_blocksize =
2031                                 (buf[4] << 24) | (buf[5] << 16) |
2032                                 (buf[6] << 8) | buf[7];
2033
2034                         pr_debug("%s:%s:%d last lba %d, bs %d\n",
2035                                  skdev->name, __func__, __LINE__,
2036                                  skdev->read_cap_last_lba,
2037                                  skdev->read_cap_blocksize);
2038
2039                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2040
2041                         skdev->read_cap_is_valid = 1;
2042
2043                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2044                 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2045                            (skerr->key == MEDIUM_ERROR)) {
2046                         skdev->read_cap_last_lba = ~0;
2047                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2048                         pr_debug("%s:%s:%d "
2049                                  "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2050                                  skdev->name, __func__, __LINE__);
2051                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2052                 } else {
2053                         pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2054                                  skdev->name, __func__, __LINE__);
2055                         skd_send_internal_skspcl(skdev, skspcl,
2056                                                  TEST_UNIT_READY);
2057                 }
2058                 break;
2059
2060         case INQUIRY:
2061                 skdev->inquiry_is_valid = 0;
2062                 if (status == SAM_STAT_GOOD) {
2063                         skdev->inquiry_is_valid = 1;
2064
2065                         for (i = 0; i < 12; i++)
2066                                 skdev->inq_serial_num[i] = buf[i + 4];
2067                         skdev->inq_serial_num[12] = 0;
2068                 }
2069
2070                 if (skd_unquiesce_dev(skdev) < 0)
2071                         pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
2072                                  skdev->name, __func__, __LINE__);
2073                  /* connection is complete */
2074                 skdev->connect_retries = 0;
2075                 break;
2076
2077         case SYNCHRONIZE_CACHE:
2078                 if (status == SAM_STAT_GOOD)
2079                         skdev->sync_done = 1;
2080                 else
2081                         skdev->sync_done = -1;
2082                 wake_up_interruptible(&skdev->waitq);
2083                 break;
2084
2085         default:
2086                 SKD_ASSERT("we didn't send this");
2087         }
2088 }
2089
2090 /*
2091  *****************************************************************************
2092  * FIT MESSAGES
2093  *****************************************************************************
2094  */
2095
2096 static void skd_send_fitmsg(struct skd_device *skdev,
2097                             struct skd_fitmsg_context *skmsg)
2098 {
2099         u64 qcmd;
2100         struct fit_msg_hdr *fmh;
2101
2102         pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2103                  skdev->name, __func__, __LINE__,
2104                  skmsg->mb_dma_address, skdev->in_flight);
2105         pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2106                  skdev->name, __func__, __LINE__,
2107                  skmsg->msg_buf, skmsg->offset);
2108
2109         qcmd = skmsg->mb_dma_address;
2110         qcmd |= FIT_QCMD_QID_NORMAL;
2111
2112         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2113         skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2114
2115         if (unlikely(skdev->dbg_level > 1)) {
2116                 u8 *bp = (u8 *)skmsg->msg_buf;
2117                 int i;
2118                 for (i = 0; i < skmsg->length; i += 8) {
2119                         pr_debug("%s:%s:%d msg[%2d] %8ph\n",
2120                                  skdev->name, __func__, __LINE__, i, &bp[i]);
2121                         if (i == 0)
2122                                 i = 64 - 8;
2123                 }
2124         }
2125
2126         if (skmsg->length > 256)
2127                 qcmd |= FIT_QCMD_MSGSIZE_512;
2128         else if (skmsg->length > 128)
2129                 qcmd |= FIT_QCMD_MSGSIZE_256;
2130         else if (skmsg->length > 64)
2131                 qcmd |= FIT_QCMD_MSGSIZE_128;
2132         else
2133                 /*
2134                  * This makes no sense because the FIT msg header is
2135                  * 64 bytes. If the msg is only 64 bytes long it has
2136                  * no payload.
2137                  */
2138                 qcmd |= FIT_QCMD_MSGSIZE_64;
2139
2140         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2141         smp_wmb();
2142
2143         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2144 }
2145
2146 static void skd_send_special_fitmsg(struct skd_device *skdev,
2147                                     struct skd_special_context *skspcl)
2148 {
2149         u64 qcmd;
2150
2151         if (unlikely(skdev->dbg_level > 1)) {
2152                 u8 *bp = (u8 *)skspcl->msg_buf;
2153                 int i;
2154
2155                 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2156                         pr_debug("%s:%s:%d  spcl[%2d] %8ph\n",
2157                                  skdev->name, __func__, __LINE__, i, &bp[i]);
2158                         if (i == 0)
2159                                 i = 64 - 8;
2160                 }
2161
2162                 pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2163                          skdev->name, __func__, __LINE__,
2164                          skspcl, skspcl->req.id, skspcl->req.sksg_list,
2165                          skspcl->req.sksg_dma_address);
2166                 for (i = 0; i < skspcl->req.n_sg; i++) {
2167                         struct fit_sg_descriptor *sgd =
2168                                 &skspcl->req.sksg_list[i];
2169
2170                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
2171                                  "addr=0x%llx next=0x%llx\n",
2172                                  skdev->name, __func__, __LINE__,
2173                                  i, sgd->byte_count, sgd->control,
2174                                  sgd->host_side_addr, sgd->next_desc_ptr);
2175                 }
2176         }
2177
2178         /*
2179          * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2180          * and one 64-byte SSDI command.
2181          */
2182         qcmd = skspcl->mb_dma_address;
2183         qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2184
2185         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2186         smp_wmb();
2187
2188         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2189 }
2190
2191 /*
2192  *****************************************************************************
2193  * COMPLETION QUEUE
2194  *****************************************************************************
2195  */
2196
2197 static void skd_complete_other(struct skd_device *skdev,
2198                                volatile struct fit_completion_entry_v1 *skcomp,
2199                                volatile struct fit_comp_error_info *skerr);
2200
2201 struct sns_info {
2202         u8 type;
2203         u8 stat;
2204         u8 key;
2205         u8 asc;
2206         u8 ascq;
2207         u8 mask;
2208         enum skd_check_status_action action;
2209 };
2210
2211 static struct sns_info skd_chkstat_table[] = {
2212         /* Good */
2213         { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
2214           SKD_CHECK_STATUS_REPORT_GOOD },
2215
2216         /* Smart alerts */
2217         { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
2218           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2219         { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
2220           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2221         { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
2222           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2223
2224         /* Retry (with limits) */
2225         { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
2226           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2227         { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
2228           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2229         { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
2230           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2231         { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
2232           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2233
2234         /* Busy (or about to be) */
2235         { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
2236           SKD_CHECK_STATUS_BUSY_IMMINENT },
2237 };
2238
2239 /*
2240  * Look up status and sense data to decide how to handle the error
2241  * from the device.
2242  * mask says which fields must match e.g., mask=0x18 means check
2243  * type and stat, ignore key, asc, ascq.
2244  */
2245
2246 static enum skd_check_status_action
2247 skd_check_status(struct skd_device *skdev,
2248                  u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2249 {
2250         int i, n;
2251
2252         pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2253                skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2254                skerr->fruc);
2255
2256         pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2257                  skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2258                  skerr->key, skerr->code, skerr->qual, skerr->fruc);
2259
2260         /* Does the info match an entry in the good category? */
2261         n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2262         for (i = 0; i < n; i++) {
2263                 struct sns_info *sns = &skd_chkstat_table[i];
2264
2265                 if (sns->mask & 0x10)
2266                         if (skerr->type != sns->type)
2267                                 continue;
2268
2269                 if (sns->mask & 0x08)
2270                         if (cmp_status != sns->stat)
2271                                 continue;
2272
2273                 if (sns->mask & 0x04)
2274                         if (skerr->key != sns->key)
2275                                 continue;
2276
2277                 if (sns->mask & 0x02)
2278                         if (skerr->code != sns->asc)
2279                                 continue;
2280
2281                 if (sns->mask & 0x01)
2282                         if (skerr->qual != sns->ascq)
2283                                 continue;
2284
2285                 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2286                         pr_err("(%s): SMART Alert: sense key/asc/ascq "
2287                                "%02x/%02x/%02x\n",
2288                                skd_name(skdev), skerr->key,
2289                                skerr->code, skerr->qual);
2290                 }
2291                 return sns->action;
2292         }
2293
2294         /* No other match, so nonzero status means error,
2295          * zero status means good
2296          */
2297         if (cmp_status) {
2298                 pr_debug("%s:%s:%d status check: error\n",
2299                          skdev->name, __func__, __LINE__);
2300                 return SKD_CHECK_STATUS_REPORT_ERROR;
2301         }
2302
2303         pr_debug("%s:%s:%d status check good default\n",
2304                  skdev->name, __func__, __LINE__);
2305         return SKD_CHECK_STATUS_REPORT_GOOD;
2306 }
2307
2308 static void skd_resolve_req_exception(struct skd_device *skdev,
2309                                       struct skd_request_context *skreq)
2310 {
2311         u8 cmp_status = skreq->completion.status;
2312
2313         switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2314         case SKD_CHECK_STATUS_REPORT_GOOD:
2315         case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2316                 skd_end_request(skdev, skreq, BLK_STS_OK);
2317                 break;
2318
2319         case SKD_CHECK_STATUS_BUSY_IMMINENT:
2320                 skd_log_skreq(skdev, skreq, "retry(busy)");
2321                 blk_requeue_request(skdev->queue, skreq->req);
2322                 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2323                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2324                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2325                 skd_quiesce_dev(skdev);
2326                 break;
2327
2328         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2329                 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2330                         skd_log_skreq(skdev, skreq, "retry");
2331                         blk_requeue_request(skdev->queue, skreq->req);
2332                         break;
2333                 }
2334                 /* fall through */
2335
2336         case SKD_CHECK_STATUS_REPORT_ERROR:
2337         default:
2338                 skd_end_request(skdev, skreq, BLK_STS_IOERR);
2339                 break;
2340         }
2341 }
2342
2343 /* assume spinlock is already held */
2344 static void skd_release_skreq(struct skd_device *skdev,
2345                               struct skd_request_context *skreq)
2346 {
2347         u32 msg_slot;
2348         struct skd_fitmsg_context *skmsg;
2349
2350         u32 timo_slot;
2351
2352         /*
2353          * Reclaim the FIT msg buffer if this is
2354          * the first of the requests it carried to
2355          * be completed. The FIT msg buffer used to
2356          * send this request cannot be reused until
2357          * we are sure the s1120 card has copied
2358          * it to its memory. The FIT msg might have
2359          * contained several requests. As soon as
2360          * any of them are completed we know that
2361          * the entire FIT msg was transferred.
2362          * Only the first completed request will
2363          * match the FIT msg buffer id. The FIT
2364          * msg buffer id is immediately updated.
2365          * When subsequent requests complete the FIT
2366          * msg buffer id won't match, so we know
2367          * quite cheaply that it is already done.
2368          */
2369         msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2370         SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2371
2372         skmsg = &skdev->skmsg_table[msg_slot];
2373         if (skmsg->id == skreq->fitmsg_id) {
2374                 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2375                 SKD_ASSERT(skmsg->outstanding > 0);
2376                 skmsg->outstanding--;
2377                 if (skmsg->outstanding == 0) {
2378                         skmsg->state = SKD_MSG_STATE_IDLE;
2379                         skmsg->id += SKD_ID_INCR;
2380                         skmsg->next = skdev->skmsg_free_list;
2381                         skdev->skmsg_free_list = skmsg;
2382                 }
2383         }
2384
2385         /*
2386          * Decrease the number of active requests.
2387          * Also decrements the count in the timeout slot.
2388          */
2389         SKD_ASSERT(skdev->in_flight > 0);
2390         skdev->in_flight -= 1;
2391
2392         timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2393         SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2394         skdev->timeout_slot[timo_slot] -= 1;
2395
2396         /*
2397          * Reset backpointer
2398          */
2399         skreq->req = NULL;
2400
2401         /*
2402          * Reclaim the skd_request_context
2403          */
2404         skreq->state = SKD_REQ_STATE_IDLE;
2405         skreq->id += SKD_ID_INCR;
2406         skreq->next = skdev->skreq_free_list;
2407         skdev->skreq_free_list = skreq;
2408 }
2409
2410 #define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
2411
2412 static void skd_do_inq_page_00(struct skd_device *skdev,
2413                                volatile struct fit_completion_entry_v1 *skcomp,
2414                                volatile struct fit_comp_error_info *skerr,
2415                                uint8_t *cdb, uint8_t *buf)
2416 {
2417         uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2418
2419         /* Caller requested "supported pages".  The driver needs to insert
2420          * its page.
2421          */
2422         pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2423                  skdev->name, __func__, __LINE__);
2424
2425         /* If the device rejected the request because the CDB was
2426          * improperly formed, then just leave.
2427          */
2428         if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2429             skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2430                 return;
2431
2432         /* Get the amount of space the caller allocated */
2433         max_bytes = (cdb[3] << 8) | cdb[4];
2434
2435         /* Get the number of pages actually returned by the device */
2436         drive_pages = (buf[2] << 8) | buf[3];
2437         drive_bytes = drive_pages + 4;
2438         new_size = drive_pages + 1;
2439
2440         /* Supported pages must be in numerical order, so find where
2441          * the driver page needs to be inserted into the list of
2442          * pages returned by the device.
2443          */
2444         for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2445                 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2446                         return; /* Device using this page code. abort */
2447                 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2448                         break;
2449         }
2450
2451         if (insert_pt < max_bytes) {
2452                 uint16_t u;
2453
2454                 /* Shift everything up one byte to make room. */
2455                 for (u = new_size + 3; u > insert_pt; u--)
2456                         buf[u] = buf[u - 1];
2457                 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2458
2459                 /* SCSI byte order increment of num_returned_bytes by 1 */
2460                 skcomp->num_returned_bytes =
2461                         be32_to_cpu(skcomp->num_returned_bytes) + 1;
2462                 skcomp->num_returned_bytes =
2463                         be32_to_cpu(skcomp->num_returned_bytes);
2464         }
2465
2466         /* update page length field to reflect the driver's page too */
2467         buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2468         buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2469 }
2470
2471 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2472 {
2473         int pcie_reg;
2474         u16 pci_bus_speed;
2475         u8 pci_lanes;
2476
2477         pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2478         if (pcie_reg) {
2479                 u16 linksta;
2480                 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2481
2482                 pci_bus_speed = linksta & 0xF;
2483                 pci_lanes = (linksta & 0x3F0) >> 4;
2484         } else {
2485                 *speed = STEC_LINK_UNKNOWN;
2486                 *width = 0xFF;
2487                 return;
2488         }
2489
2490         switch (pci_bus_speed) {
2491         case 1:
2492                 *speed = STEC_LINK_2_5GTS;
2493                 break;
2494         case 2:
2495                 *speed = STEC_LINK_5GTS;
2496                 break;
2497         case 3:
2498                 *speed = STEC_LINK_8GTS;
2499                 break;
2500         default:
2501                 *speed = STEC_LINK_UNKNOWN;
2502                 break;
2503         }
2504
2505         if (pci_lanes <= 0x20)
2506                 *width = pci_lanes;
2507         else
2508                 *width = 0xFF;
2509 }
2510
2511 static void skd_do_inq_page_da(struct skd_device *skdev,
2512                                volatile struct fit_completion_entry_v1 *skcomp,
2513                                volatile struct fit_comp_error_info *skerr,
2514                                uint8_t *cdb, uint8_t *buf)
2515 {
2516         struct pci_dev *pdev = skdev->pdev;
2517         unsigned max_bytes;
2518         struct driver_inquiry_data inq;
2519         u16 val;
2520
2521         pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2522                  skdev->name, __func__, __LINE__);
2523
2524         memset(&inq, 0, sizeof(inq));
2525
2526         inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2527
2528         skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2529         inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2530         inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2531         inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2532
2533         pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2534         inq.pcie_vendor_id = cpu_to_be16(val);
2535
2536         pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2537         inq.pcie_device_id = cpu_to_be16(val);
2538
2539         pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2540         inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2541
2542         pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2543         inq.pcie_subsystem_device_id = cpu_to_be16(val);
2544
2545         /* Driver version, fixed lenth, padded with spaces on the right */
2546         inq.driver_version_length = sizeof(inq.driver_version);
2547         memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2548         memcpy(inq.driver_version, DRV_VER_COMPL,
2549                min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2550
2551         inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2552
2553         /* Clear the error set by the device */
2554         skcomp->status = SAM_STAT_GOOD;
2555         memset((void *)skerr, 0, sizeof(*skerr));
2556
2557         /* copy response into output buffer */
2558         max_bytes = (cdb[3] << 8) | cdb[4];
2559         memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2560
2561         skcomp->num_returned_bytes =
2562                 be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
2563 }
2564
2565 static void skd_do_driver_inq(struct skd_device *skdev,
2566                               volatile struct fit_completion_entry_v1 *skcomp,
2567                               volatile struct fit_comp_error_info *skerr,
2568                               uint8_t *cdb, uint8_t *buf)
2569 {
2570         if (!buf)
2571                 return;
2572         else if (cdb[0] != INQUIRY)
2573                 return;         /* Not an INQUIRY */
2574         else if ((cdb[1] & 1) == 0)
2575                 return;         /* EVPD not set */
2576         else if (cdb[2] == 0)
2577                 /* Need to add driver's page to supported pages list */
2578                 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2579         else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2580                 /* Caller requested driver's page */
2581                 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2582 }
2583
2584 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2585 {
2586         if (!sg)
2587                 return NULL;
2588         if (!sg_page(sg))
2589                 return NULL;
2590         return sg_virt(sg);
2591 }
2592
2593 static void skd_process_scsi_inq(struct skd_device *skdev,
2594                                  volatile struct fit_completion_entry_v1
2595                                  *skcomp,
2596                                  volatile struct fit_comp_error_info *skerr,
2597                                  struct skd_special_context *skspcl)
2598 {
2599         uint8_t *buf;
2600         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2601         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2602
2603         dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2604                             skspcl->req.sg_data_dir);
2605         buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2606
2607         if (buf)
2608                 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2609 }
2610
2611 static int skd_isr_completion_posted(struct skd_device *skdev,
2612                                         int limit, int *enqueued)
2613 {
2614         volatile struct fit_completion_entry_v1 *skcmp = NULL;
2615         volatile struct fit_comp_error_info *skerr;
2616         u16 req_id;
2617         u32 req_slot;
2618         struct skd_request_context *skreq;
2619         u16 cmp_cntxt = 0;
2620         u8 cmp_status = 0;
2621         u8 cmp_cycle = 0;
2622         u32 cmp_bytes = 0;
2623         int rc = 0;
2624         int processed = 0;
2625
2626         for (;; ) {
2627                 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2628
2629                 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2630                 cmp_cycle = skcmp->cycle;
2631                 cmp_cntxt = skcmp->tag;
2632                 cmp_status = skcmp->status;
2633                 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2634
2635                 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2636
2637                 pr_debug("%s:%s:%d "
2638                          "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
2639                          "busy=%d rbytes=0x%x proto=%d\n",
2640                          skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
2641                          skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
2642                          skdev->in_flight, cmp_bytes, skdev->proto_ver);
2643
2644                 if (cmp_cycle != skdev->skcomp_cycle) {
2645                         pr_debug("%s:%s:%d end of completions\n",
2646                                  skdev->name, __func__, __LINE__);
2647                         break;
2648                 }
2649                 /*
2650                  * Update the completion queue head index and possibly
2651                  * the completion cycle count. 8-bit wrap-around.
2652                  */
2653                 skdev->skcomp_ix++;
2654                 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2655                         skdev->skcomp_ix = 0;
2656                         skdev->skcomp_cycle++;
2657                 }
2658
2659                 /*
2660                  * The command context is a unique 32-bit ID. The low order
2661                  * bits help locate the request. The request is usually a
2662                  * r/w request (see skd_start() above) or a special request.
2663                  */
2664                 req_id = cmp_cntxt;
2665                 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2666
2667                 /* Is this other than a r/w request? */
2668                 if (req_slot >= skdev->num_req_context) {
2669                         /*
2670                          * This is not a completion for a r/w request.
2671                          */
2672                         skd_complete_other(skdev, skcmp, skerr);
2673                         continue;
2674                 }
2675
2676                 skreq = &skdev->skreq_table[req_slot];
2677
2678                 /*
2679                  * Make sure the request ID for the slot matches.
2680                  */
2681                 if (skreq->id != req_id) {
2682                         pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
2683                                  skdev->name, __func__, __LINE__,
2684                                  req_id, skreq->id);
2685                         {
2686                                 u16 new_id = cmp_cntxt;
2687                                 pr_err("(%s): Completion mismatch "
2688                                        "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2689                                        skd_name(skdev), req_id,
2690                                        skreq->id, new_id);
2691
2692                                 continue;
2693                         }
2694                 }
2695
2696                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2697
2698                 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2699                         pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
2700                                  skdev->name, __func__, __LINE__,
2701                                  skreq, skreq->id);
2702                         /* a previously timed out command can
2703                          * now be cleaned up */
2704                         skd_release_skreq(skdev, skreq);
2705                         continue;
2706                 }
2707
2708                 skreq->completion = *skcmp;
2709                 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2710                         skreq->err_info = *skerr;
2711                         skd_log_check_status(skdev, cmp_status, skerr->key,
2712                                              skerr->code, skerr->qual,
2713                                              skerr->fruc);
2714                 }
2715                 /* Release DMA resources for the request. */
2716                 if (skreq->n_sg > 0)
2717                         skd_postop_sg_list(skdev, skreq);
2718
2719                 if (!skreq->req) {
2720                         pr_debug("%s:%s:%d NULL backptr skdreq %p, "
2721                                  "req=0x%x req_id=0x%x\n",
2722                                  skdev->name, __func__, __LINE__,
2723                                  skreq, skreq->id, req_id);
2724                 } else {
2725                         /*
2726                          * Capture the outcome and post it back to the
2727                          * native request.
2728                          */
2729                         if (likely(cmp_status == SAM_STAT_GOOD))
2730                                 skd_end_request(skdev, skreq, BLK_STS_OK);
2731                         else
2732                                 skd_resolve_req_exception(skdev, skreq);
2733                 }
2734
2735                 /*
2736                  * Release the skreq, its FIT msg (if one), timeout slot,
2737                  * and queue depth.
2738                  */
2739                 skd_release_skreq(skdev, skreq);
2740
2741                 /* skd_isr_comp_limit equal zero means no limit */
2742                 if (limit) {
2743                         if (++processed >= limit) {
2744                                 rc = 1;
2745                                 break;
2746                         }
2747                 }
2748         }
2749
2750         if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2751                 && (skdev->in_flight) == 0) {
2752                 skdev->state = SKD_DRVR_STATE_PAUSED;
2753                 wake_up_interruptible(&skdev->waitq);
2754         }
2755
2756         return rc;
2757 }
2758
2759 static void skd_complete_other(struct skd_device *skdev,
2760                                volatile struct fit_completion_entry_v1 *skcomp,
2761                                volatile struct fit_comp_error_info *skerr)
2762 {
2763         u32 req_id = 0;
2764         u32 req_table;
2765         u32 req_slot;
2766         struct skd_special_context *skspcl;
2767
2768         req_id = skcomp->tag;
2769         req_table = req_id & SKD_ID_TABLE_MASK;
2770         req_slot = req_id & SKD_ID_SLOT_MASK;
2771
2772         pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
2773                  skdev->name, __func__, __LINE__,
2774                  req_table, req_id, req_slot);
2775
2776         /*
2777          * Based on the request id, determine how to dispatch this completion.
2778          * This swich/case is finding the good cases and forwarding the
2779          * completion entry. Errors are reported below the switch.
2780          */
2781         switch (req_table) {
2782         case SKD_ID_RW_REQUEST:
2783                 /*
2784                  * The caller, skd_isr_completion_posted() above,
2785                  * handles r/w requests. The only way we get here
2786                  * is if the req_slot is out of bounds.
2787                  */
2788                 break;
2789
2790         case SKD_ID_SPECIAL_REQUEST:
2791                 /*
2792                  * Make sure the req_slot is in bounds and that the id
2793                  * matches.
2794                  */
2795                 if (req_slot < skdev->n_special) {
2796                         skspcl = &skdev->skspcl_table[req_slot];
2797                         if (skspcl->req.id == req_id &&
2798                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2799                                 skd_complete_special(skdev,
2800                                                      skcomp, skerr, skspcl);
2801                                 return;
2802                         }
2803                 }
2804                 break;
2805
2806         case SKD_ID_INTERNAL:
2807                 if (req_slot == 0) {
2808                         skspcl = &skdev->internal_skspcl;
2809                         if (skspcl->req.id == req_id &&
2810                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2811                                 skd_complete_internal(skdev,
2812                                                       skcomp, skerr, skspcl);
2813                                 return;
2814                         }
2815                 }
2816                 break;
2817
2818         case SKD_ID_FIT_MSG:
2819                 /*
2820                  * These id's should never appear in a completion record.
2821                  */
2822                 break;
2823
2824         default:
2825                 /*
2826                  * These id's should never appear anywhere;
2827                  */
2828                 break;
2829         }
2830
2831         /*
2832          * If we get here it is a bad or stale id.
2833          */
2834 }
2835
2836 static void skd_complete_special(struct skd_device *skdev,
2837                                  volatile struct fit_completion_entry_v1
2838                                  *skcomp,
2839                                  volatile struct fit_comp_error_info *skerr,
2840                                  struct skd_special_context *skspcl)
2841 {
2842         pr_debug("%s:%s:%d  completing special request %p\n",
2843                  skdev->name, __func__, __LINE__, skspcl);
2844         if (skspcl->orphaned) {
2845                 /* Discard orphaned request */
2846                 /* ?: Can this release directly or does it need
2847                  * to use a worker? */
2848                 pr_debug("%s:%s:%d release orphaned %p\n",
2849                          skdev->name, __func__, __LINE__, skspcl);
2850                 skd_release_special(skdev, skspcl);
2851                 return;
2852         }
2853
2854         skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2855
2856         skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2857         skspcl->req.completion = *skcomp;
2858         skspcl->req.err_info = *skerr;
2859
2860         skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2861                              skerr->code, skerr->qual, skerr->fruc);
2862
2863         wake_up_interruptible(&skdev->waitq);
2864 }
2865
2866 /* assume spinlock is already held */
2867 static void skd_release_special(struct skd_device *skdev,
2868                                 struct skd_special_context *skspcl)
2869 {
2870         int i, was_depleted;
2871
2872         for (i = 0; i < skspcl->req.n_sg; i++) {
2873                 struct page *page = sg_page(&skspcl->req.sg[i]);
2874                 __free_page(page);
2875         }
2876
2877         was_depleted = (skdev->skspcl_free_list == NULL);
2878
2879         skspcl->req.state = SKD_REQ_STATE_IDLE;
2880         skspcl->req.id += SKD_ID_INCR;
2881         skspcl->req.next =
2882                 (struct skd_request_context *)skdev->skspcl_free_list;
2883         skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2884
2885         if (was_depleted) {
2886                 pr_debug("%s:%s:%d skspcl was depleted\n",
2887                          skdev->name, __func__, __LINE__);
2888                 /* Free list was depleted. Their might be waiters. */
2889                 wake_up_interruptible(&skdev->waitq);
2890         }
2891 }
2892
2893 static void skd_reset_skcomp(struct skd_device *skdev)
2894 {
2895         u32 nbytes;
2896         struct fit_completion_entry_v1 *skcomp;
2897
2898         nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2899         nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2900
2901         memset(skdev->skcomp_table, 0, nbytes);
2902
2903         skdev->skcomp_ix = 0;
2904         skdev->skcomp_cycle = 1;
2905 }
2906
2907 /*
2908  *****************************************************************************
2909  * INTERRUPTS
2910  *****************************************************************************
2911  */
2912 static void skd_completion_worker(struct work_struct *work)
2913 {
2914         struct skd_device *skdev =
2915                 container_of(work, struct skd_device, completion_worker);
2916         unsigned long flags;
2917         int flush_enqueued = 0;
2918
2919         spin_lock_irqsave(&skdev->lock, flags);
2920
2921         /*
2922          * pass in limit=0, which means no limit..
2923          * process everything in compq
2924          */
2925         skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2926         skd_request_fn(skdev->queue);
2927
2928         spin_unlock_irqrestore(&skdev->lock, flags);
2929 }
2930
2931 static void skd_isr_msg_from_dev(struct skd_device *skdev);
2932
2933 static irqreturn_t
2934 skd_isr(int irq, void *ptr)
2935 {
2936         struct skd_device *skdev;
2937         u32 intstat;
2938         u32 ack;
2939         int rc = 0;
2940         int deferred = 0;
2941         int flush_enqueued = 0;
2942
2943         skdev = (struct skd_device *)ptr;
2944         spin_lock(&skdev->lock);
2945
2946         for (;; ) {
2947                 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2948
2949                 ack = FIT_INT_DEF_MASK;
2950                 ack &= intstat;
2951
2952                 pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
2953                          skdev->name, __func__, __LINE__, intstat, ack);
2954
2955                 /* As long as there is an int pending on device, keep
2956                  * running loop.  When none, get out, but if we've never
2957                  * done any processing, call completion handler?
2958                  */
2959                 if (ack == 0) {
2960                         /* No interrupts on device, but run the completion
2961                          * processor anyway?
2962                          */
2963                         if (rc == 0)
2964                                 if (likely (skdev->state
2965                                         == SKD_DRVR_STATE_ONLINE))
2966                                         deferred = 1;
2967                         break;
2968                 }
2969
2970                 rc = IRQ_HANDLED;
2971
2972                 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2973
2974                 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
2975                            (skdev->state != SKD_DRVR_STATE_STOPPING))) {
2976                         if (intstat & FIT_ISH_COMPLETION_POSTED) {
2977                                 /*
2978                                  * If we have already deferred completion
2979                                  * processing, don't bother running it again
2980                                  */
2981                                 if (deferred == 0)
2982                                         deferred =
2983                                                 skd_isr_completion_posted(skdev,
2984                                                 skd_isr_comp_limit, &flush_enqueued);
2985                         }
2986
2987                         if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2988                                 skd_isr_fwstate(skdev);
2989                                 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2990                                     skdev->state ==
2991                                     SKD_DRVR_STATE_DISAPPEARED) {
2992                                         spin_unlock(&skdev->lock);
2993                                         return rc;
2994                                 }
2995                         }
2996
2997                         if (intstat & FIT_ISH_MSG_FROM_DEV)
2998                                 skd_isr_msg_from_dev(skdev);
2999                 }
3000         }
3001
3002         if (unlikely(flush_enqueued))
3003                 skd_request_fn(skdev->queue);
3004
3005         if (deferred)
3006                 schedule_work(&skdev->completion_worker);
3007         else if (!flush_enqueued)
3008                 skd_request_fn(skdev->queue);
3009
3010         spin_unlock(&skdev->lock);
3011
3012         return rc;
3013 }
3014
3015 static void skd_drive_fault(struct skd_device *skdev)
3016 {
3017         skdev->state = SKD_DRVR_STATE_FAULT;
3018         pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3019 }
3020
3021 static void skd_drive_disappeared(struct skd_device *skdev)
3022 {
3023         skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3024         pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3025 }
3026
3027 static void skd_isr_fwstate(struct skd_device *skdev)
3028 {
3029         u32 sense;
3030         u32 state;
3031         u32 mtd;
3032         int prev_driver_state = skdev->state;
3033
3034         sense = SKD_READL(skdev, FIT_STATUS);
3035         state = sense & FIT_SR_DRIVE_STATE_MASK;
3036
3037         pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3038                skd_name(skdev),
3039                skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3040                skd_drive_state_to_str(state), state);
3041
3042         skdev->drive_state = state;
3043
3044         switch (skdev->drive_state) {
3045         case FIT_SR_DRIVE_INIT:
3046                 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3047                         skd_disable_interrupts(skdev);
3048                         break;
3049                 }
3050                 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3051                         skd_recover_requests(skdev, 0);
3052                 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3053                         skdev->timer_countdown = SKD_STARTING_TIMO;
3054                         skdev->state = SKD_DRVR_STATE_STARTING;
3055                         skd_soft_reset(skdev);
3056                         break;
3057                 }
3058                 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3059                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3060                 skdev->last_mtd = mtd;
3061                 break;
3062
3063         case FIT_SR_DRIVE_ONLINE:
3064                 skdev->cur_max_queue_depth = skd_max_queue_depth;
3065                 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3066                         skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3067
3068                 skdev->queue_low_water_mark =
3069                         skdev->cur_max_queue_depth * 2 / 3 + 1;
3070                 if (skdev->queue_low_water_mark < 1)
3071                         skdev->queue_low_water_mark = 1;
3072                 pr_info("(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3073                        skd_name(skdev),
3074                        skdev->cur_max_queue_depth,
3075                        skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3076
3077                 skd_refresh_device_data(skdev);
3078                 break;
3079
3080         case FIT_SR_DRIVE_BUSY:
3081                 skdev->state = SKD_DRVR_STATE_BUSY;
3082                 skdev->timer_countdown = SKD_BUSY_TIMO;
3083                 skd_quiesce_dev(skdev);
3084                 break;
3085         case FIT_SR_DRIVE_BUSY_SANITIZE:
3086                 /* set timer for 3 seconds, we'll abort any unfinished
3087                  * commands after that expires
3088                  */
3089                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3090                 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3091                 blk_start_queue(skdev->queue);
3092                 break;
3093         case FIT_SR_DRIVE_BUSY_ERASE:
3094                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3095                 skdev->timer_countdown = SKD_BUSY_TIMO;
3096                 break;
3097         case FIT_SR_DRIVE_OFFLINE:
3098                 skdev->state = SKD_DRVR_STATE_IDLE;
3099                 break;
3100         case FIT_SR_DRIVE_SOFT_RESET:
3101                 switch (skdev->state) {
3102                 case SKD_DRVR_STATE_STARTING:
3103                 case SKD_DRVR_STATE_RESTARTING:
3104                         /* Expected by a caller of skd_soft_reset() */
3105                         break;
3106                 default:
3107                         skdev->state = SKD_DRVR_STATE_RESTARTING;
3108                         break;
3109                 }
3110                 break;
3111         case FIT_SR_DRIVE_FW_BOOTING:
3112                 pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3113                          skdev->name, __func__, __LINE__, skdev->name);
3114                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3115                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3116                 break;
3117
3118         case FIT_SR_DRIVE_DEGRADED:
3119         case FIT_SR_PCIE_LINK_DOWN:
3120         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3121                 break;
3122
3123         case FIT_SR_DRIVE_FAULT:
3124                 skd_drive_fault(skdev);
3125                 skd_recover_requests(skdev, 0);
3126                 blk_start_queue(skdev->queue);
3127                 break;
3128
3129         /* PCIe bus returned all Fs? */
3130         case 0xFF:
3131                 pr_info("(%s): state=0x%x sense=0x%x\n",
3132                        skd_name(skdev), state, sense);
3133                 skd_drive_disappeared(skdev);
3134                 skd_recover_requests(skdev, 0);
3135                 blk_start_queue(skdev->queue);
3136                 break;
3137         default:
3138                 /*
3139                  * Uknown FW State. Wait for a state we recognize.
3140                  */
3141                 break;
3142         }
3143         pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3144                skd_name(skdev),
3145                skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3146                skd_skdev_state_to_str(skdev->state), skdev->state);
3147 }
3148
3149 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3150 {
3151         int i;
3152
3153         for (i = 0; i < skdev->num_req_context; i++) {
3154                 struct skd_request_context *skreq = &skdev->skreq_table[i];
3155
3156                 if (skreq->state == SKD_REQ_STATE_BUSY) {
3157                         skd_log_skreq(skdev, skreq, "recover");
3158
3159                         SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3160                         SKD_ASSERT(skreq->req != NULL);
3161
3162                         /* Release DMA resources for the request. */
3163                         if (skreq->n_sg > 0)
3164                                 skd_postop_sg_list(skdev, skreq);
3165
3166                         if (requeue &&
3167                             (unsigned long) ++skreq->req->special <
3168                             SKD_MAX_RETRIES)
3169                                 blk_requeue_request(skdev->queue, skreq->req);
3170                         else
3171                                 skd_end_request(skdev, skreq, BLK_STS_IOERR);
3172
3173                         skreq->req = NULL;
3174
3175                         skreq->state = SKD_REQ_STATE_IDLE;
3176                         skreq->id += SKD_ID_INCR;
3177                 }
3178                 if (i > 0)
3179                         skreq[-1].next = skreq;
3180                 skreq->next = NULL;
3181         }
3182         skdev->skreq_free_list = skdev->skreq_table;
3183
3184         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3185                 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3186
3187                 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3188                         skd_log_skmsg(skdev, skmsg, "salvaged");
3189                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3190                         skmsg->state = SKD_MSG_STATE_IDLE;
3191                         skmsg->id += SKD_ID_INCR;
3192                 }
3193                 if (i > 0)
3194                         skmsg[-1].next = skmsg;
3195                 skmsg->next = NULL;
3196         }
3197         skdev->skmsg_free_list = skdev->skmsg_table;
3198
3199         for (i = 0; i < skdev->n_special; i++) {
3200                 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3201
3202                 /* If orphaned, reclaim it because it has already been reported
3203                  * to the process as an error (it was just waiting for
3204                  * a completion that didn't come, and now it will never come)
3205                  * If busy, change to a state that will cause it to error
3206                  * out in the wait routine and let it do the normal
3207                  * reporting and reclaiming
3208                  */
3209                 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3210                         if (skspcl->orphaned) {
3211                                 pr_debug("%s:%s:%d orphaned %p\n",
3212                                          skdev->name, __func__, __LINE__,
3213                                          skspcl);
3214                                 skd_release_special(skdev, skspcl);
3215                         } else {
3216                                 pr_debug("%s:%s:%d not orphaned %p\n",
3217                                          skdev->name, __func__, __LINE__,
3218                                          skspcl);
3219                                 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3220                         }
3221                 }
3222         }
3223         skdev->skspcl_free_list = skdev->skspcl_table;
3224
3225         for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3226                 skdev->timeout_slot[i] = 0;
3227
3228         skdev->in_flight = 0;
3229 }
3230
3231 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3232 {
3233         u32 mfd;
3234         u32 mtd;
3235         u32 data;
3236
3237         mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3238
3239         pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3240                  skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3241
3242         /* ignore any mtd that is an ack for something we didn't send */
3243         if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3244                 return;
3245
3246         switch (FIT_MXD_TYPE(mfd)) {
3247         case FIT_MTD_FITFW_INIT:
3248                 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3249
3250                 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3251                         pr_err("(%s): protocol mismatch\n",
3252                                skdev->name);
3253                         pr_err("(%s):   got=%d support=%d\n",
3254                                skdev->name, skdev->proto_ver,
3255                                FIT_PROTOCOL_VERSION_1);
3256                         pr_err("(%s):   please upgrade driver\n",
3257                                skdev->name);
3258                         skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3259                         skd_soft_reset(skdev);
3260                         break;
3261                 }
3262                 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3263                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3264                 skdev->last_mtd = mtd;
3265                 break;
3266
3267         case FIT_MTD_GET_CMDQ_DEPTH:
3268                 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3269                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3270                                    SKD_N_COMPLETION_ENTRY);
3271                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3272                 skdev->last_mtd = mtd;
3273                 break;
3274
3275         case FIT_MTD_SET_COMPQ_DEPTH:
3276                 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3277                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3278                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3279                 skdev->last_mtd = mtd;
3280                 break;
3281
3282         case FIT_MTD_SET_COMPQ_ADDR:
3283                 skd_reset_skcomp(skdev);
3284                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3285                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3286                 skdev->last_mtd = mtd;
3287                 break;
3288
3289         case FIT_MTD_CMD_LOG_HOST_ID:
3290                 skdev->connect_time_stamp = get_seconds();
3291                 data = skdev->connect_time_stamp & 0xFFFF;
3292                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3293                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3294                 skdev->last_mtd = mtd;
3295                 break;
3296
3297         case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3298                 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3299                 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3300                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3301                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3302                 skdev->last_mtd = mtd;
3303                 break;
3304
3305         case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3306                 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3307                 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3308                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3309                 skdev->last_mtd = mtd;
3310
3311                 pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3312                        skd_name(skdev),
3313                        skdev->connect_time_stamp, skdev->drive_jiffies);
3314                 break;
3315
3316         case FIT_MTD_ARM_QUEUE:
3317                 skdev->last_mtd = 0;
3318                 /*
3319                  * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3320                  */
3321                 break;
3322
3323         default:
3324                 break;
3325         }
3326 }
3327
3328 static void skd_disable_interrupts(struct skd_device *skdev)
3329 {
3330         u32 sense;
3331
3332         sense = SKD_READL(skdev, FIT_CONTROL);
3333         sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3334         SKD_WRITEL(skdev, sense, FIT_CONTROL);
3335         pr_debug("%s:%s:%d sense 0x%x\n",
3336                  skdev->name, __func__, __LINE__, sense);
3337
3338         /* Note that the 1s is written. A 1-bit means
3339          * disable, a 0 means enable.
3340          */
3341         SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3342 }
3343
3344 static void skd_enable_interrupts(struct skd_device *skdev)
3345 {
3346         u32 val;
3347
3348         /* unmask interrupts first */
3349         val = FIT_ISH_FW_STATE_CHANGE +
3350               FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3351
3352         /* Note that the compliment of mask is written. A 1-bit means
3353          * disable, a 0 means enable. */
3354         SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3355         pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3356                  skdev->name, __func__, __LINE__, ~val);
3357
3358         val = SKD_READL(skdev, FIT_CONTROL);
3359         val |= FIT_CR_ENABLE_INTERRUPTS;
3360         pr_debug("%s:%s:%d control=0x%x\n",
3361                  skdev->name, __func__, __LINE__, val);
3362         SKD_WRITEL(skdev, val, FIT_CONTROL);
3363 }
3364
3365 /*
3366  *****************************************************************************
3367  * START, STOP, RESTART, QUIESCE, UNQUIESCE
3368  *****************************************************************************
3369  */
3370
3371 static void skd_soft_reset(struct skd_device *skdev)
3372 {
3373         u32 val;
3374
3375         val = SKD_READL(skdev, FIT_CONTROL);
3376         val |= (FIT_CR_SOFT_RESET);
3377         pr_debug("%s:%s:%d control=0x%x\n",
3378                  skdev->name, __func__, __LINE__, val);
3379         SKD_WRITEL(skdev, val, FIT_CONTROL);
3380 }
3381
3382 static void skd_start_device(struct skd_device *skdev)
3383 {
3384         unsigned long flags;
3385         u32 sense;
3386         u32 state;
3387
3388         spin_lock_irqsave(&skdev->lock, flags);
3389
3390         /* ack all ghost interrupts */
3391         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3392
3393         sense = SKD_READL(skdev, FIT_STATUS);
3394
3395         pr_debug("%s:%s:%d initial status=0x%x\n",
3396                  skdev->name, __func__, __LINE__, sense);
3397
3398         state = sense & FIT_SR_DRIVE_STATE_MASK;
3399         skdev->drive_state = state;
3400         skdev->last_mtd = 0;
3401
3402         skdev->state = SKD_DRVR_STATE_STARTING;
3403         skdev->timer_countdown = SKD_STARTING_TIMO;
3404
3405         skd_enable_interrupts(skdev);
3406
3407         switch (skdev->drive_state) {
3408         case FIT_SR_DRIVE_OFFLINE:
3409                 pr_err("(%s): Drive offline...\n", skd_name(skdev));
3410                 break;
3411
3412         case FIT_SR_DRIVE_FW_BOOTING:
3413                 pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3414                          skdev->name, __func__, __LINE__, skdev->name);
3415                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3416                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3417                 break;
3418
3419         case FIT_SR_DRIVE_BUSY_SANITIZE:
3420                 pr_info("(%s): Start: BUSY_SANITIZE\n",
3421                        skd_name(skdev));
3422                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3423                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3424                 break;
3425
3426         case FIT_SR_DRIVE_BUSY_ERASE:
3427                 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3428                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3429                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3430                 break;
3431
3432         case FIT_SR_DRIVE_INIT:
3433         case FIT_SR_DRIVE_ONLINE:
3434                 skd_soft_reset(skdev);
3435                 break;
3436
3437         case FIT_SR_DRIVE_BUSY:
3438                 pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3439                 skdev->state = SKD_DRVR_STATE_BUSY;
3440                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3441                 break;
3442
3443         case FIT_SR_DRIVE_SOFT_RESET:
3444                 pr_err("(%s) drive soft reset in prog\n",
3445                        skd_name(skdev));
3446                 break;
3447
3448         case FIT_SR_DRIVE_FAULT:
3449                 /* Fault state is bad...soft reset won't do it...
3450                  * Hard reset, maybe, but does it work on device?
3451                  * For now, just fault so the system doesn't hang.
3452                  */
3453                 skd_drive_fault(skdev);
3454                 /*start the queue so we can respond with error to requests */
3455                 pr_debug("%s:%s:%d starting %s queue\n",
3456                          skdev->name, __func__, __LINE__, skdev->name);
3457                 blk_start_queue(skdev->queue);
3458                 skdev->gendisk_on = -1;
3459                 wake_up_interruptible(&skdev->waitq);
3460                 break;
3461
3462         case 0xFF:
3463                 /* Most likely the device isn't there or isn't responding
3464                  * to the BAR1 addresses. */
3465                 skd_drive_disappeared(skdev);
3466                 /*start the queue so we can respond with error to requests */
3467                 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3468                          skdev->name, __func__, __LINE__, skdev->name);
3469                 blk_start_queue(skdev->queue);
3470                 skdev->gendisk_on = -1;
3471                 wake_up_interruptible(&skdev->waitq);
3472                 break;
3473
3474         default:
3475                 pr_err("(%s) Start: unknown state %x\n",
3476                        skd_name(skdev), skdev->drive_state);
3477                 break;
3478         }
3479
3480         state = SKD_READL(skdev, FIT_CONTROL);
3481         pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3482                  skdev->name, __func__, __LINE__, state);
3483
3484         state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3485         pr_debug("%s:%s:%d Intr Status=0x%x\n",
3486                  skdev->name, __func__, __LINE__, state);
3487
3488         state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3489         pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3490                  skdev->name, __func__, __LINE__, state);
3491
3492         state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3493         pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3494                  skdev->name, __func__, __LINE__, state);
3495
3496         state = SKD_READL(skdev, FIT_HW_VERSION);
3497         pr_debug("%s:%s:%d HW version=0x%x\n",
3498                  skdev->name, __func__, __LINE__, state);
3499
3500         spin_unlock_irqrestore(&skdev->lock, flags);
3501 }
3502
3503 static void skd_stop_device(struct skd_device *skdev)
3504 {
3505         unsigned long flags;
3506         struct skd_special_context *skspcl = &skdev->internal_skspcl;
3507         u32 dev_state;
3508         int i;
3509
3510         spin_lock_irqsave(&skdev->lock, flags);
3511
3512         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3513                 pr_err("(%s): skd_stop_device not online no sync\n",
3514                        skd_name(skdev));
3515                 goto stop_out;
3516         }
3517
3518         if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3519                 pr_err("(%s): skd_stop_device no special\n",
3520                        skd_name(skdev));
3521                 goto stop_out;
3522         }
3523
3524         skdev->state = SKD_DRVR_STATE_SYNCING;
3525         skdev->sync_done = 0;
3526
3527         skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3528
3529         spin_unlock_irqrestore(&skdev->lock, flags);
3530
3531         wait_event_interruptible_timeout(skdev->waitq,
3532                                          (skdev->sync_done), (10 * HZ));
3533
3534         spin_lock_irqsave(&skdev->lock, flags);
3535
3536         switch (skdev->sync_done) {
3537         case 0:
3538                 pr_err("(%s): skd_stop_device no sync\n",
3539                        skd_name(skdev));
3540                 break;
3541         case 1:
3542                 pr_err("(%s): skd_stop_device sync done\n",
3543                        skd_name(skdev));
3544                 break;
3545         default:
3546                 pr_err("(%s): skd_stop_device sync error\n",
3547                        skd_name(skdev));
3548         }
3549
3550 stop_out:
3551         skdev->state = SKD_DRVR_STATE_STOPPING;
3552         spin_unlock_irqrestore(&skdev->lock, flags);
3553
3554         skd_kill_timer(skdev);
3555
3556         spin_lock_irqsave(&skdev->lock, flags);
3557         skd_disable_interrupts(skdev);
3558
3559         /* ensure all ints on device are cleared */
3560         /* soft reset the device to unload with a clean slate */
3561         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3562         SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3563
3564         spin_unlock_irqrestore(&skdev->lock, flags);
3565
3566         /* poll every 100ms, 1 second timeout */
3567         for (i = 0; i < 10; i++) {
3568                 dev_state =
3569                         SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3570                 if (dev_state == FIT_SR_DRIVE_INIT)
3571                         break;
3572                 set_current_state(TASK_INTERRUPTIBLE);
3573                 schedule_timeout(msecs_to_jiffies(100));
3574         }
3575
3576         if (dev_state != FIT_SR_DRIVE_INIT)
3577                 pr_err("(%s): skd_stop_device state error 0x%02x\n",
3578                        skd_name(skdev), dev_state);
3579 }
3580
3581 /* assume spinlock is held */
3582 static void skd_restart_device(struct skd_device *skdev)
3583 {
3584         u32 state;
3585
3586         /* ack all ghost interrupts */
3587         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3588
3589         state = SKD_READL(skdev, FIT_STATUS);
3590
3591         pr_debug("%s:%s:%d drive status=0x%x\n",
3592                  skdev->name, __func__, __LINE__, state);
3593
3594         state &= FIT_SR_DRIVE_STATE_MASK;
3595         skdev->drive_state = state;
3596         skdev->last_mtd = 0;
3597
3598         skdev->state = SKD_DRVR_STATE_RESTARTING;
3599         skdev->timer_countdown = SKD_RESTARTING_TIMO;
3600
3601         skd_soft_reset(skdev);
3602 }
3603
3604 /* assume spinlock is held */
3605 static int skd_quiesce_dev(struct skd_device *skdev)
3606 {
3607         int rc = 0;
3608
3609         switch (skdev->state) {
3610         case SKD_DRVR_STATE_BUSY:
3611         case SKD_DRVR_STATE_BUSY_IMMINENT:
3612                 pr_debug("%s:%s:%d stopping %s queue\n",
3613                          skdev->name, __func__, __LINE__, skdev->name);
3614                 blk_stop_queue(skdev->queue);
3615                 break;
3616         case SKD_DRVR_STATE_ONLINE:
3617         case SKD_DRVR_STATE_STOPPING:
3618         case SKD_DRVR_STATE_SYNCING:
3619         case SKD_DRVR_STATE_PAUSING:
3620         case SKD_DRVR_STATE_PAUSED:
3621         case SKD_DRVR_STATE_STARTING:
3622         case SKD_DRVR_STATE_RESTARTING:
3623         case SKD_DRVR_STATE_RESUMING:
3624         default:
3625                 rc = -EINVAL;
3626                 pr_debug("%s:%s:%d state [%d] not implemented\n",
3627                          skdev->name, __func__, __LINE__, skdev->state);
3628         }
3629         return rc;
3630 }
3631
3632 /* assume spinlock is held */
3633 static int skd_unquiesce_dev(struct skd_device *skdev)
3634 {
3635         int prev_driver_state = skdev->state;
3636
3637         skd_log_skdev(skdev, "unquiesce");
3638         if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3639                 pr_debug("%s:%s:%d **** device already ONLINE\n",
3640                          skdev->name, __func__, __LINE__);
3641                 return 0;
3642         }
3643         if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3644                 /*
3645                  * If there has been an state change to other than
3646                  * ONLINE, we will rely on controller state change
3647                  * to come back online and restart the queue.
3648                  * The BUSY state means that driver is ready to
3649                  * continue normal processing but waiting for controller
3650                  * to become available.
3651                  */
3652                 skdev->state = SKD_DRVR_STATE_BUSY;
3653                 pr_debug("%s:%s:%d drive BUSY state\n",
3654                          skdev->name, __func__, __LINE__);
3655                 return 0;
3656         }
3657
3658         /*
3659          * Drive has just come online, driver is either in startup,
3660          * paused performing a task, or bust waiting for hardware.
3661          */
3662         switch (skdev->state) {
3663         case SKD_DRVR_STATE_PAUSED:
3664         case SKD_DRVR_STATE_BUSY:
3665         case SKD_DRVR_STATE_BUSY_IMMINENT:
3666         case SKD_DRVR_STATE_BUSY_ERASE:
3667         case SKD_DRVR_STATE_STARTING:
3668         case SKD_DRVR_STATE_RESTARTING:
3669         case SKD_DRVR_STATE_FAULT:
3670         case SKD_DRVR_STATE_IDLE:
3671         case SKD_DRVR_STATE_LOAD:
3672                 skdev->state = SKD_DRVR_STATE_ONLINE;
3673                 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3674                        skd_name(skdev),
3675                        skd_skdev_state_to_str(prev_driver_state),
3676                        prev_driver_state, skd_skdev_state_to_str(skdev->state),
3677                        skdev->state);
3678                 pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
3679                          skdev->name, __func__, __LINE__);
3680                 pr_debug("%s:%s:%d starting %s queue\n",
3681                          skdev->name, __func__, __LINE__, skdev->name);
3682                 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
3683                 blk_start_queue(skdev->queue);
3684                 skdev->gendisk_on = 1;
3685                 wake_up_interruptible(&skdev->waitq);
3686                 break;
3687
3688         case SKD_DRVR_STATE_DISAPPEARED:
3689         default:
3690                 pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
3691                          skdev->name, __func__, __LINE__,
3692                          skdev->state);
3693                 return -EBUSY;
3694         }
3695         return 0;
3696 }
3697
3698 /*
3699  *****************************************************************************
3700  * PCIe MSI/MSI-X INTERRUPT HANDLERS
3701  *****************************************************************************
3702  */
3703
3704 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3705 {
3706         struct skd_device *skdev = skd_host_data;
3707         unsigned long flags;
3708
3709         spin_lock_irqsave(&skdev->lock, flags);
3710         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3711                  skdev->name, __func__, __LINE__,
3712                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3713         pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
3714                irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
3715         SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3716         spin_unlock_irqrestore(&skdev->lock, flags);
3717         return IRQ_HANDLED;
3718 }
3719
3720 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3721 {
3722         struct skd_device *skdev = skd_host_data;
3723         unsigned long flags;
3724
3725         spin_lock_irqsave(&skdev->lock, flags);
3726         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3727                  skdev->name, __func__, __LINE__,
3728                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3729         SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3730         skd_isr_fwstate(skdev);
3731         spin_unlock_irqrestore(&skdev->lock, flags);
3732         return IRQ_HANDLED;
3733 }
3734
3735 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3736 {
3737         struct skd_device *skdev = skd_host_data;
3738         unsigned long flags;
3739         int flush_enqueued = 0;
3740         int deferred;
3741
3742         spin_lock_irqsave(&skdev->lock, flags);
3743         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3744                  skdev->name, __func__, __LINE__,
3745                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3746         SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3747         deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3748                                                 &flush_enqueued);
3749         if (flush_enqueued)
3750                 skd_request_fn(skdev->queue);
3751
3752         if (deferred)
3753                 schedule_work(&skdev->completion_worker);
3754         else if (!flush_enqueued)
3755                 skd_request_fn(skdev->queue);
3756
3757         spin_unlock_irqrestore(&skdev->lock, flags);
3758
3759         return IRQ_HANDLED;
3760 }
3761
3762 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3763 {
3764         struct skd_device *skdev = skd_host_data;
3765         unsigned long flags;
3766
3767         spin_lock_irqsave(&skdev->lock, flags);
3768         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3769                  skdev->name, __func__, __LINE__,
3770                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3771         SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3772         skd_isr_msg_from_dev(skdev);
3773         spin_unlock_irqrestore(&skdev->lock, flags);
3774         return IRQ_HANDLED;
3775 }
3776
3777 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3778 {
3779         struct skd_device *skdev = skd_host_data;
3780         unsigned long flags;
3781
3782         spin_lock_irqsave(&skdev->lock, flags);
3783         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3784                  skdev->name, __func__, __LINE__,
3785                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3786         SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3787         spin_unlock_irqrestore(&skdev->lock, flags);
3788         return IRQ_HANDLED;
3789 }
3790
3791 /*
3792  *****************************************************************************
3793  * PCIe MSI/MSI-X SETUP
3794  *****************************************************************************
3795  */
3796
3797 struct skd_msix_entry {
3798         char isr_name[30];
3799 };
3800
3801 struct skd_init_msix_entry {
3802         const char *name;
3803         irq_handler_t handler;
3804 };
3805
3806 #define SKD_MAX_MSIX_COUNT              13
3807 #define SKD_MIN_MSIX_COUNT              7
3808 #define SKD_BASE_MSIX_IRQ               4
3809
3810 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3811         { "(DMA 0)",        skd_reserved_isr },
3812         { "(DMA 1)",        skd_reserved_isr },
3813         { "(DMA 2)",        skd_reserved_isr },
3814         { "(DMA 3)",        skd_reserved_isr },
3815         { "(State Change)", skd_statec_isr   },
3816         { "(COMPL_Q)",      skd_comp_q       },
3817         { "(MSG)",          skd_msg_isr      },
3818         { "(Reserved)",     skd_reserved_isr },
3819         { "(Reserved)",     skd_reserved_isr },
3820         { "(Queue Full 0)", skd_qfull_isr    },
3821         { "(Queue Full 1)", skd_qfull_isr    },
3822         { "(Queue Full 2)", skd_qfull_isr    },
3823         { "(Queue Full 3)", skd_qfull_isr    },
3824 };
3825
3826 static int skd_acquire_msix(struct skd_device *skdev)
3827 {
3828         int i, rc;
3829         struct pci_dev *pdev = skdev->pdev;
3830
3831         rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
3832                         PCI_IRQ_MSIX);
3833         if (rc < 0) {
3834                 pr_err("(%s): failed to enable MSI-X %d\n",
3835                        skd_name(skdev), rc);
3836                 goto out;
3837         }
3838
3839         skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
3840                         sizeof(struct skd_msix_entry), GFP_KERNEL);
3841         if (!skdev->msix_entries) {
3842                 rc = -ENOMEM;
3843                 pr_err("(%s): msix table allocation error\n",
3844                        skd_name(skdev));
3845                 goto out;
3846         }
3847
3848         /* Enable MSI-X vectors for the base queue */
3849         for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3850                 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
3851
3852                 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3853                          "%s%d-msix %s", DRV_NAME, skdev->devno,
3854                          msix_entries[i].name);
3855
3856                 rc = devm_request_irq(&skdev->pdev->dev,
3857                                 pci_irq_vector(skdev->pdev, i),
3858                                 msix_entries[i].handler, 0,
3859                                 qentry->isr_name, skdev);
3860                 if (rc) {
3861                         pr_err("(%s): Unable to register(%d) MSI-X "
3862                                "handler %d: %s\n",
3863                                skd_name(skdev), rc, i, qentry->isr_name);
3864                         goto msix_out;
3865                 }
3866         }
3867
3868         pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
3869                  skdev->name, __func__, __LINE__,
3870                  pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
3871         return 0;
3872
3873 msix_out:
3874         while (--i >= 0)
3875                 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
3876 out:
3877         kfree(skdev->msix_entries);
3878         skdev->msix_entries = NULL;
3879         return rc;
3880 }
3881
3882 static int skd_acquire_irq(struct skd_device *skdev)
3883 {
3884         struct pci_dev *pdev = skdev->pdev;
3885         unsigned int irq_flag = PCI_IRQ_LEGACY;
3886         int rc;
3887
3888         if (skd_isr_type == SKD_IRQ_MSIX) {
3889                 rc = skd_acquire_msix(skdev);
3890                 if (!rc)
3891                         return 0;
3892
3893                 pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
3894                        skd_name(skdev), rc);
3895         }
3896
3897         snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
3898                         skdev->devno);
3899
3900         if (skd_isr_type != SKD_IRQ_LEGACY)
3901                 irq_flag |= PCI_IRQ_MSI;
3902         rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
3903         if (rc < 0) {
3904                 pr_err("(%s): failed to allocate the MSI interrupt %d\n",
3905                         skd_name(skdev), rc);
3906                 return rc;
3907         }
3908
3909         rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
3910                         pdev->msi_enabled ? 0 : IRQF_SHARED,
3911                         skdev->isr_name, skdev);
3912         if (rc) {
3913                 pci_free_irq_vectors(pdev);
3914                 pr_err("(%s): failed to allocate interrupt %d\n",
3915                         skd_name(skdev), rc);
3916                 return rc;
3917         }
3918
3919         return 0;
3920 }
3921
3922 static void skd_release_irq(struct skd_device *skdev)
3923 {
3924         struct pci_dev *pdev = skdev->pdev;
3925
3926         if (skdev->msix_entries) {
3927                 int i;
3928
3929                 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3930                         devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
3931                                         skdev);
3932                 }
3933
3934                 kfree(skdev->msix_entries);
3935                 skdev->msix_entries = NULL;
3936         } else {
3937                 devm_free_irq(&pdev->dev, pdev->irq, skdev);
3938         }
3939
3940         pci_free_irq_vectors(pdev);
3941 }
3942
3943 /*
3944  *****************************************************************************
3945  * CONSTRUCT
3946  *****************************************************************************
3947  */
3948
3949 static int skd_cons_skcomp(struct skd_device *skdev)
3950 {
3951         int rc = 0;
3952         struct fit_completion_entry_v1 *skcomp;
3953         u32 nbytes;
3954
3955         nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
3956         nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3957
3958         pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
3959                  skdev->name, __func__, __LINE__,
3960                  nbytes, SKD_N_COMPLETION_ENTRY);
3961
3962         skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
3963                                        &skdev->cq_dma_address);
3964
3965         if (skcomp == NULL) {
3966                 rc = -ENOMEM;
3967                 goto err_out;
3968         }
3969
3970         skdev->skcomp_table = skcomp;
3971         skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
3972                                                            sizeof(*skcomp) *
3973                                                            SKD_N_COMPLETION_ENTRY);
3974
3975 err_out:
3976         return rc;
3977 }
3978
3979 static int skd_cons_skmsg(struct skd_device *skdev)
3980 {
3981         int rc = 0;
3982         u32 i;
3983
3984         pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
3985                  skdev->name, __func__, __LINE__,
3986                  sizeof(struct skd_fitmsg_context),
3987                  skdev->num_fitmsg_context,
3988                  sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
3989
3990         skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
3991                                      *skdev->num_fitmsg_context, GFP_KERNEL);
3992         if (skdev->skmsg_table == NULL) {
3993                 rc = -ENOMEM;
3994                 goto err_out;
3995         }
3996
3997         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3998                 struct skd_fitmsg_context *skmsg;
3999
4000                 skmsg = &skdev->skmsg_table[i];
4001
4002                 skmsg->id = i + SKD_ID_FIT_MSG;
4003
4004                 skmsg->state = SKD_MSG_STATE_IDLE;
4005                 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4006                                                       SKD_N_FITMSG_BYTES + 64,
4007                                                       &skmsg->mb_dma_address);
4008
4009                 if (skmsg->msg_buf == NULL) {
4010                         rc = -ENOMEM;
4011                         goto err_out;
4012                 }
4013
4014                 skmsg->offset = (u32)((u64)skmsg->msg_buf &
4015                                       (~FIT_QCMD_BASE_ADDRESS_MASK));
4016                 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4017                 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4018                                        FIT_QCMD_BASE_ADDRESS_MASK);
4019                 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4020                 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4021                 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4022
4023                 skmsg->next = &skmsg[1];
4024         }
4025
4026         /* Free list is in order starting with the 0th entry. */
4027         skdev->skmsg_table[i - 1].next = NULL;
4028         skdev->skmsg_free_list = skdev->skmsg_table;
4029
4030 err_out:
4031         return rc;
4032 }
4033
4034 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4035                                                   u32 n_sg,
4036                                                   dma_addr_t *ret_dma_addr)
4037 {
4038         struct fit_sg_descriptor *sg_list;
4039         u32 nbytes;
4040
4041         nbytes = sizeof(*sg_list) * n_sg;
4042
4043         sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4044
4045         if (sg_list != NULL) {
4046                 uint64_t dma_address = *ret_dma_addr;
4047                 u32 i;
4048
4049                 memset(sg_list, 0, nbytes);
4050
4051                 for (i = 0; i < n_sg - 1; i++) {
4052                         uint64_t ndp_off;
4053                         ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4054
4055                         sg_list[i].next_desc_ptr = dma_address + ndp_off;
4056                 }
4057                 sg_list[i].next_desc_ptr = 0LL;
4058         }
4059
4060         return sg_list;
4061 }
4062
4063 static int skd_cons_skreq(struct skd_device *skdev)
4064 {
4065         int rc = 0;
4066         u32 i;
4067
4068         pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4069                  skdev->name, __func__, __LINE__,
4070                  sizeof(struct skd_request_context),
4071                  skdev->num_req_context,
4072                  sizeof(struct skd_request_context) * skdev->num_req_context);
4073
4074         skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4075                                      * skdev->num_req_context, GFP_KERNEL);
4076         if (skdev->skreq_table == NULL) {
4077                 rc = -ENOMEM;
4078                 goto err_out;
4079         }
4080
4081         pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4082                  skdev->name, __func__, __LINE__,
4083                  skdev->sgs_per_request, sizeof(struct scatterlist),
4084                  skdev->sgs_per_request * sizeof(struct scatterlist));
4085
4086         for (i = 0; i < skdev->num_req_context; i++) {
4087                 struct skd_request_context *skreq;
4088
4089                 skreq = &skdev->skreq_table[i];
4090
4091                 skreq->id = i + SKD_ID_RW_REQUEST;
4092                 skreq->state = SKD_REQ_STATE_IDLE;
4093
4094                 skreq->sg = kzalloc(sizeof(struct scatterlist) *
4095                                     skdev->sgs_per_request, GFP_KERNEL);
4096                 if (skreq->sg == NULL) {
4097                         rc = -ENOMEM;
4098                         goto err_out;
4099                 }
4100                 sg_init_table(skreq->sg, skdev->sgs_per_request);
4101
4102                 skreq->sksg_list = skd_cons_sg_list(skdev,
4103                                                     skdev->sgs_per_request,
4104                                                     &skreq->sksg_dma_address);
4105
4106                 if (skreq->sksg_list == NULL) {
4107                         rc = -ENOMEM;
4108                         goto err_out;
4109                 }
4110
4111                 skreq->next = &skreq[1];
4112         }
4113
4114         /* Free list is in order starting with the 0th entry. */
4115         skdev->skreq_table[i - 1].next = NULL;
4116         skdev->skreq_free_list = skdev->skreq_table;
4117
4118 err_out:
4119         return rc;
4120 }
4121
4122 static int skd_cons_skspcl(struct skd_device *skdev)
4123 {
4124         int rc = 0;
4125         u32 i, nbytes;
4126
4127         pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4128                  skdev->name, __func__, __LINE__,
4129                  sizeof(struct skd_special_context),
4130                  skdev->n_special,
4131                  sizeof(struct skd_special_context) * skdev->n_special);
4132
4133         skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4134                                       * skdev->n_special, GFP_KERNEL);
4135         if (skdev->skspcl_table == NULL) {
4136                 rc = -ENOMEM;
4137                 goto err_out;
4138         }
4139
4140         for (i = 0; i < skdev->n_special; i++) {
4141                 struct skd_special_context *skspcl;
4142
4143                 skspcl = &skdev->skspcl_table[i];
4144
4145                 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4146                 skspcl->req.state = SKD_REQ_STATE_IDLE;
4147
4148                 skspcl->req.next = &skspcl[1].req;
4149
4150                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4151
4152                 skspcl->msg_buf =
4153                         pci_zalloc_consistent(skdev->pdev, nbytes,
4154                                               &skspcl->mb_dma_address);
4155                 if (skspcl->msg_buf == NULL) {
4156                         rc = -ENOMEM;
4157                         goto err_out;
4158                 }
4159
4160                 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4161                                          SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4162                 if (skspcl->req.sg == NULL) {
4163                         rc = -ENOMEM;
4164                         goto err_out;
4165                 }
4166
4167                 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4168                                                          SKD_N_SG_PER_SPECIAL,
4169                                                          &skspcl->req.
4170                                                          sksg_dma_address);
4171                 if (skspcl->req.sksg_list == NULL) {
4172                         rc = -ENOMEM;
4173                         goto err_out;
4174                 }
4175         }
4176
4177         /* Free list is in order starting with the 0th entry. */
4178         skdev->skspcl_table[i - 1].req.next = NULL;
4179         skdev->skspcl_free_list = skdev->skspcl_table;
4180
4181         return rc;
4182
4183 err_out:
4184         return rc;
4185 }
4186
4187 static int skd_cons_sksb(struct skd_device *skdev)
4188 {
4189         int rc = 0;
4190         struct skd_special_context *skspcl;
4191         u32 nbytes;
4192
4193         skspcl = &skdev->internal_skspcl;
4194
4195         skspcl->req.id = 0 + SKD_ID_INTERNAL;
4196         skspcl->req.state = SKD_REQ_STATE_IDLE;
4197
4198         nbytes = SKD_N_INTERNAL_BYTES;
4199
4200         skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4201                                                  &skspcl->db_dma_address);
4202         if (skspcl->data_buf == NULL) {
4203                 rc = -ENOMEM;
4204                 goto err_out;
4205         }
4206
4207         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4208         skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4209                                                 &skspcl->mb_dma_address);
4210         if (skspcl->msg_buf == NULL) {
4211                 rc = -ENOMEM;
4212                 goto err_out;
4213         }
4214
4215         skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4216                                                  &skspcl->req.sksg_dma_address);
4217         if (skspcl->req.sksg_list == NULL) {
4218                 rc = -ENOMEM;
4219                 goto err_out;
4220         }
4221
4222         if (!skd_format_internal_skspcl(skdev)) {
4223                 rc = -EINVAL;
4224                 goto err_out;
4225         }
4226
4227 err_out:
4228         return rc;
4229 }
4230
4231 static int skd_cons_disk(struct skd_device *skdev)
4232 {
4233         int rc = 0;
4234         struct gendisk *disk;
4235         struct request_queue *q;
4236         unsigned long flags;
4237
4238         disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4239         if (!disk) {
4240                 rc = -ENOMEM;
4241                 goto err_out;
4242         }
4243
4244         skdev->disk = disk;
4245         sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4246
4247         disk->major = skdev->major;
4248         disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4249         disk->fops = &skd_blockdev_ops;
4250         disk->private_data = skdev;
4251
4252         q = blk_init_queue(skd_request_fn, &skdev->lock);
4253         if (!q) {
4254                 rc = -ENOMEM;
4255                 goto err_out;
4256         }
4257         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
4258
4259         skdev->queue = q;
4260         disk->queue = q;
4261         q->queuedata = skdev;
4262
4263         blk_queue_write_cache(q, true, true);
4264         blk_queue_max_segments(q, skdev->sgs_per_request);
4265         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4266
4267         /* set optimal I/O size to 8KB */
4268         blk_queue_io_opt(q, 8192);
4269
4270         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4271         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4272
4273         spin_lock_irqsave(&skdev->lock, flags);
4274         pr_debug("%s:%s:%d stopping %s queue\n",
4275                  skdev->name, __func__, __LINE__, skdev->name);
4276         blk_stop_queue(skdev->queue);
4277         spin_unlock_irqrestore(&skdev->lock, flags);
4278
4279 err_out:
4280         return rc;
4281 }
4282
4283 #define SKD_N_DEV_TABLE         16u
4284 static u32 skd_next_devno;
4285
4286 static struct skd_device *skd_construct(struct pci_dev *pdev)
4287 {
4288         struct skd_device *skdev;
4289         int blk_major = skd_major;
4290         int rc;
4291
4292         skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4293
4294         if (!skdev) {
4295                 pr_err(PFX "(%s): memory alloc failure\n",
4296                        pci_name(pdev));
4297                 return NULL;
4298         }
4299
4300         skdev->state = SKD_DRVR_STATE_LOAD;
4301         skdev->pdev = pdev;
4302         skdev->devno = skd_next_devno++;
4303         skdev->major = blk_major;
4304         sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
4305         skdev->dev_max_queue_depth = 0;
4306
4307         skdev->num_req_context = skd_max_queue_depth;
4308         skdev->num_fitmsg_context = skd_max_queue_depth;
4309         skdev->n_special = skd_max_pass_thru;
4310         skdev->cur_max_queue_depth = 1;
4311         skdev->queue_low_water_mark = 1;
4312         skdev->proto_ver = 99;
4313         skdev->sgs_per_request = skd_sgs_per_request;
4314         skdev->dbg_level = skd_dbg_level;
4315
4316         atomic_set(&skdev->device_count, 0);
4317
4318         spin_lock_init(&skdev->lock);
4319
4320         INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4321
4322         pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4323         rc = skd_cons_skcomp(skdev);
4324         if (rc < 0)
4325                 goto err_out;
4326
4327         pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4328         rc = skd_cons_skmsg(skdev);
4329         if (rc < 0)
4330                 goto err_out;
4331
4332         pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4333         rc = skd_cons_skreq(skdev);
4334         if (rc < 0)
4335                 goto err_out;
4336
4337         pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4338         rc = skd_cons_skspcl(skdev);
4339         if (rc < 0)
4340                 goto err_out;
4341
4342         pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4343         rc = skd_cons_sksb(skdev);
4344         if (rc < 0)
4345                 goto err_out;
4346
4347         pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4348         rc = skd_cons_disk(skdev);
4349         if (rc < 0)
4350                 goto err_out;
4351
4352         pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
4353         return skdev;
4354
4355 err_out:
4356         pr_debug("%s:%s:%d construct failed\n",
4357                  skdev->name, __func__, __LINE__);
4358         skd_destruct(skdev);
4359         return NULL;
4360 }
4361
4362 /*
4363  *****************************************************************************
4364  * DESTRUCT (FREE)
4365  *****************************************************************************
4366  */
4367
4368 static void skd_free_skcomp(struct skd_device *skdev)
4369 {
4370         if (skdev->skcomp_table != NULL) {
4371                 u32 nbytes;
4372
4373                 nbytes = sizeof(skdev->skcomp_table[0]) *
4374                          SKD_N_COMPLETION_ENTRY;
4375                 pci_free_consistent(skdev->pdev, nbytes,
4376                                     skdev->skcomp_table, skdev->cq_dma_address);
4377         }
4378
4379         skdev->skcomp_table = NULL;
4380         skdev->cq_dma_address = 0;
4381 }
4382
4383 static void skd_free_skmsg(struct skd_device *skdev)
4384 {
4385         u32 i;
4386
4387         if (skdev->skmsg_table == NULL)
4388                 return;
4389
4390         for (i = 0; i < skdev->num_fitmsg_context; i++) {
4391                 struct skd_fitmsg_context *skmsg;
4392
4393                 skmsg = &skdev->skmsg_table[i];
4394
4395                 if (skmsg->msg_buf != NULL) {
4396                         skmsg->msg_buf += skmsg->offset;
4397                         skmsg->mb_dma_address += skmsg->offset;
4398                         pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4399                                             skmsg->msg_buf,
4400                                             skmsg->mb_dma_address);
4401                 }
4402                 skmsg->msg_buf = NULL;
4403                 skmsg->mb_dma_address = 0;
4404         }
4405
4406         kfree(skdev->skmsg_table);
4407         skdev->skmsg_table = NULL;
4408 }
4409
4410 static void skd_free_sg_list(struct skd_device *skdev,
4411                              struct fit_sg_descriptor *sg_list,
4412                              u32 n_sg, dma_addr_t dma_addr)
4413 {
4414         if (sg_list != NULL) {
4415                 u32 nbytes;
4416
4417                 nbytes = sizeof(*sg_list) * n_sg;
4418
4419                 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4420         }
4421 }
4422
4423 static void skd_free_skreq(struct skd_device *skdev)
4424 {
4425         u32 i;
4426
4427         if (skdev->skreq_table == NULL)
4428                 return;
4429
4430         for (i = 0; i < skdev->num_req_context; i++) {
4431                 struct skd_request_context *skreq;
4432
4433                 skreq = &skdev->skreq_table[i];
4434
4435                 skd_free_sg_list(skdev, skreq->sksg_list,
4436                                  skdev->sgs_per_request,
4437                                  skreq->sksg_dma_address);
4438
4439                 skreq->sksg_list = NULL;
4440                 skreq->sksg_dma_address = 0;
4441
4442                 kfree(skreq->sg);
4443         }
4444
4445         kfree(skdev->skreq_table);
4446         skdev->skreq_table = NULL;
4447 }
4448
4449 static void skd_free_skspcl(struct skd_device *skdev)
4450 {
4451         u32 i;
4452         u32 nbytes;
4453
4454         if (skdev->skspcl_table == NULL)
4455                 return;
4456
4457         for (i = 0; i < skdev->n_special; i++) {
4458                 struct skd_special_context *skspcl;
4459
4460                 skspcl = &skdev->skspcl_table[i];
4461
4462                 if (skspcl->msg_buf != NULL) {
4463                         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4464                         pci_free_consistent(skdev->pdev, nbytes,
4465                                             skspcl->msg_buf,
4466                                             skspcl->mb_dma_address);
4467                 }
4468
4469                 skspcl->msg_buf = NULL;
4470                 skspcl->mb_dma_address = 0;
4471
4472                 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4473                                  SKD_N_SG_PER_SPECIAL,
4474                                  skspcl->req.sksg_dma_address);
4475
4476                 skspcl->req.sksg_list = NULL;
4477                 skspcl->req.sksg_dma_address = 0;
4478
4479                 kfree(skspcl->req.sg);
4480         }
4481
4482         kfree(skdev->skspcl_table);
4483         skdev->skspcl_table = NULL;
4484 }
4485
4486 static void skd_free_sksb(struct skd_device *skdev)
4487 {
4488         struct skd_special_context *skspcl;
4489         u32 nbytes;
4490
4491         skspcl = &skdev->internal_skspcl;
4492
4493         if (skspcl->data_buf != NULL) {
4494                 nbytes = SKD_N_INTERNAL_BYTES;
4495
4496                 pci_free_consistent(skdev->pdev, nbytes,
4497                                     skspcl->data_buf, skspcl->db_dma_address);
4498         }
4499
4500         skspcl->data_buf = NULL;
4501         skspcl->db_dma_address = 0;
4502
4503         if (skspcl->msg_buf != NULL) {
4504                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4505                 pci_free_consistent(skdev->pdev, nbytes,
4506                                     skspcl->msg_buf, skspcl->mb_dma_address);
4507         }
4508
4509         skspcl->msg_buf = NULL;
4510         skspcl->mb_dma_address = 0;
4511
4512         skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4513                          skspcl->req.sksg_dma_address);
4514
4515         skspcl->req.sksg_list = NULL;
4516         skspcl->req.sksg_dma_address = 0;
4517 }
4518
4519 static void skd_free_disk(struct skd_device *skdev)
4520 {
4521         struct gendisk *disk = skdev->disk;
4522
4523         if (disk && (disk->flags & GENHD_FL_UP))
4524                 del_gendisk(disk);
4525
4526         if (skdev->queue) {
4527                 blk_cleanup_queue(skdev->queue);
4528                 skdev->queue = NULL;
4529                 disk->queue = NULL;
4530         }
4531
4532         put_disk(disk);
4533         skdev->disk = NULL;
4534 }
4535
4536 static void skd_destruct(struct skd_device *skdev)
4537 {
4538         if (skdev == NULL)
4539                 return;
4540
4541         pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4542         skd_free_disk(skdev);
4543
4544         pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4545         skd_free_sksb(skdev);
4546
4547         pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4548         skd_free_skspcl(skdev);
4549
4550         pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4551         skd_free_skreq(skdev);
4552
4553         pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4554         skd_free_skmsg(skdev);
4555
4556         pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4557         skd_free_skcomp(skdev);
4558
4559         pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
4560         kfree(skdev);
4561 }
4562
4563 /*
4564  *****************************************************************************
4565  * BLOCK DEVICE (BDEV) GLUE
4566  *****************************************************************************
4567  */
4568
4569 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4570 {
4571         struct skd_device *skdev;
4572         u64 capacity;
4573
4574         skdev = bdev->bd_disk->private_data;
4575
4576         pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
4577                  skdev->name, __func__, __LINE__,
4578                  bdev->bd_disk->disk_name, current->comm);
4579
4580         if (skdev->read_cap_is_valid) {
4581                 capacity = get_capacity(skdev->disk);
4582                 geo->heads = 64;
4583                 geo->sectors = 255;
4584                 geo->cylinders = (capacity) / (255 * 64);
4585
4586                 return 0;
4587         }
4588         return -EIO;
4589 }
4590
4591 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
4592 {
4593         pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
4594         device_add_disk(parent, skdev->disk);
4595         return 0;
4596 }
4597
4598 static const struct block_device_operations skd_blockdev_ops = {
4599         .owner          = THIS_MODULE,
4600         .ioctl          = skd_bdev_ioctl,
4601         .getgeo         = skd_bdev_getgeo,
4602 };
4603
4604 /*
4605  *****************************************************************************
4606  * PCIe DRIVER GLUE
4607  *****************************************************************************
4608  */
4609
4610 static const struct pci_device_id skd_pci_tbl[] = {
4611         { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4612           PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4613         { 0 }                     /* terminate list */
4614 };
4615
4616 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4617
4618 static char *skd_pci_info(struct skd_device *skdev, char *str)
4619 {
4620         int pcie_reg;
4621
4622         strcpy(str, "PCIe (");
4623         pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4624
4625         if (pcie_reg) {
4626
4627                 char lwstr[6];
4628                 uint16_t pcie_lstat, lspeed, lwidth;
4629
4630                 pcie_reg += 0x12;
4631                 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4632                 lspeed = pcie_lstat & (0xF);
4633                 lwidth = (pcie_lstat & 0x3F0) >> 4;
4634
4635                 if (lspeed == 1)
4636                         strcat(str, "2.5GT/s ");
4637                 else if (lspeed == 2)
4638                         strcat(str, "5.0GT/s ");
4639                 else
4640                         strcat(str, "<unknown> ");
4641                 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4642                 strcat(str, lwstr);
4643         }
4644         return str;
4645 }
4646
4647 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4648 {
4649         int i;
4650         int rc = 0;
4651         char pci_str[32];
4652         struct skd_device *skdev;
4653
4654         pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
4655                DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4656         pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
4657                pci_name(pdev), pdev->vendor, pdev->device);
4658
4659         rc = pci_enable_device(pdev);
4660         if (rc)
4661                 return rc;
4662         rc = pci_request_regions(pdev, DRV_NAME);
4663         if (rc)
4664                 goto err_out;
4665         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4666         if (!rc) {
4667                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4668
4669                         pr_err("(%s): consistent DMA mask error %d\n",
4670                                pci_name(pdev), rc);
4671                 }
4672         } else {
4673                 (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)));
4674                 if (rc) {
4675
4676                         pr_err("(%s): DMA mask error %d\n",
4677                                pci_name(pdev), rc);
4678                         goto err_out_regions;
4679                 }
4680         }
4681
4682         if (!skd_major) {
4683                 rc = register_blkdev(0, DRV_NAME);
4684                 if (rc < 0)
4685                         goto err_out_regions;
4686                 BUG_ON(!rc);
4687                 skd_major = rc;
4688         }
4689
4690         skdev = skd_construct(pdev);
4691         if (skdev == NULL) {
4692                 rc = -ENOMEM;
4693                 goto err_out_regions;
4694         }
4695
4696         skd_pci_info(skdev, pci_str);
4697         pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
4698
4699         pci_set_master(pdev);
4700         rc = pci_enable_pcie_error_reporting(pdev);
4701         if (rc) {
4702                 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
4703                        skd_name(skdev), rc);
4704                 skdev->pcie_error_reporting_is_enabled = 0;
4705         } else
4706                 skdev->pcie_error_reporting_is_enabled = 1;
4707
4708         pci_set_drvdata(pdev, skdev);
4709
4710         for (i = 0; i < SKD_MAX_BARS; i++) {
4711                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4712                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4713                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4714                                             skdev->mem_size[i]);
4715                 if (!skdev->mem_map[i]) {
4716                         pr_err("(%s): Unable to map adapter memory!\n",
4717                                skd_name(skdev));
4718                         rc = -ENODEV;
4719                         goto err_out_iounmap;
4720                 }
4721                 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
4722                          skdev->name, __func__, __LINE__,
4723                          skdev->mem_map[i],
4724                          (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4725         }
4726
4727         rc = skd_acquire_irq(skdev);
4728         if (rc) {
4729                 pr_err("(%s): interrupt resource error %d\n",
4730                        skd_name(skdev), rc);
4731                 goto err_out_iounmap;
4732         }
4733
4734         rc = skd_start_timer(skdev);
4735         if (rc)
4736                 goto err_out_timer;
4737
4738         init_waitqueue_head(&skdev->waitq);
4739
4740         skd_start_device(skdev);
4741
4742         rc = wait_event_interruptible_timeout(skdev->waitq,
4743                                               (skdev->gendisk_on),
4744                                               (SKD_START_WAIT_SECONDS * HZ));
4745         if (skdev->gendisk_on > 0) {
4746                 /* device came on-line after reset */
4747                 skd_bdev_attach(&pdev->dev, skdev);
4748                 rc = 0;
4749         } else {
4750                 /* we timed out, something is wrong with the device,
4751                    don't add the disk structure */
4752                 pr_err("(%s): error: waiting for s1120 timed out %d!\n",
4753                        skd_name(skdev), rc);
4754                 /* in case of no error; we timeout with ENXIO */
4755                 if (!rc)
4756                         rc = -ENXIO;
4757                 goto err_out_timer;
4758         }
4759
4760         return rc;
4761
4762 err_out_timer:
4763         skd_stop_device(skdev);
4764         skd_release_irq(skdev);
4765
4766 err_out_iounmap:
4767         for (i = 0; i < SKD_MAX_BARS; i++)
4768                 if (skdev->mem_map[i])
4769                         iounmap(skdev->mem_map[i]);
4770
4771         if (skdev->pcie_error_reporting_is_enabled)
4772                 pci_disable_pcie_error_reporting(pdev);
4773
4774         skd_destruct(skdev);
4775
4776 err_out_regions:
4777         pci_release_regions(pdev);
4778
4779 err_out:
4780         pci_disable_device(pdev);
4781         pci_set_drvdata(pdev, NULL);
4782         return rc;
4783 }
4784
4785 static void skd_pci_remove(struct pci_dev *pdev)
4786 {
4787         int i;
4788         struct skd_device *skdev;
4789
4790         skdev = pci_get_drvdata(pdev);
4791         if (!skdev) {
4792                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4793                 return;
4794         }
4795         skd_stop_device(skdev);
4796         skd_release_irq(skdev);
4797
4798         for (i = 0; i < SKD_MAX_BARS; i++)
4799                 if (skdev->mem_map[i])
4800                         iounmap((u32 *)skdev->mem_map[i]);
4801
4802         if (skdev->pcie_error_reporting_is_enabled)
4803                 pci_disable_pcie_error_reporting(pdev);
4804
4805         skd_destruct(skdev);
4806
4807         pci_release_regions(pdev);
4808         pci_disable_device(pdev);
4809         pci_set_drvdata(pdev, NULL);
4810
4811         return;
4812 }
4813
4814 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4815 {
4816         int i;
4817         struct skd_device *skdev;
4818
4819         skdev = pci_get_drvdata(pdev);
4820         if (!skdev) {
4821                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4822                 return -EIO;
4823         }
4824
4825         skd_stop_device(skdev);
4826
4827         skd_release_irq(skdev);
4828
4829         for (i = 0; i < SKD_MAX_BARS; i++)
4830                 if (skdev->mem_map[i])
4831                         iounmap((u32 *)skdev->mem_map[i]);
4832
4833         if (skdev->pcie_error_reporting_is_enabled)
4834                 pci_disable_pcie_error_reporting(pdev);
4835
4836         pci_release_regions(pdev);
4837         pci_save_state(pdev);
4838         pci_disable_device(pdev);
4839         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4840         return 0;
4841 }
4842
4843 static int skd_pci_resume(struct pci_dev *pdev)
4844 {
4845         int i;
4846         int rc = 0;
4847         struct skd_device *skdev;
4848
4849         skdev = pci_get_drvdata(pdev);
4850         if (!skdev) {
4851                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4852                 return -1;
4853         }
4854
4855         pci_set_power_state(pdev, PCI_D0);
4856         pci_enable_wake(pdev, PCI_D0, 0);
4857         pci_restore_state(pdev);
4858
4859         rc = pci_enable_device(pdev);
4860         if (rc)
4861                 return rc;
4862         rc = pci_request_regions(pdev, DRV_NAME);
4863         if (rc)
4864                 goto err_out;
4865         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4866         if (!rc) {
4867                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4868
4869                         pr_err("(%s): consistent DMA mask error %d\n",
4870                                pci_name(pdev), rc);
4871                 }
4872         } else {
4873                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4874                 if (rc) {
4875
4876                         pr_err("(%s): DMA mask error %d\n",
4877                                pci_name(pdev), rc);
4878                         goto err_out_regions;
4879                 }
4880         }
4881
4882         pci_set_master(pdev);
4883         rc = pci_enable_pcie_error_reporting(pdev);
4884         if (rc) {
4885                 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
4886                        skdev->name, rc);
4887                 skdev->pcie_error_reporting_is_enabled = 0;
4888         } else
4889                 skdev->pcie_error_reporting_is_enabled = 1;
4890
4891         for (i = 0; i < SKD_MAX_BARS; i++) {
4892
4893                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4894                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4895                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4896                                             skdev->mem_size[i]);
4897                 if (!skdev->mem_map[i]) {
4898                         pr_err("(%s): Unable to map adapter memory!\n",
4899                                skd_name(skdev));
4900                         rc = -ENODEV;
4901                         goto err_out_iounmap;
4902                 }
4903                 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
4904                          skdev->name, __func__, __LINE__,
4905                          skdev->mem_map[i],
4906                          (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4907         }
4908         rc = skd_acquire_irq(skdev);
4909         if (rc) {
4910
4911                 pr_err("(%s): interrupt resource error %d\n",
4912                        pci_name(pdev), rc);
4913                 goto err_out_iounmap;
4914         }
4915
4916         rc = skd_start_timer(skdev);
4917         if (rc)
4918                 goto err_out_timer;
4919
4920         init_waitqueue_head(&skdev->waitq);
4921
4922         skd_start_device(skdev);
4923
4924         return rc;
4925
4926 err_out_timer:
4927         skd_stop_device(skdev);
4928         skd_release_irq(skdev);
4929
4930 err_out_iounmap:
4931         for (i = 0; i < SKD_MAX_BARS; i++)
4932                 if (skdev->mem_map[i])
4933                         iounmap(skdev->mem_map[i]);
4934
4935         if (skdev->pcie_error_reporting_is_enabled)
4936                 pci_disable_pcie_error_reporting(pdev);
4937
4938 err_out_regions:
4939         pci_release_regions(pdev);
4940
4941 err_out:
4942         pci_disable_device(pdev);
4943         return rc;
4944 }
4945
4946 static void skd_pci_shutdown(struct pci_dev *pdev)
4947 {
4948         struct skd_device *skdev;
4949
4950         pr_err("skd_pci_shutdown called\n");
4951
4952         skdev = pci_get_drvdata(pdev);
4953         if (!skdev) {
4954                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4955                 return;
4956         }
4957
4958         pr_err("%s: calling stop\n", skd_name(skdev));
4959         skd_stop_device(skdev);
4960 }
4961
4962 static struct pci_driver skd_driver = {
4963         .name           = DRV_NAME,
4964         .id_table       = skd_pci_tbl,
4965         .probe          = skd_pci_probe,
4966         .remove         = skd_pci_remove,
4967         .suspend        = skd_pci_suspend,
4968         .resume         = skd_pci_resume,
4969         .shutdown       = skd_pci_shutdown,
4970 };
4971
4972 /*
4973  *****************************************************************************
4974  * LOGGING SUPPORT
4975  *****************************************************************************
4976  */
4977
4978 static const char *skd_name(struct skd_device *skdev)
4979 {
4980         memset(skdev->id_str, 0, sizeof(skdev->id_str));
4981
4982         if (skdev->inquiry_is_valid)
4983                 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
4984                          skdev->name, skdev->inq_serial_num,
4985                          pci_name(skdev->pdev));
4986         else
4987                 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
4988                          skdev->name, pci_name(skdev->pdev));
4989
4990         return skdev->id_str;
4991 }
4992
4993 const char *skd_drive_state_to_str(int state)
4994 {
4995         switch (state) {
4996         case FIT_SR_DRIVE_OFFLINE:
4997                 return "OFFLINE";
4998         case FIT_SR_DRIVE_INIT:
4999                 return "INIT";
5000         case FIT_SR_DRIVE_ONLINE:
5001                 return "ONLINE";
5002         case FIT_SR_DRIVE_BUSY:
5003                 return "BUSY";
5004         case FIT_SR_DRIVE_FAULT:
5005                 return "FAULT";
5006         case FIT_SR_DRIVE_DEGRADED:
5007                 return "DEGRADED";
5008         case FIT_SR_PCIE_LINK_DOWN:
5009                 return "INK_DOWN";
5010         case FIT_SR_DRIVE_SOFT_RESET:
5011                 return "SOFT_RESET";
5012         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
5013                 return "NEED_FW";
5014         case FIT_SR_DRIVE_INIT_FAULT:
5015                 return "INIT_FAULT";
5016         case FIT_SR_DRIVE_BUSY_SANITIZE:
5017                 return "BUSY_SANITIZE";
5018         case FIT_SR_DRIVE_BUSY_ERASE:
5019                 return "BUSY_ERASE";
5020         case FIT_SR_DRIVE_FW_BOOTING:
5021                 return "FW_BOOTING";
5022         default:
5023                 return "???";
5024         }
5025 }
5026
5027 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5028 {
5029         switch (state) {
5030         case SKD_DRVR_STATE_LOAD:
5031                 return "LOAD";
5032         case SKD_DRVR_STATE_IDLE:
5033                 return "IDLE";
5034         case SKD_DRVR_STATE_BUSY:
5035                 return "BUSY";
5036         case SKD_DRVR_STATE_STARTING:
5037                 return "STARTING";
5038         case SKD_DRVR_STATE_ONLINE:
5039                 return "ONLINE";
5040         case SKD_DRVR_STATE_PAUSING:
5041                 return "PAUSING";
5042         case SKD_DRVR_STATE_PAUSED:
5043                 return "PAUSED";
5044         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
5045                 return "DRAINING_TIMEOUT";
5046         case SKD_DRVR_STATE_RESTARTING:
5047                 return "RESTARTING";
5048         case SKD_DRVR_STATE_RESUMING:
5049                 return "RESUMING";
5050         case SKD_DRVR_STATE_STOPPING:
5051                 return "STOPPING";
5052         case SKD_DRVR_STATE_SYNCING:
5053                 return "SYNCING";
5054         case SKD_DRVR_STATE_FAULT:
5055                 return "FAULT";
5056         case SKD_DRVR_STATE_DISAPPEARED:
5057                 return "DISAPPEARED";
5058         case SKD_DRVR_STATE_BUSY_ERASE:
5059                 return "BUSY_ERASE";
5060         case SKD_DRVR_STATE_BUSY_SANITIZE:
5061                 return "BUSY_SANITIZE";
5062         case SKD_DRVR_STATE_BUSY_IMMINENT:
5063                 return "BUSY_IMMINENT";
5064         case SKD_DRVR_STATE_WAIT_BOOT:
5065                 return "WAIT_BOOT";
5066
5067         default:
5068                 return "???";
5069         }
5070 }
5071
5072 static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5073 {
5074         switch (state) {
5075         case SKD_MSG_STATE_IDLE:
5076                 return "IDLE";
5077         case SKD_MSG_STATE_BUSY:
5078                 return "BUSY";
5079         default:
5080                 return "???";
5081         }
5082 }
5083
5084 static const char *skd_skreq_state_to_str(enum skd_req_state state)
5085 {
5086         switch (state) {
5087         case SKD_REQ_STATE_IDLE:
5088                 return "IDLE";
5089         case SKD_REQ_STATE_SETUP:
5090                 return "SETUP";
5091         case SKD_REQ_STATE_BUSY:
5092                 return "BUSY";
5093         case SKD_REQ_STATE_COMPLETED:
5094                 return "COMPLETED";
5095         case SKD_REQ_STATE_TIMEOUT:
5096                 return "TIMEOUT";
5097         case SKD_REQ_STATE_ABORTED:
5098                 return "ABORTED";
5099         default:
5100                 return "???";
5101         }
5102 }
5103
5104 static void skd_log_skdev(struct skd_device *skdev, const char *event)
5105 {
5106         pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
5107                  skdev->name, __func__, __LINE__, skdev->name, skdev, event);
5108         pr_debug("%s:%s:%d   drive_state=%s(%d) driver_state=%s(%d)\n",
5109                  skdev->name, __func__, __LINE__,
5110                  skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
5111                  skd_skdev_state_to_str(skdev->state), skdev->state);
5112         pr_debug("%s:%s:%d   busy=%d limit=%d dev=%d lowat=%d\n",
5113                  skdev->name, __func__, __LINE__,
5114                  skdev->in_flight, skdev->cur_max_queue_depth,
5115                  skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
5116         pr_debug("%s:%s:%d   timestamp=0x%x cycle=%d cycle_ix=%d\n",
5117                  skdev->name, __func__, __LINE__,
5118                  skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
5119 }
5120
5121 static void skd_log_skmsg(struct skd_device *skdev,
5122                           struct skd_fitmsg_context *skmsg, const char *event)
5123 {
5124         pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
5125                  skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
5126         pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x length=%d\n",
5127                  skdev->name, __func__, __LINE__,
5128                  skd_skmsg_state_to_str(skmsg->state), skmsg->state,
5129                  skmsg->id, skmsg->length);
5130 }
5131
5132 static void skd_log_skreq(struct skd_device *skdev,
5133                           struct skd_request_context *skreq, const char *event)
5134 {
5135         pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
5136                  skdev->name, __func__, __LINE__, skdev->name, skreq, event);
5137         pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
5138                  skdev->name, __func__, __LINE__,
5139                  skd_skreq_state_to_str(skreq->state), skreq->state,
5140                  skreq->id, skreq->fitmsg_id);
5141         pr_debug("%s:%s:%d   timo=0x%x sg_dir=%d n_sg=%d\n",
5142                  skdev->name, __func__, __LINE__,
5143                  skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
5144
5145         if (skreq->req != NULL) {
5146                 struct request *req = skreq->req;
5147                 u32 lba = (u32)blk_rq_pos(req);
5148                 u32 count = blk_rq_sectors(req);
5149
5150                 pr_debug("%s:%s:%d "
5151                          "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5152                          skdev->name, __func__, __LINE__,
5153                          req, lba, lba, count, count,
5154                          (int)rq_data_dir(req));
5155         } else
5156                 pr_debug("%s:%s:%d req=NULL\n",
5157                          skdev->name, __func__, __LINE__);
5158 }
5159
5160 /*
5161  *****************************************************************************
5162  * MODULE GLUE
5163  *****************************************************************************
5164  */
5165
5166 static int __init skd_init(void)
5167 {
5168         pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5169
5170         switch (skd_isr_type) {
5171         case SKD_IRQ_LEGACY:
5172         case SKD_IRQ_MSI:
5173         case SKD_IRQ_MSIX:
5174                 break;
5175         default:
5176                 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
5177                        skd_isr_type, SKD_IRQ_DEFAULT);
5178                 skd_isr_type = SKD_IRQ_DEFAULT;
5179         }
5180
5181         if (skd_max_queue_depth < 1 ||
5182             skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5183                 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
5184                        skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5185                 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5186         }
5187
5188         if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
5189                 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
5190                        skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5191                 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5192         }
5193
5194         if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5195                 pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
5196                        skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5197                 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5198         }
5199
5200         if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5201                 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
5202                        skd_dbg_level, 0);
5203                 skd_dbg_level = 0;
5204         }
5205
5206         if (skd_isr_comp_limit < 0) {
5207                 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
5208                        skd_isr_comp_limit, 0);
5209                 skd_isr_comp_limit = 0;
5210         }
5211
5212         if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5213                 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
5214                        skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5215                 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5216         }
5217
5218         return pci_register_driver(&skd_driver);
5219 }
5220
5221 static void __exit skd_exit(void)
5222 {
5223         pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5224
5225         pci_unregister_driver(&skd_driver);
5226
5227         if (skd_major)
5228                 unregister_blkdev(skd_major, DRV_NAME);
5229 }
5230
5231 module_init(skd_init);
5232 module_exit(skd_exit);