drivers/block/skd_main.c
1 /*
2  * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
3  * was acquired by Western Digital in 2012.
4  *
5  * Copyright 2012 sTec, Inc.
6  * Copyright (c) 2017 Western Digital Corporation or its affiliates.
7  *
8  * This file is part of the Linux kernel, and is made available under
9  * the terms of the GNU General Public License version 2.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/pci.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/blkdev.h>
19 #include <linux/sched.h>
20 #include <linux/interrupt.h>
21 #include <linux/compiler.h>
22 #include <linux/workqueue.h>
23 #include <linux/delay.h>
24 #include <linux/time.h>
25 #include <linux/hdreg.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/completion.h>
28 #include <linux/scatterlist.h>
29 #include <linux/version.h>
30 #include <linux/err.h>
31 #include <linux/aer.h>
32 #include <linux/wait.h>
33 #include <linux/uio.h>
34 #include <scsi/scsi.h>
35 #include <scsi/sg.h>
36 #include <linux/io.h>
37 #include <linux/uaccess.h>
38 #include <asm/unaligned.h>
39
40 #include "skd_s1120.h"
41
42 static int skd_dbg_level;
43 static int skd_isr_comp_limit = 4;
44
45 enum {
46         STEC_LINK_2_5GTS = 0,
47         STEC_LINK_5GTS = 1,
48         STEC_LINK_8GTS = 2,
49         STEC_LINK_UNKNOWN = 0xFF
50 };
51
52 enum {
53         SKD_FLUSH_INITIALIZER,
54         SKD_FLUSH_ZERO_SIZE_FIRST,
55         SKD_FLUSH_DATA_SECOND,
56 };
57
58 #define SKD_ASSERT(expr) \
59         do { \
60                 if (unlikely(!(expr))) { \
61                         pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
62                                # expr, __FILE__, __func__, __LINE__); \
63                 } \
64         } while (0)
65
66 #define DRV_NAME "skd"
67 #define DRV_VERSION "2.2.1"
68 #define DRV_BUILD_ID "0260"
69 #define PFX DRV_NAME ": "
70 #define DRV_BIN_VERSION 0x100
71 #define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID
72
73 MODULE_LICENSE("GPL");
74
75 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
76 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
77
78 #define PCI_VENDOR_ID_STEC      0x1B39
79 #define PCI_DEVICE_ID_S1120     0x0001
80
81 #define SKD_FUA_NV              (1 << 1)
82 #define SKD_MINORS_PER_DEVICE   16
83
84 #define SKD_MAX_QUEUE_DEPTH     200u
85
86 #define SKD_PAUSE_TIMEOUT       (5 * 1000)
87
88 #define SKD_N_FITMSG_BYTES      (512u)
89
90 #define SKD_N_SPECIAL_CONTEXT   32u
91 #define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
92
93 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
94  * 128KB limit.  That allows 4096*4K = 16M xfer size
95  */
96 #define SKD_N_SG_PER_REQ_DEFAULT 256u
97 #define SKD_N_SG_PER_SPECIAL    256u
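/*
 * Rough sizing note: with the 256-element default above and 4 KiB
 * pages, a single request can carry about 256 * 4 KiB = 1 MiB of
 * data, which lines up with the SKD_N_MAX_SECTORS limit of 2048
 * 512-byte sectors defined below.
 */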
98
99 #define SKD_N_COMPLETION_ENTRY  256u
100 #define SKD_N_READ_CAP_BYTES    (8u)
101
102 #define SKD_N_INTERNAL_BYTES    (512u)
103
104 /* 5 bits of uniquifier, 0xF800 */
105 #define SKD_ID_INCR             (0x400)
106 #define SKD_ID_TABLE_MASK       (3u << 8u)
107 #define  SKD_ID_RW_REQUEST      (0u << 8u)
108 #define  SKD_ID_INTERNAL        (1u << 8u)
109 #define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
110 #define  SKD_ID_FIT_MSG         (3u << 8u)
111 #define SKD_ID_SLOT_MASK        0x00FFu
112 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
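/*
 * A request/message id is therefore split into fields: bits 0-7 are
 * the slot within a table, bits 8-9 select the table (R/W request,
 * internal, special request or FIT message), and the bits above that
 * act as the uniquifier, bumped by SKD_ID_INCR every time a context
 * is reused so a stale completion can be told apart from a current one.
 */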
113
114 #define SKD_N_TIMEOUT_SLOT      4u
115 #define SKD_TIMEOUT_SLOT_MASK   3u
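/*
 * In-flight requests are binned into SKD_N_TIMEOUT_SLOT buckets keyed
 * by the low bits of a once-per-second timestamp.  The timer only has
 * to look at the bucket it is about to reuse: if that bucket is still
 * non-zero, its requests have been outstanding for a full trip around
 * the slots and are treated as overdue (see skd_timer_tick()).
 */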
116
117 #define SKD_N_MAX_SECTORS 2048u
118
119 #define SKD_MAX_RETRIES 2u
120
121 #define SKD_TIMER_SECONDS(seconds) (seconds)
122 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
123
124 #define INQ_STD_NBYTES 36
125
126 enum skd_drvr_state {
127         SKD_DRVR_STATE_LOAD,
128         SKD_DRVR_STATE_IDLE,
129         SKD_DRVR_STATE_BUSY,
130         SKD_DRVR_STATE_STARTING,
131         SKD_DRVR_STATE_ONLINE,
132         SKD_DRVR_STATE_PAUSING,
133         SKD_DRVR_STATE_PAUSED,
134         SKD_DRVR_STATE_DRAINING_TIMEOUT,
135         SKD_DRVR_STATE_RESTARTING,
136         SKD_DRVR_STATE_RESUMING,
137         SKD_DRVR_STATE_STOPPING,
138         SKD_DRVR_STATE_FAULT,
139         SKD_DRVR_STATE_DISAPPEARED,
140         SKD_DRVR_STATE_PROTOCOL_MISMATCH,
141         SKD_DRVR_STATE_BUSY_ERASE,
142         SKD_DRVR_STATE_BUSY_SANITIZE,
143         SKD_DRVR_STATE_BUSY_IMMINENT,
144         SKD_DRVR_STATE_WAIT_BOOT,
145         SKD_DRVR_STATE_SYNCING,
146 };
147
148 #define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
149 #define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
150 #define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
151 #define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
152 #define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
153 #define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
154 #define SKD_START_WAIT_SECONDS  90u
155
156 enum skd_req_state {
157         SKD_REQ_STATE_IDLE,
158         SKD_REQ_STATE_SETUP,
159         SKD_REQ_STATE_BUSY,
160         SKD_REQ_STATE_COMPLETED,
161         SKD_REQ_STATE_TIMEOUT,
162         SKD_REQ_STATE_ABORTED,
163 };
164
165 enum skd_fit_msg_state {
166         SKD_MSG_STATE_IDLE,
167         SKD_MSG_STATE_BUSY,
168 };
169
170 enum skd_check_status_action {
171         SKD_CHECK_STATUS_REPORT_GOOD,
172         SKD_CHECK_STATUS_REPORT_SMART_ALERT,
173         SKD_CHECK_STATUS_REQUEUE_REQUEST,
174         SKD_CHECK_STATUS_REPORT_ERROR,
175         SKD_CHECK_STATUS_BUSY_IMMINENT,
176 };
177
178 struct skd_fitmsg_context {
179         enum skd_fit_msg_state state;
180
181         struct skd_fitmsg_context *next;
182
183         u32 id;
184         u16 outstanding;
185
186         u32 length;
187         u32 offset;
188
189         u8 *msg_buf;
190         dma_addr_t mb_dma_address;
191 };
192
193 struct skd_request_context {
194         enum skd_req_state state;
195
196         struct skd_request_context *next;
197
198         u16 id;
199         u32 fitmsg_id;
200
201         struct request *req;
202         u8 flush_cmd;
203
204         u32 timeout_stamp;
205         u8 sg_data_dir;
206         struct scatterlist *sg;
207         u32 n_sg;
208         u32 sg_byte_count;
209
210         struct fit_sg_descriptor *sksg_list;
211         dma_addr_t sksg_dma_address;
212
213         struct fit_completion_entry_v1 completion;
214
215         struct fit_comp_error_info err_info;
216
217 };
218 #define SKD_DATA_DIR_HOST_TO_CARD       1
219 #define SKD_DATA_DIR_CARD_TO_HOST       2
220
221 struct skd_special_context {
222         struct skd_request_context req;
223
224         u8 orphaned;
225
226         void *data_buf;
227         dma_addr_t db_dma_address;
228
229         u8 *msg_buf;
230         dma_addr_t mb_dma_address;
231 };
232
233 struct skd_sg_io {
234         fmode_t mode;
235         void __user *argp;
236
237         struct sg_io_hdr sg;
238
239         u8 cdb[16];
240
241         u32 dxfer_len;
242         u32 iovcnt;
243         struct sg_iovec *iov;
244         struct sg_iovec no_iov_iov;
245
246         struct skd_special_context *skspcl;
247 };
248
249 typedef enum skd_irq_type {
250         SKD_IRQ_LEGACY,
251         SKD_IRQ_MSI,
252         SKD_IRQ_MSIX
253 } skd_irq_type_t;
254
255 #define SKD_MAX_BARS                    2
256
257 struct skd_device {
258         volatile void __iomem *mem_map[SKD_MAX_BARS];
259         resource_size_t mem_phys[SKD_MAX_BARS];
260         u32 mem_size[SKD_MAX_BARS];
261
262         struct skd_msix_entry *msix_entries;
263
264         struct pci_dev *pdev;
265         int pcie_error_reporting_is_enabled;
266
267         spinlock_t lock;
268         struct gendisk *disk;
269         struct request_queue *queue;
270         struct device *class_dev;
271         int gendisk_on;
272         int sync_done;
273
274         atomic_t device_count;
275         u32 devno;
276         u32 major;
277         char name[32];
278         char isr_name[30];
279
280         enum skd_drvr_state state;
281         u32 drive_state;
282
283         u32 in_flight;
284         u32 cur_max_queue_depth;
285         u32 queue_low_water_mark;
286         u32 dev_max_queue_depth;
287
288         u32 num_fitmsg_context;
289         u32 num_req_context;
290
291         u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
292         u32 timeout_stamp;
293         struct skd_fitmsg_context *skmsg_free_list;
294         struct skd_fitmsg_context *skmsg_table;
295
296         struct skd_request_context *skreq_free_list;
297         struct skd_request_context *skreq_table;
298
299         struct skd_special_context *skspcl_free_list;
300         struct skd_special_context *skspcl_table;
301
302         struct skd_special_context internal_skspcl;
303         u32 read_cap_blocksize;
304         u32 read_cap_last_lba;
305         int read_cap_is_valid;
306         int inquiry_is_valid;
307         u8 inq_serial_num[13];  /*12 chars plus null term */
308         u8 id_str[80];          /* holds a composite name (pci + sernum) */
309
310         u8 skcomp_cycle;
311         u32 skcomp_ix;
312         struct fit_completion_entry_v1 *skcomp_table;
313         struct fit_comp_error_info *skerr_table;
314         dma_addr_t cq_dma_address;
315
316         wait_queue_head_t waitq;
317
318         struct timer_list timer;
319         u32 timer_countdown;
320         u32 timer_substate;
321
322         int n_special;
323         int sgs_per_request;
324         u32 last_mtd;
325
326         u32 proto_ver;
327
328         int dbg_level;
329         u32 connect_time_stamp;
330         int connect_retries;
331 #define SKD_MAX_CONNECT_RETRIES 16
332         u32 drive_jiffies;
333
334         u32 timo_slot;
335
336         struct work_struct completion_worker;
337 };
338
339 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
340 #define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
341 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
342
343 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
344 {
345         u32 val;
346
347         if (likely(skdev->dbg_level < 2))
348                 return readl(skdev->mem_map[1] + offset);
349         else {
350                 barrier();
351                 val = readl(skdev->mem_map[1] + offset);
352                 barrier();
353                 pr_debug("%s:%s:%d offset %x = %x\n",
354                          skdev->name, __func__, __LINE__, offset, val);
355                 return val;
356         }
357
358 }
359
360 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
361                                    u32 offset)
362 {
363         if (likely(skdev->dbg_level < 2)) {
364                 writel(val, skdev->mem_map[1] + offset);
365                 barrier();
366         } else {
367                 barrier();
368                 writel(val, skdev->mem_map[1] + offset);
369                 barrier();
370                 pr_debug("%s:%s:%d offset %x = %x\n",
371                          skdev->name, __func__, __LINE__, offset, val);
372         }
373 }
374
375 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
376                                    u32 offset)
377 {
378         if (likely(skdev->dbg_level < 2)) {
379                 writeq(val, skdev->mem_map[1] + offset);
380                 barrier();
381         } else {
382                 barrier();
383                 writeq(val, skdev->mem_map[1] + offset);
384                 barrier();
385                 pr_debug("%s:%s:%d offset %x = %016llx\n",
386                          skdev->name, __func__, __LINE__, offset, val);
387         }
388 }
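/*
 * All of the accessors above target BAR 1 (mem_map[1]).  When
 * skd_dbg_level >= 2 each register access is additionally traced with
 * pr_debug(), at the cost of extra compiler barriers around the MMIO
 * operation.
 */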
389
390
391 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
392 static int skd_isr_type = SKD_IRQ_DEFAULT;
393
394 module_param(skd_isr_type, int, 0444);
395 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
396                  " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
397
398 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
399 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
400
401 module_param(skd_max_req_per_msg, int, 0444);
402 MODULE_PARM_DESC(skd_max_req_per_msg,
403                  "Maximum SCSI requests packed in a single message."
404                  " (1-14, default==1)");
405
406 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
407 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
408 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
409
410 module_param(skd_max_queue_depth, int, 0444);
411 MODULE_PARM_DESC(skd_max_queue_depth,
412                  "Maximum SCSI requests issued to s1120."
413                  " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
414
415 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
416 module_param(skd_sgs_per_request, int, 0444);
417 MODULE_PARM_DESC(skd_sgs_per_request,
418                  "Maximum SG elements per block request."
419                  " (1-4096, default==256)");
420
421 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
422 module_param(skd_max_pass_thru, int, 0444);
423 MODULE_PARM_DESC(skd_max_pass_thru,
424                  "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
425
426 module_param(skd_dbg_level, int, 0444);
427 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
428
429 module_param(skd_isr_comp_limit, int, 0444);
430 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
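/*
 * All of the parameters above are 0444 (read-only), so they can only
 * be set at load time.  For illustration only, with made-up values:
 *
 *   modprobe skd skd_isr_type=2 skd_max_queue_depth=128 skd_max_req_per_msg=4
 *
 * would ask for MSI-X interrupts, a queue depth of 128, and up to four
 * coalesced requests per FIT message.
 */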
431
432 /* Major device number dynamically assigned. */
433 static u32 skd_major;
434
435 static void skd_destruct(struct skd_device *skdev);
436 static const struct block_device_operations skd_blockdev_ops;
437 static void skd_send_fitmsg(struct skd_device *skdev,
438                             struct skd_fitmsg_context *skmsg);
439 static void skd_send_special_fitmsg(struct skd_device *skdev,
440                                     struct skd_special_context *skspcl);
441 static void skd_request_fn(struct request_queue *rq);
442 static void skd_end_request(struct skd_device *skdev,
443                 struct skd_request_context *skreq, blk_status_t status);
444 static bool skd_preop_sg_list(struct skd_device *skdev,
445                              struct skd_request_context *skreq);
446 static void skd_postop_sg_list(struct skd_device *skdev,
447                                struct skd_request_context *skreq);
448
449 static void skd_restart_device(struct skd_device *skdev);
450 static int skd_quiesce_dev(struct skd_device *skdev);
451 static int skd_unquiesce_dev(struct skd_device *skdev);
452 static void skd_release_special(struct skd_device *skdev,
453                                 struct skd_special_context *skspcl);
454 static void skd_disable_interrupts(struct skd_device *skdev);
455 static void skd_isr_fwstate(struct skd_device *skdev);
456 static void skd_recover_requests(struct skd_device *skdev, int requeue);
457 static void skd_soft_reset(struct skd_device *skdev);
458
459 static const char *skd_name(struct skd_device *skdev);
460 const char *skd_drive_state_to_str(int state);
461 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
462 static void skd_log_skdev(struct skd_device *skdev, const char *event);
463 static void skd_log_skmsg(struct skd_device *skdev,
464                           struct skd_fitmsg_context *skmsg, const char *event);
465 static void skd_log_skreq(struct skd_device *skdev,
466                           struct skd_request_context *skreq, const char *event);
467
468 /*
469  *****************************************************************************
470  * READ/WRITE REQUESTS
471  *****************************************************************************
472  */
473 static void skd_fail_all_pending(struct skd_device *skdev)
474 {
475         struct request_queue *q = skdev->queue;
476         struct request *req;
477
478         for (;; ) {
479                 req = blk_peek_request(q);
480                 if (req == NULL)
481                         break;
482                 blk_start_request(req);
483                 __blk_end_request_all(req, BLK_STS_IOERR);
484         }
485 }
486
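/*
 * skd_prep_rw_cdb() below builds a plain 10-byte SCSI READ(10) (0x28)
 * or WRITE(10) (0x2a) CDB: big-endian LBA in bytes 2-5 and big-endian
 * sector count in bytes 7-8.  For example, lba=0x12345678 and count=8
 * for a read produce cdb[] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0,
 * 0x00, 0x08, 0 }.
 */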
487 static void
488 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
489                 int data_dir, unsigned lba,
490                 unsigned count)
491 {
492         if (data_dir == READ)
493                 scsi_req->cdb[0] = 0x28;
494         else
495                 scsi_req->cdb[0] = 0x2a;
496
497         scsi_req->cdb[1] = 0;
498         scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
499         scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
500         scsi_req->cdb[4] = (lba & 0xff00) >> 8;
501         scsi_req->cdb[5] = (lba & 0xff);
502         scsi_req->cdb[6] = 0;
503         scsi_req->cdb[7] = (count & 0xff00) >> 8;
504         scsi_req->cdb[8] = count & 0xff;
505         scsi_req->cdb[9] = 0;
506 }
507
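/*
 * Opcode 0x35 below is SYNCHRONIZE CACHE(10); with a zero LBA and a
 * zero block count it asks the device to write out its entire volatile
 * cache, which is how flush requests are issued to the s1120.
 */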
508 static void
509 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
510                             struct skd_request_context *skreq)
511 {
512         skreq->flush_cmd = 1;
513
514         scsi_req->cdb[0] = 0x35;
515         scsi_req->cdb[1] = 0;
516         scsi_req->cdb[2] = 0;
517         scsi_req->cdb[3] = 0;
518         scsi_req->cdb[4] = 0;
519         scsi_req->cdb[5] = 0;
520         scsi_req->cdb[6] = 0;
521         scsi_req->cdb[7] = 0;
522         scsi_req->cdb[8] = 0;
523         scsi_req->cdb[9] = 0;
524 }
525
526 static void skd_request_fn_not_online(struct request_queue *q);
527
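/*
 * skd_request_fn() is the legacy (single-queue) request_fn for this
 * driver.  Broadly, it peeks requests off the queue, pairs each one
 * with a free skd_request_context, transcodes it into a SoFIT SCSI
 * request, packs up to skd_max_req_per_msg of those into one FIT
 * message, and submits the message with skd_send_fitmsg().  When
 * contexts, FIT buffers or queue depth run out, the queue is stopped
 * until resources free up again.
 */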
528 static void skd_request_fn(struct request_queue *q)
529 {
530         struct skd_device *skdev = q->queuedata;
531         struct skd_fitmsg_context *skmsg = NULL;
532         struct fit_msg_hdr *fmh = NULL;
533         struct skd_request_context *skreq;
534         struct request *req = NULL;
535         struct skd_scsi_request *scsi_req;
536         unsigned long io_flags;
537         u32 lba;
538         u32 count;
539         int data_dir;
540         u32 be_lba;
541         u32 be_count;
542         u64 be_dmaa;
543         u64 cmdctxt;
544         u32 timo_slot;
545         void *cmd_ptr;
546         int flush, fua;
547
548         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
549                 skd_request_fn_not_online(q);
550                 return;
551         }
552
553         if (blk_queue_stopped(skdev->queue)) {
554                 if (skdev->skmsg_free_list == NULL ||
555                     skdev->skreq_free_list == NULL ||
556                     skdev->in_flight >= skdev->queue_low_water_mark)
557                         /* There is still some kind of shortage */
558                         return;
559
560                 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
561         }
562
563         /*
564          * Stop conditions:
565          *  - There are no more native requests
566          *  - There are already the maximum number of requests in progress
567          *  - There are no more skd_request_context entries
568          *  - There are no more FIT msg buffers
569          */
570         for (;; ) {
571
572                 flush = fua = 0;
573
574                 req = blk_peek_request(q);
575
576                 /* Are there any native requests to start? */
577                 if (req == NULL)
578                         break;
579
580                 lba = (u32)blk_rq_pos(req);
581                 count = blk_rq_sectors(req);
582                 data_dir = rq_data_dir(req);
583                 io_flags = req->cmd_flags;
584
585                 if (req_op(req) == REQ_OP_FLUSH)
586                         flush++;
587
588                 if (io_flags & REQ_FUA)
589                         fua++;
590
591                 pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
592                          "count=%u(0x%x) dir=%d\n",
593                          skdev->name, __func__, __LINE__,
594                          req, lba, lba, count, count, data_dir);
595
596                 /* At this point we know there is a request */
597
598                 /* Are too many requests already in progress? */
599                 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
600                         pr_debug("%s:%s:%d qdepth %d, limit %d\n",
601                                  skdev->name, __func__, __LINE__,
602                                  skdev->in_flight, skdev->cur_max_queue_depth);
603                         break;
604                 }
605
606                 /* Is a skd_request_context available? */
607                 skreq = skdev->skreq_free_list;
608                 if (skreq == NULL) {
609                         pr_debug("%s:%s:%d Out of req=%p\n",
610                                  skdev->name, __func__, __LINE__, q);
611                         break;
612                 }
613                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
614                 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
615
616                 /* Now we check to see if we can get a fit msg */
617                 if (skmsg == NULL) {
618                         if (skdev->skmsg_free_list == NULL) {
619                                 pr_debug("%s:%s:%d Out of msg\n",
620                                          skdev->name, __func__, __LINE__);
621                                 break;
622                         }
623                 }
624
625                 skreq->flush_cmd = 0;
626                 skreq->n_sg = 0;
627                 skreq->sg_byte_count = 0;
628
629                 /*
630                  * OK to now dequeue request from q.
631                  *
632                  * At this point we are committed to either start or reject
633                  * the native request. Note that skd_request_context is
634                  * available but is still at the head of the free list.
635                  */
636                 blk_start_request(req);
637                 skreq->req = req;
638                 skreq->fitmsg_id = 0;
639
640                 /* Either a FIT msg is in progress or we have to start one. */
641                 if (skmsg == NULL) {
642                         /* Are there any FIT msg buffers available? */
643                         skmsg = skdev->skmsg_free_list;
644                         if (skmsg == NULL) {
645                                 pr_debug("%s:%s:%d Out of msg skdev=%p\n",
646                                          skdev->name, __func__, __LINE__,
647                                          skdev);
648                                 break;
649                         }
650                         SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
651                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
652
653                         skdev->skmsg_free_list = skmsg->next;
654
655                         skmsg->state = SKD_MSG_STATE_BUSY;
656                         skmsg->id += SKD_ID_INCR;
657
658                         /* Initialize the FIT msg header */
659                         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
660                         memset(fmh, 0, sizeof(*fmh));
661                         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
662                         skmsg->length = sizeof(*fmh);
663                 }
664
665                 skreq->fitmsg_id = skmsg->id;
666
667                 /*
668                  * Note that a FIT msg may have just been started
669                  * but contains no SoFIT requests yet.
670                  */
671
672                 /*
673                  * Transcode the request, checking as we go. The outcome of
674                  * the transcoding is represented by the error variable.
675                  */
676                 cmd_ptr = &skmsg->msg_buf[skmsg->length];
677                 memset(cmd_ptr, 0, 32);
678
679                 be_lba = cpu_to_be32(lba);
680                 be_count = cpu_to_be32(count);
681                 be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
682                 cmdctxt = skreq->id + SKD_ID_INCR;
683
684                 scsi_req = cmd_ptr;
685                 scsi_req->hdr.tag = cmdctxt;
686                 scsi_req->hdr.sg_list_dma_address = be_dmaa;
687
688                 if (data_dir == READ)
689                         skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
690                 else
691                         skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
692
693                 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
694                         skd_prep_zerosize_flush_cdb(scsi_req, skreq);
695                         SKD_ASSERT(skreq->flush_cmd == 1);
696                 } else {
697                         skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
698                 }
699
700                 if (fua)
701                         scsi_req->cdb[1] |= SKD_FUA_NV;
702
703                 if (!req->bio)
704                         goto skip_sg;
705
706                 if (!skd_preop_sg_list(skdev, skreq)) {
707                         /*
708                          * Complete the native request with error.
709                          * Note that the request context is still at the
710                          * head of the free list, and that the SoFIT request
711                          * was encoded into the FIT msg buffer but the FIT
712                          * msg length has not been updated. In short, the
713                          * only resource that has been allocated but might
714                          * not be used is that the FIT msg could be empty.
715                          */
716                         pr_debug("%s:%s:%d error Out\n",
717                                  skdev->name, __func__, __LINE__);
718                         skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
719                         continue;
720                 }
721
722 skip_sg:
723                 scsi_req->hdr.sg_list_len_bytes =
724                         cpu_to_be32(skreq->sg_byte_count);
725
726                 /* Complete resource allocations. */
727                 skdev->skreq_free_list = skreq->next;
728                 skreq->state = SKD_REQ_STATE_BUSY;
729                 skreq->id += SKD_ID_INCR;
730
731                 skmsg->length += sizeof(struct skd_scsi_request);
732                 fmh->num_protocol_cmds_coalesced++;
733
734                 /*
735                  * Update the active request counts.
736                  * Capture the timeout timestamp.
737                  */
738                 skreq->timeout_stamp = skdev->timeout_stamp;
739                 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
740                 skdev->timeout_slot[timo_slot]++;
741                 skdev->in_flight++;
742                 pr_debug("%s:%s:%d req=0x%x busy=%d\n",
743                          skdev->name, __func__, __LINE__,
744                          skreq->id, skdev->in_flight);
745
746                 /*
747                  * If the FIT msg buffer is full send it.
748                  */
749                 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
750                     fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
751                         skd_send_fitmsg(skdev, skmsg);
752                         skmsg = NULL;
753                         fmh = NULL;
754                 }
755         }
756
757         /*
758          * Is a FIT msg in progress? If it is empty put the buffer back
759          * on the free list. If it is non-empty send what we got.
760          * This minimizes latency when there are fewer requests than
761          * what fits in a FIT msg.
762          */
763         if (skmsg != NULL) {
764                 /* Bigger than just a FIT msg header? */
765                 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
766                         pr_debug("%s:%s:%d sending msg=%p, len %d\n",
767                                  skdev->name, __func__, __LINE__,
768                                  skmsg, skmsg->length);
769                         skd_send_fitmsg(skdev, skmsg);
770                 } else {
771                         /*
772                          * The FIT msg is empty. It means we got started
773                          * on the msg, but the requests were rejected.
774                          */
775                         skmsg->state = SKD_MSG_STATE_IDLE;
776                         skmsg->id += SKD_ID_INCR;
777                         skmsg->next = skdev->skmsg_free_list;
778                         skdev->skmsg_free_list = skmsg;
779                 }
780                 skmsg = NULL;
781                 fmh = NULL;
782         }
783
784         /*
785          * If req is non-NULL it means there is something to do but
786          * we are out of a resource.
787          */
788         if (req)
789                 blk_stop_queue(skdev->queue);
790 }
791
792 static void skd_end_request(struct skd_device *skdev,
793                 struct skd_request_context *skreq, blk_status_t error)
794 {
795         if (unlikely(error)) {
796                 struct request *req = skreq->req;
797                 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
798                 u32 lba = (u32)blk_rq_pos(req);
799                 u32 count = blk_rq_sectors(req);
800
801                 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
802                        skd_name(skdev), cmd, lba, count, skreq->id);
803         } else
804                 pr_debug("%s:%s:%d id=0x%x error=%d\n",
805                          skdev->name, __func__, __LINE__, skreq->id, error);
806
807         __blk_end_request_all(skreq->req, error);
808 }
809
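/*
 * Roughly: skd_preop_sg_list() maps the request's bio segments into a
 * scatterlist with blk_rq_map_sg(), DMA-maps that list with
 * pci_map_sg() (which may merge entries), and mirrors the result into
 * the FIT SG descriptor list the device walks.  The final descriptor
 * gets FIT_SGD_CONTROL_LAST and a null next pointer;
 * skd_postop_sg_list() later restores the chain pointer and unmaps.
 */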
810 static bool skd_preop_sg_list(struct skd_device *skdev,
811                              struct skd_request_context *skreq)
812 {
813         struct request *req = skreq->req;
814         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
815         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
816         struct scatterlist *sg = &skreq->sg[0];
817         int n_sg;
818         int i;
819
820         skreq->sg_byte_count = 0;
821
822         /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
823                    skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
824
825         n_sg = blk_rq_map_sg(skdev->queue, req, sg);
826         if (n_sg <= 0)
827                 return false;
828
829         /*
830          * Map scatterlist to PCI bus addresses.
831          * Note PCI might change the number of entries.
832          */
833         n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
834         if (n_sg <= 0)
835                 return false;
836
837         SKD_ASSERT(n_sg <= skdev->sgs_per_request);
838
839         skreq->n_sg = n_sg;
840
841         for (i = 0; i < n_sg; i++) {
842                 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
843                 u32 cnt = sg_dma_len(&sg[i]);
844                 uint64_t dma_addr = sg_dma_address(&sg[i]);
845
846                 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
847                 sgd->byte_count = cnt;
848                 skreq->sg_byte_count += cnt;
849                 sgd->host_side_addr = dma_addr;
850                 sgd->dev_side_addr = 0;
851         }
852
853         skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
854         skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
855
856         if (unlikely(skdev->dbg_level > 1)) {
857                 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
858                          skdev->name, __func__, __LINE__,
859                          skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
860                 for (i = 0; i < n_sg; i++) {
861                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
862                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
863                                  "addr=0x%llx next=0x%llx\n",
864                                  skdev->name, __func__, __LINE__,
865                                  i, sgd->byte_count, sgd->control,
866                                  sgd->host_side_addr, sgd->next_desc_ptr);
867                 }
868         }
869
870         return true;
871 }
872
873 static void skd_postop_sg_list(struct skd_device *skdev,
874                                struct skd_request_context *skreq)
875 {
876         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
877         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
878
879         /*
880          * restore the next ptr for next IO request so we
881          * don't have to set it every time.
882          */
883         skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
884                 skreq->sksg_dma_address +
885                 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
886         pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
887 }
888
889 static void skd_request_fn_not_online(struct request_queue *q)
890 {
891         struct skd_device *skdev = q->queuedata;
892         int error;
893
894         SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
895
896         skd_log_skdev(skdev, "req_not_online");
897         switch (skdev->state) {
898         case SKD_DRVR_STATE_PAUSING:
899         case SKD_DRVR_STATE_PAUSED:
900         case SKD_DRVR_STATE_STARTING:
901         case SKD_DRVR_STATE_RESTARTING:
902         case SKD_DRVR_STATE_WAIT_BOOT:
903         /* In case of starting, we haven't started the queue,
904          * so we can't get here... but requests are
905          * possibly hanging out waiting for us because we
906          * reported /dev/skd0 already.  They'll wait
907          * forever if connect doesn't complete.
908          * What to do??? delay /dev/skd0 ??
909          */
910         case SKD_DRVR_STATE_BUSY:
911         case SKD_DRVR_STATE_BUSY_IMMINENT:
912         case SKD_DRVR_STATE_BUSY_ERASE:
913         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
914                 return;
915
916         case SKD_DRVR_STATE_BUSY_SANITIZE:
917         case SKD_DRVR_STATE_STOPPING:
918         case SKD_DRVR_STATE_SYNCING:
919         case SKD_DRVR_STATE_FAULT:
920         case SKD_DRVR_STATE_DISAPPEARED:
921         default:
922                 error = -EIO;
923                 break;
924         }
925
926         /* If we get here, terminate all pending block requests
927          * with EIO and any SCSI pass-thru with appropriate sense
928          */
929
930         skd_fail_all_pending(skdev);
931 }
932
933 /*
934  *****************************************************************************
935  * TIMER
936  *****************************************************************************
937  */
938
939 static void skd_timer_tick_not_online(struct skd_device *skdev);
940
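/*
 * skd_timer_tick() runs roughly once a second; it re-arms itself with
 * mod_timer(jiffies + HZ).  It samples FIT_STATUS for firmware state
 * changes, advances the timeout stamp, and if the timeout slot about
 * to be reused still holds outstanding requests it declares them
 * overdue, stops the queue and enters SKD_DRVR_STATE_DRAINING_TIMEOUT.
 */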
941 static void skd_timer_tick(ulong arg)
942 {
943         struct skd_device *skdev = (struct skd_device *)arg;
944
945         u32 timo_slot;
946         u32 overdue_timestamp;
947         unsigned long reqflags;
948         u32 state;
949
950         if (skdev->state == SKD_DRVR_STATE_FAULT)
951                 /* The driver has declared fault, and we want it to
952                  * stay that way until driver is reloaded.
953                  */
954                 return;
955
956         spin_lock_irqsave(&skdev->lock, reqflags);
957
958         state = SKD_READL(skdev, FIT_STATUS);
959         state &= FIT_SR_DRIVE_STATE_MASK;
960         if (state != skdev->drive_state)
961                 skd_isr_fwstate(skdev);
962
963         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
964                 skd_timer_tick_not_online(skdev);
965                 goto timer_func_out;
966         }
967         skdev->timeout_stamp++;
968         timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
969
970         /*
971          * All requests that happened during the previous use of
972          * this slot should be done by now. The previous use was
973          * over 7 seconds ago.
974          */
975         if (skdev->timeout_slot[timo_slot] == 0)
976                 goto timer_func_out;
977
978         /* Something is overdue */
979         overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
980
981         pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
982                  skdev->name, __func__, __LINE__,
983                  skdev->timeout_slot[timo_slot], skdev->in_flight);
984         pr_err("(%s): Overdue IOs (%d), busy %d\n",
985                skd_name(skdev), skdev->timeout_slot[timo_slot],
986                skdev->in_flight);
987
988         skdev->timer_countdown = SKD_DRAINING_TIMO;
989         skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
990         skdev->timo_slot = timo_slot;
991         blk_stop_queue(skdev->queue);
992
993 timer_func_out:
994         mod_timer(&skdev->timer, (jiffies + HZ));
995
996         spin_unlock_irqrestore(&skdev->lock, reqflags);
997 }
998
999 static void skd_timer_tick_not_online(struct skd_device *skdev)
1000 {
1001         switch (skdev->state) {
1002         case SKD_DRVR_STATE_IDLE:
1003         case SKD_DRVR_STATE_LOAD:
1004                 break;
1005         case SKD_DRVR_STATE_BUSY_SANITIZE:
1006                 pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
1007                          skdev->name, __func__, __LINE__,
1008                          skdev->drive_state, skdev->state);
1009                 /* If we've been in sanitize for 3 seconds, we figure we're not
1010                  * going to get any more completions, so recover requests now
1011                  */
1012                 if (skdev->timer_countdown > 0) {
1013                         skdev->timer_countdown--;
1014                         return;
1015                 }
1016                 skd_recover_requests(skdev, 0);
1017                 break;
1018
1019         case SKD_DRVR_STATE_BUSY:
1020         case SKD_DRVR_STATE_BUSY_IMMINENT:
1021         case SKD_DRVR_STATE_BUSY_ERASE:
1022                 pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1023                          skdev->name, __func__, __LINE__,
1024                          skdev->state, skdev->timer_countdown);
1025                 if (skdev->timer_countdown > 0) {
1026                         skdev->timer_countdown--;
1027                         return;
1028                 }
1029                 pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
1030                          skdev->name, __func__, __LINE__,
1031                          skdev->state, skdev->timer_countdown);
1032                 skd_restart_device(skdev);
1033                 break;
1034
1035         case SKD_DRVR_STATE_WAIT_BOOT:
1036         case SKD_DRVR_STATE_STARTING:
1037                 if (skdev->timer_countdown > 0) {
1038                         skdev->timer_countdown--;
1039                         return;
1040                 }
1041                 /* For now, we fault the drive.  Could attempt resets to
1042                  * recover at some point. */
1043                 skdev->state = SKD_DRVR_STATE_FAULT;
1044
1045                 pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1046                        skd_name(skdev), skdev->drive_state);
1047
1048                 /* start the queue so we can respond with error to requests */
1049                 /* wakeup anyone waiting for startup complete */
1050                 blk_start_queue(skdev->queue);
1051                 skdev->gendisk_on = -1;
1052                 wake_up_interruptible(&skdev->waitq);
1053                 break;
1054
1055         case SKD_DRVR_STATE_ONLINE:
1056                 /* shouldn't get here. */
1057                 break;
1058
1059         case SKD_DRVR_STATE_PAUSING:
1060         case SKD_DRVR_STATE_PAUSED:
1061                 break;
1062
1063         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1064                 pr_debug("%s:%s:%d "
1065                          "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1066                          skdev->name, __func__, __LINE__,
1067                          skdev->timo_slot,
1068                          skdev->timer_countdown,
1069                          skdev->in_flight,
1070                          skdev->timeout_slot[skdev->timo_slot]);
1071                 /* if the slot has cleared we can let the I/O continue */
1072                 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1073                         pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1074                                  skdev->name, __func__, __LINE__);
1075                         skdev->state = SKD_DRVR_STATE_ONLINE;
1076                         blk_start_queue(skdev->queue);
1077                         return;
1078                 }
1079                 if (skdev->timer_countdown > 0) {
1080                         skdev->timer_countdown--;
1081                         return;
1082                 }
1083                 skd_restart_device(skdev);
1084                 break;
1085
1086         case SKD_DRVR_STATE_RESTARTING:
1087                 if (skdev->timer_countdown > 0) {
1088                         skdev->timer_countdown--;
1089                         return;
1090                 }
1091                 /* For now, we fault the drive. Could attempt resets to
1092                  * recover at some point. */
1093                 skdev->state = SKD_DRVR_STATE_FAULT;
1094                 pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1095                        skd_name(skdev), skdev->drive_state);
1096
1097                 /*
1098                  * Recovering does two things:
1099                  * 1. completes IO with error
1100                  * 2. reclaims dma resources
1101                  * When is it safe to recover requests?
1102                  * - if the drive state is faulted
1103          * - if the state is still soft reset after our timeout
1104                  * - if the drive registers are dead (state = FF)
1105                  * If it is "unsafe", we still need to recover, so we will
1106                  * disable pci bus mastering and disable our interrupts.
1107                  */
1108
1109                 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1110                     (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1111                     (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1112                         /* It never came out of soft reset. Try to
1113                          * recover the requests and then let them
1114                          * fail. This is to mitigate hung processes. */
1115                         skd_recover_requests(skdev, 0);
1116                 else {
1117                         pr_err("(%s): Disable BusMaster (%x)\n",
1118                                skd_name(skdev), skdev->drive_state);
1119                         pci_disable_device(skdev->pdev);
1120                         skd_disable_interrupts(skdev);
1121                         skd_recover_requests(skdev, 0);
1122                 }
1123
1124                 /* start the queue so we can respond with error to requests */
1125                 /* wakeup anyone waiting for startup complete */
1126                 blk_start_queue(skdev->queue);
1127                 skdev->gendisk_on = -1;
1128                 wake_up_interruptible(&skdev->waitq);
1129                 break;
1130
1131         case SKD_DRVR_STATE_RESUMING:
1132         case SKD_DRVR_STATE_STOPPING:
1133         case SKD_DRVR_STATE_SYNCING:
1134         case SKD_DRVR_STATE_FAULT:
1135         case SKD_DRVR_STATE_DISAPPEARED:
1136         default:
1137                 break;
1138         }
1139 }
1140
1141 static int skd_start_timer(struct skd_device *skdev)
1142 {
1143         int rc;
1144
1145         init_timer(&skdev->timer);
1146         setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1147
1148         rc = mod_timer(&skdev->timer, (jiffies + HZ));
1149         if (rc)
1150                 pr_err("%s: failed to start timer %d\n",
1151                        __func__, rc);
1152         return rc;
1153 }
1154
1155 static void skd_kill_timer(struct skd_device *skdev)
1156 {
1157         del_timer_sync(&skdev->timer);
1158 }
1159
1160 /*
1161  *****************************************************************************
1162  * IOCTL
1163  *****************************************************************************
1164  */
1165 static int skd_ioctl_sg_io(struct skd_device *skdev,
1166                            fmode_t mode, void __user *argp);
1167 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1168                                         struct skd_sg_io *sksgio);
1169 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1170                                    struct skd_sg_io *sksgio);
1171 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1172                                     struct skd_sg_io *sksgio);
1173 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1174                                  struct skd_sg_io *sksgio, int dxfer_dir);
1175 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1176                                  struct skd_sg_io *sksgio);
1177 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1178 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1179                                     struct skd_sg_io *sksgio);
1180 static int skd_sg_io_put_status(struct skd_device *skdev,
1181                                 struct skd_sg_io *sksgio);
1182
1183 static void skd_complete_special(struct skd_device *skdev,
1184                                  volatile struct fit_completion_entry_v1
1185                                  *skcomp,
1186                                  volatile struct fit_comp_error_info *skerr,
1187                                  struct skd_special_context *skspcl);
1188
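/*
 * Block device ioctl entry point.  Only CAP_SYS_ADMIN callers are
 * accepted; SG_SET_TIMEOUT, SG_GET_TIMEOUT and SG_GET_VERSION_NUM are
 * handled inline, and SG_IO is routed to skd_ioctl_sg_io() for SCSI
 * pass-thru.
 */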
1189 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1190                           uint cmd_in, ulong arg)
1191 {
1192         static const int sg_version_num = 30527;
1193         int rc = 0, timeout;
1194         struct gendisk *disk = bdev->bd_disk;
1195         struct skd_device *skdev = disk->private_data;
1196         int __user *p = (int __user *)arg;
1197
1198         pr_debug("%s:%s:%d %s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
1199                  skdev->name, __func__, __LINE__,
1200                  disk->disk_name, current->comm, mode, cmd_in, arg);
1201
1202         if (!capable(CAP_SYS_ADMIN))
1203                 return -EPERM;
1204
1205         switch (cmd_in) {
1206         case SG_SET_TIMEOUT:
1207                 rc = get_user(timeout, p);
1208                 if (!rc)
1209                         disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
1210                 break;
1211         case SG_GET_TIMEOUT:
1212                 rc = jiffies_to_clock_t(disk->queue->sg_timeout);
1213                 break;
1214         case SG_GET_VERSION_NUM:
1215                 rc = put_user(sg_version_num, p);
1216                 break;
1217         case SG_IO:
1218                 rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
1219                 break;
1220
1221         default:
1222                 rc = -ENOTTY;
1223                 break;
1224         }
1225
1226         pr_debug("%s:%s:%d %s:  completion rc %d\n",
1227                  skdev->name, __func__, __LINE__, disk->disk_name, rc);
1228         return rc;
1229 }
1230
1231 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1232                            void __user *argp)
1233 {
1234         int rc;
1235         struct skd_sg_io sksgio;
1236
1237         memset(&sksgio, 0, sizeof(sksgio));
1238         sksgio.mode = mode;
1239         sksgio.argp = argp;
1240         sksgio.iov = &sksgio.no_iov_iov;
1241
1242         switch (skdev->state) {
1243         case SKD_DRVR_STATE_ONLINE:
1244         case SKD_DRVR_STATE_BUSY_IMMINENT:
1245                 break;
1246
1247         default:
1248                 pr_debug("%s:%s:%d drive not online\n",
1249                          skdev->name, __func__, __LINE__);
1250                 rc = -ENXIO;
1251                 goto out;
1252         }
1253
1254         rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1255         if (rc)
1256                 goto out;
1257
1258         rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1259         if (rc)
1260                 goto out;
1261
1262         rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1263         if (rc)
1264                 goto out;
1265
1266         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1267         if (rc)
1268                 goto out;
1269
1270         rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1271         if (rc)
1272                 goto out;
1273
1274         rc = skd_sg_io_await(skdev, &sksgio);
1275         if (rc)
1276                 goto out;
1277
1278         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1279         if (rc)
1280                 goto out;
1281
1282         rc = skd_sg_io_put_status(skdev, &sksgio);
1283         if (rc)
1284                 goto out;
1285
1286         rc = 0;
1287
1288 out:
1289         skd_sg_io_release_skspcl(skdev, &sksgio);
1290
1291         if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1292                 kfree(sksgio.iov);
1293         return rc;
1294 }
1295
1296 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1297                                         struct skd_sg_io *sksgio)
1298 {
1299         struct sg_io_hdr *sgp = &sksgio->sg;
1300         int i, acc;
1301
1302         if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1303                 pr_debug("%s:%s:%d access sg failed %p\n",
1304                          skdev->name, __func__, __LINE__, sksgio->argp);
1305                 return -EFAULT;
1306         }
1307
1308         if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1309                 pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1310                          skdev->name, __func__, __LINE__, sksgio->argp);
1311                 return -EFAULT;
1312         }
1313
1314         if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1315                 pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1316                          skdev->name, __func__, __LINE__, sgp->interface_id);
1317                 return -EINVAL;
1318         }
1319
1320         if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1321                 pr_debug("%s:%s:%d cmd_len invalid %d\n",
1322                          skdev->name, __func__, __LINE__, sgp->cmd_len);
1323                 return -EINVAL;
1324         }
1325
1326         if (sgp->iovec_count > 256) {
1327                 pr_debug("%s:%s:%d iovec_count invalid %d\n",
1328                          skdev->name, __func__, __LINE__, sgp->iovec_count);
1329                 return -EINVAL;
1330         }
1331
1332         if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1333                 pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1334                          skdev->name, __func__, __LINE__, sgp->dxfer_len);
1335                 return -EINVAL;
1336         }
1337
1338         switch (sgp->dxfer_direction) {
1339         case SG_DXFER_NONE:
1340                 acc = -1;
1341                 break;
1342
1343         case SG_DXFER_TO_DEV:
1344                 acc = VERIFY_READ;
1345                 break;
1346
1347         case SG_DXFER_FROM_DEV:
1348         case SG_DXFER_TO_FROM_DEV:
1349                 acc = VERIFY_WRITE;
1350                 break;
1351
1352         default:
1353                 pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1354                          skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1355                 return -EINVAL;
1356         }
1357
1358         if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1359                 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1360                          skdev->name, __func__, __LINE__, sgp->cmdp);
1361                 return -EFAULT;
1362         }
1363
1364         if (sgp->mx_sb_len != 0) {
1365                 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1366                         pr_debug("%s:%s:%d access sbp failed %p\n",
1367                                  skdev->name, __func__, __LINE__, sgp->sbp);
1368                         return -EFAULT;
1369                 }
1370         }
1371
1372         if (sgp->iovec_count == 0) {
1373                 sksgio->iov[0].iov_base = sgp->dxferp;
1374                 sksgio->iov[0].iov_len = sgp->dxfer_len;
1375                 sksgio->iovcnt = 1;
1376                 sksgio->dxfer_len = sgp->dxfer_len;
1377         } else {
1378                 struct sg_iovec *iov;
1379                 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1380                 size_t iov_data_len;
1381
1382                 iov = kmalloc(nbytes, GFP_KERNEL);
1383                 if (iov == NULL) {
1384                         pr_debug("%s:%s:%d alloc iovec failed %d\n",
1385                                  skdev->name, __func__, __LINE__,
1386                                  sgp->iovec_count);
1387                         return -ENOMEM;
1388                 }
1389                 sksgio->iov = iov;
1390                 sksgio->iovcnt = sgp->iovec_count;
1391
1392                 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1393                         pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1394                                  skdev->name, __func__, __LINE__, sgp->dxferp);
1395                         return -EFAULT;
1396                 }
1397
1398                 /*
1399                  * Sum up the vecs, making sure they don't overflow
1400                  */
1401                 iov_data_len = 0;
1402                 for (i = 0; i < sgp->iovec_count; i++) {
1403                         if (iov_data_len + iov[i].iov_len < iov_data_len)
1404                                 return -EINVAL;
1405                         iov_data_len += iov[i].iov_len;
1406                 }
1407
1408                 /* SG_IO howto says that the shorter of the two wins */
1409                 if (sgp->dxfer_len < iov_data_len) {
1410                         sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1411                                                      sgp->iovec_count,
1412                                                      sgp->dxfer_len);
1413                         sksgio->dxfer_len = sgp->dxfer_len;
1414                 } else
1415                         sksgio->dxfer_len = iov_data_len;
1416         }
1417
1418         if (sgp->dxfer_direction != SG_DXFER_NONE) {
1419                 struct sg_iovec *iov = sksgio->iov;
1420                 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1421                         if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1422                                 pr_debug("%s:%s:%d access data failed %p/%d\n",
1423                                          skdev->name, __func__, __LINE__,
1424                                          iov->iov_base, (int)iov->iov_len);
1425                                 return -EFAULT;
1426                         }
1427                 }
1428         }
1429
1430         return 0;
1431 }
1432
1433 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1434                                    struct skd_sg_io *sksgio)
1435 {
1436         struct skd_special_context *skspcl = NULL;
1437         int rc;
1438
1439         for (;;) {
1440                 ulong flags;
1441
1442                 spin_lock_irqsave(&skdev->lock, flags);
1443                 skspcl = skdev->skspcl_free_list;
1444                 if (skspcl != NULL) {
1445                         skdev->skspcl_free_list =
1446                                 (struct skd_special_context *)skspcl->req.next;
1447                         skspcl->req.id += SKD_ID_INCR;
1448                         skspcl->req.state = SKD_REQ_STATE_SETUP;
1449                         skspcl->orphaned = 0;
1450                         skspcl->req.n_sg = 0;
1451                 }
1452                 spin_unlock_irqrestore(&skdev->lock, flags);
1453
1454                 if (skspcl != NULL) {
1455                         rc = 0;
1456                         break;
1457                 }
1458
1459                 pr_debug("%s:%s:%d blocking\n",
1460                          skdev->name, __func__, __LINE__);
1461
1462                 rc = wait_event_interruptible_timeout(
1463                                 skdev->waitq,
1464                                 (skdev->skspcl_free_list != NULL),
1465                                 msecs_to_jiffies(sksgio->sg.timeout));
1466
1467                 pr_debug("%s:%s:%d unblocking, rc=%d\n",
1468                          skdev->name, __func__, __LINE__, rc);
1469
1470                 if (rc <= 0) {
1471                         if (rc == 0)
1472                                 rc = -ETIMEDOUT;
1473                         else
1474                                 rc = -EINTR;
1475                         break;
1476                 }
1477                 /*
1478                  * If we get here rc > 0, meaning wait_event_interruptible_timeout()
1479                  * returned with time left on the timeout, hence the sought
1480                  * event -- a non-empty free list -- happened.
1481                  * Retry the allocation.
1482                  */
1483         }
1484         sksgio->skspcl = skspcl;
1485
1486         return rc;
1487 }
1488
1489 static int skd_skreq_prep_buffering(struct skd_device *skdev,
1490                                     struct skd_request_context *skreq,
1491                                     u32 dxfer_len)
1492 {
1493         u32 resid = dxfer_len;
1494
1495         /*
1496          * The DMA engine must have aligned addresses and byte counts.
1497          */
1498         resid += (-resid) & 3;
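        /*
         * (-resid) & 3 is exactly the amount needed to round resid up to
         * the next multiple of four, e.g. resid = 5 -> +3 -> 8, and
         * resid = 8 -> +0.
         */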
1499         skreq->sg_byte_count = resid;
1500
1501         skreq->n_sg = 0;
1502
1503         while (resid > 0) {
1504                 u32 nbytes = PAGE_SIZE;
1505                 u32 ix = skreq->n_sg;
1506                 struct scatterlist *sg = &skreq->sg[ix];
1507                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1508                 struct page *page;
1509
1510                 if (nbytes > resid)
1511                         nbytes = resid;
1512
1513                 page = alloc_page(GFP_KERNEL);
1514                 if (page == NULL)
1515                         return -ENOMEM;
1516
1517                 sg_set_page(sg, page, nbytes, 0);
1518
1519                 /* TODO: This should be going through a pci_???()
1520                  * routine to do proper mapping. */
1521                 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1522                 sksg->byte_count = nbytes;
1523
1524                 sksg->host_side_addr = sg_phys(sg);
1525
1526                 sksg->dev_side_addr = 0;
1527                 sksg->next_desc_ptr = skreq->sksg_dma_address +
1528                                       (ix + 1) * sizeof(*sksg);
1529
1530                 skreq->n_sg++;
1531                 resid -= nbytes;
1532         }
1533
1534         if (skreq->n_sg > 0) {
1535                 u32 ix = skreq->n_sg - 1;
1536                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1537
1538                 sksg->control = FIT_SGD_CONTROL_LAST;
1539                 sksg->next_desc_ptr = 0;
1540         }
1541
1542         if (unlikely(skdev->dbg_level > 1)) {
1543                 u32 i;
1544
1545                 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1546                          skdev->name, __func__, __LINE__,
1547                          skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1548                 for (i = 0; i < skreq->n_sg; i++) {
1549                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1550
1551                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
1552                                  "addr=0x%llx next=0x%llx\n",
1553                                  skdev->name, __func__, __LINE__,
1554                                  i, sgd->byte_count, sgd->control,
1555                                  sgd->host_side_addr, sgd->next_desc_ptr);
1556                 }
1557         }
1558
1559         return 0;
1560 }
1561
1562 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1563                                     struct skd_sg_io *sksgio)
1564 {
1565         struct skd_special_context *skspcl = sksgio->skspcl;
1566         struct skd_request_context *skreq = &skspcl->req;
1567         u32 dxfer_len = sksgio->dxfer_len;
1568         int rc;
1569
1570         rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1571         /*
1572          * Eventually, errors or not, skd_release_special() is called
1573          * to recover allocations including partial allocations.
1574          */
1575         return rc;
1576 }
1577
1578 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1579                                  struct skd_sg_io *sksgio, int dxfer_dir)
1580 {
1581         struct skd_special_context *skspcl = sksgio->skspcl;
1582         u32 iov_ix = 0;
1583         struct sg_iovec curiov;
1584         u32 sksg_ix = 0;
1585         u8 *bufp = NULL;
1586         u32 buf_len = 0;
1587         u32 resid = sksgio->dxfer_len;
1588         int rc;
1589
1590         curiov.iov_len = 0;
1591         curiov.iov_base = NULL;
1592
1593         if (dxfer_dir != sksgio->sg.dxfer_direction) {
1594                 if (dxfer_dir != SG_DXFER_TO_DEV ||
1595                     sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1596                         return 0;
1597         }
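        /*
         * Past this point the copy direction either matches the user's
         * dxfer_direction, or the user asked for SG_DXFER_TO_FROM_DEV and
         * this is the copy-in (SG_DXFER_TO_DEV) leg of that transfer.
         */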
1598
1599         while (resid > 0) {
1600                 u32 nbytes = PAGE_SIZE;
1601
1602                 if (curiov.iov_len == 0) {
1603                         curiov = sksgio->iov[iov_ix++];
1604                         continue;
1605                 }
1606
1607                 if (buf_len == 0) {
1608                         struct page *page;
1609                         page = sg_page(&skspcl->req.sg[sksg_ix++]);
1610                         bufp = page_address(page);
1611                         buf_len = PAGE_SIZE;
1612                 }
1613
1614                 nbytes = min_t(u32, nbytes, resid);
1615                 nbytes = min_t(u32, nbytes, curiov.iov_len);
1616                 nbytes = min_t(u32, nbytes, buf_len);
1617
1618                 if (dxfer_dir == SG_DXFER_TO_DEV)
1619                         rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1620                 else
1621                         rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1622
1623                 if (rc)
1624                         return -EFAULT;
1625
1626                 resid -= nbytes;
1627                 curiov.iov_len -= nbytes;
1628                 curiov.iov_base += nbytes;
1629                 buf_len -= nbytes;
1630         }
1631
1632         return 0;
1633 }
1634
1635 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1636                                  struct skd_sg_io *sksgio)
1637 {
1638         struct skd_special_context *skspcl = sksgio->skspcl;
1639         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1640         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1641
1642         memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1643
1644         /* Initialize the FIT msg header */
1645         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1646         fmh->num_protocol_cmds_coalesced = 1;
1647
1648         /* Initialize the SCSI request */
1649         if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1650                 scsi_req->hdr.sg_list_dma_address =
1651                         cpu_to_be64(skspcl->req.sksg_dma_address);
1652         scsi_req->hdr.tag = skspcl->req.id;
1653         scsi_req->hdr.sg_list_len_bytes =
1654                 cpu_to_be32(skspcl->req.sg_byte_count);
1655         memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1656
1657         skspcl->req.state = SKD_REQ_STATE_BUSY;
1658         skd_send_special_fitmsg(skdev, skspcl);
1659
1660         return 0;
1661 }
1662
1663 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1664 {
1665         unsigned long flags;
1666         int rc;
1667
1668         rc = wait_event_interruptible_timeout(skdev->waitq,
1669                                               (sksgio->skspcl->req.state !=
1670                                                SKD_REQ_STATE_BUSY),
1671                                               msecs_to_jiffies(sksgio->sg.
1672                                                                timeout));
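        /*
         * Per wait_event_interruptible_timeout(): rc > 0 means the request
         * left SKD_REQ_STATE_BUSY before the timeout, rc == 0 means the
         * timeout expired, and rc < 0 means a signal interrupted the wait.
         */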
1673
1674         spin_lock_irqsave(&skdev->lock, flags);
1675
1676         if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1677                 pr_debug("%s:%s:%d skspcl %p aborted\n",
1678                          skdev->name, __func__, __LINE__, sksgio->skspcl);
1679
1680                 /* Build a check condition with sense data and let the
1681                  * command finish. For a timeout we must fabricate the
1682                  * completion and sense data to complete the command. */
1683                 sksgio->skspcl->req.completion.status =
1684                         SAM_STAT_CHECK_CONDITION;
1685
1686                 memset(&sksgio->skspcl->req.err_info, 0,
1687                        sizeof(sksgio->skspcl->req.err_info));
1688                 sksgio->skspcl->req.err_info.type = 0x70;
1689                 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1690                 sksgio->skspcl->req.err_info.code = 0x44;
1691                 sksgio->skspcl->req.err_info.qual = 0;
1692                 rc = 0;
1693         } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1694                 /* No longer on the adapter. We finish. */
1695                 rc = 0;
1696         else {
1697                 /* Something's gone wrong. Still busy. Timeout or
1698                  * user interrupted (control-C). Mark as an orphan
1699                  * so it will be disposed of when completed. */
1700                 sksgio->skspcl->orphaned = 1;
1701                 sksgio->skspcl = NULL;
1702                 if (rc == 0) {
1703                         pr_debug("%s:%s:%d timed out %p (%u ms)\n",
1704                                  skdev->name, __func__, __LINE__,
1705                                  sksgio, sksgio->sg.timeout);
1706                         rc = -ETIMEDOUT;
1707                 } else {
1708                         pr_debug("%s:%s:%d cntlc %p\n",
1709                                  skdev->name, __func__, __LINE__, sksgio);
1710                         rc = -EINTR;
1711                 }
1712         }
1713
1714         spin_unlock_irqrestore(&skdev->lock, flags);
1715
1716         return rc;
1717 }
1718
1719 static int skd_sg_io_put_status(struct skd_device *skdev,
1720                                 struct skd_sg_io *sksgio)
1721 {
1722         struct sg_io_hdr *sgp = &sksgio->sg;
1723         struct skd_special_context *skspcl = sksgio->skspcl;
1724         int resid = 0;
1725
1726         u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1727
1728         sgp->status = skspcl->req.completion.status;
1729         resid = sksgio->dxfer_len - nb;
1730
1731         sgp->masked_status = sgp->status & STATUS_MASK;
1732         sgp->msg_status = 0;
1733         sgp->host_status = 0;
1734         sgp->driver_status = 0;
1735         sgp->resid = resid;
1736         if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1737                 sgp->info |= SG_INFO_CHECK;
1738
1739         pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
1740                  skdev->name, __func__, __LINE__,
1741                  sgp->status, sgp->masked_status, sgp->resid);
1742
1743         if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1744                 if (sgp->mx_sb_len > 0) {
1745                         struct fit_comp_error_info *ei = &skspcl->req.err_info;
1746                         u32 nbytes = sizeof(*ei);
1747
1748                         nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1749
1750                         sgp->sb_len_wr = nbytes;
1751
1752                         if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1753                                 pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
1754                                          skdev->name, __func__, __LINE__,
1755                                          sgp->sbp);
1756                                 return -EFAULT;
1757                         }
1758                 }
1759         }
1760
1761         if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1762                 pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
1763                          skdev->name, __func__, __LINE__, sksgio->argp);
1764                 return -EFAULT;
1765         }
1766
1767         return 0;
1768 }
1769
1770 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1771                                     struct skd_sg_io *sksgio)
1772 {
1773         struct skd_special_context *skspcl = sksgio->skspcl;
1774
1775         if (skspcl != NULL) {
1776                 ulong flags;
1777
1778                 sksgio->skspcl = NULL;
1779
1780                 spin_lock_irqsave(&skdev->lock, flags);
1781                 skd_release_special(skdev, skspcl);
1782                 spin_unlock_irqrestore(&skdev->lock, flags);
1783         }
1784
1785         return 0;
1786 }
1787
1788 /*
1789  *****************************************************************************
1790  * INTERNAL REQUESTS -- generated by driver itself
1791  *****************************************************************************
1792  */
1793
1794 static int skd_format_internal_skspcl(struct skd_device *skdev)
1795 {
1796         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1797         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1798         struct fit_msg_hdr *fmh;
1799         uint64_t dma_address;
1800         struct skd_scsi_request *scsi;
1801
1802         fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1803         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1804         fmh->num_protocol_cmds_coalesced = 1;
1805
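        /*
         * The 64-byte FIT header is followed immediately by the SCSI
         * request, hence the fixed offset of 64 into msg_buf below.
         */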
1806         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1807         memset(scsi, 0, sizeof(*scsi));
1808         dma_address = skspcl->req.sksg_dma_address;
1809         scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1810         sgd->control = FIT_SGD_CONTROL_LAST;
1811         sgd->byte_count = 0;
1812         sgd->host_side_addr = skspcl->db_dma_address;
1813         sgd->dev_side_addr = 0;
1814         sgd->next_desc_ptr = 0LL;
1815
1816         return 1;
1817 }
1818
1819 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1820
1821 static void skd_send_internal_skspcl(struct skd_device *skdev,
1822                                      struct skd_special_context *skspcl,
1823                                      u8 opcode)
1824 {
1825         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1826         struct skd_scsi_request *scsi;
1827         unsigned char *buf = skspcl->data_buf;
1828         int i;
1829
1830         if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1831                 /*
1832                  * A refresh is already in progress.
1833                  * Just wait for it to finish.
1834                  */
1835                 return;
1836
1837         SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1838         skspcl->req.state = SKD_REQ_STATE_BUSY;
1839         skspcl->req.id += SKD_ID_INCR;
1840
1841         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1842         scsi->hdr.tag = skspcl->req.id;
1843
1844         memset(scsi->cdb, 0, sizeof(scsi->cdb));
1845
1846         switch (opcode) {
1847         case TEST_UNIT_READY:
1848                 scsi->cdb[0] = TEST_UNIT_READY;
1849                 sgd->byte_count = 0;
1850                 scsi->hdr.sg_list_len_bytes = 0;
1851                 break;
1852
1853         case READ_CAPACITY:
1854                 scsi->cdb[0] = READ_CAPACITY;
1855                 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1856                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1857                 break;
1858
1859         case INQUIRY:
1860                 scsi->cdb[0] = INQUIRY;
1861                 scsi->cdb[1] = 0x01;    /* evpd */
1862                 scsi->cdb[2] = 0x80;    /* serial number page */
1863                 scsi->cdb[4] = 0x10;
1864                 sgd->byte_count = 16;
1865                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1866                 break;
1867
1868         case SYNCHRONIZE_CACHE:
1869                 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1870                 sgd->byte_count = 0;
1871                 scsi->hdr.sg_list_len_bytes = 0;
1872                 break;
1873
1874         case WRITE_BUFFER:
1875                 scsi->cdb[0] = WRITE_BUFFER;
1876                 scsi->cdb[1] = 0x02;
1877                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1878                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
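                /* Big-endian buffer length split across the CDB, e.g. a
                 * 512-byte buffer gives cdb[7] = 0x02, cdb[8] = 0x00. */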
1879                 sgd->byte_count = WR_BUF_SIZE;
1880                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1881                 /* fill incrementing byte pattern */
1882                 for (i = 0; i < sgd->byte_count; i++)
1883                         buf[i] = i & 0xFF;
1884                 break;
1885
1886         case READ_BUFFER:
1887                 scsi->cdb[0] = READ_BUFFER;
1888                 scsi->cdb[1] = 0x02;
1889                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1890                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1891                 sgd->byte_count = WR_BUF_SIZE;
1892                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1893                 memset(skspcl->data_buf, 0, sgd->byte_count);
1894                 break;
1895
1896         default:
1897                 SKD_ASSERT("Don't know what to send");
1898                 return;
1899
1900         }
1901         skd_send_special_fitmsg(skdev, skspcl);
1902 }
1903
1904 static void skd_refresh_device_data(struct skd_device *skdev)
1905 {
1906         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1907
1908         skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1909 }
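/*
 * A successful TEST_UNIT_READY kicks off the internal bring-up chain driven
 * by skd_complete_internal() below: WRITE_BUFFER -> READ_BUFFER (pattern
 * check) -> READ_CAPACITY -> INQUIRY, after which the device is put online.
 */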
1910
1911 static int skd_chk_read_buf(struct skd_device *skdev,
1912                             struct skd_special_context *skspcl)
1913 {
1914         unsigned char *buf = skspcl->data_buf;
1915         int i;
1916
1917         /* check for incrementing byte pattern */
1918         for (i = 0; i < WR_BUF_SIZE; i++)
1919                 if (buf[i] != (i & 0xFF))
1920                         return 1;
1921
1922         return 0;
1923 }
1924
1925 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1926                                  u8 code, u8 qual, u8 fruc)
1927 {
1928         /* If the check condition is of special interest, log a message */
1929         if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1930             && (code == 0x04) && (qual == 0x06)) {
1931                 pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
1932                        "ascq/fruc %02x/%02x/%02x/%02x\n",
1933                        skd_name(skdev), key, code, qual, fruc);
1934         }
1935 }
1936
1937 static void skd_complete_internal(struct skd_device *skdev,
1938                                   volatile struct fit_completion_entry_v1
1939                                   *skcomp,
1940                                   volatile struct fit_comp_error_info *skerr,
1941                                   struct skd_special_context *skspcl)
1942 {
1943         u8 *buf = skspcl->data_buf;
1944         u8 status;
1945         int i;
1946         struct skd_scsi_request *scsi =
1947                 (struct skd_scsi_request *)&skspcl->msg_buf[64];
1948
1949         SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1950
1951         pr_debug("%s:%s:%d complete internal %x\n",
1952                  skdev->name, __func__, __LINE__, scsi->cdb[0]);
1953
1954         skspcl->req.completion = *skcomp;
1955         skspcl->req.state = SKD_REQ_STATE_IDLE;
1956         skspcl->req.id += SKD_ID_INCR;
1957
1958         status = skspcl->req.completion.status;
1959
1960         skd_log_check_status(skdev, status, skerr->key, skerr->code,
1961                              skerr->qual, skerr->fruc);
1962
1963         switch (scsi->cdb[0]) {
1964         case TEST_UNIT_READY:
1965                 if (status == SAM_STAT_GOOD)
1966                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1967                 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1968                          (skerr->key == MEDIUM_ERROR))
1969                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1970                 else {
1971                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1972                                 pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
1973                                          skdev->name, __func__, __LINE__,
1974                                          skdev->state);
1975                                 return;
1976                         }
1977                         pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
1978                                  skdev->name, __func__, __LINE__);
1979                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
1980                 }
1981                 break;
1982
1983         case WRITE_BUFFER:
1984                 if (status == SAM_STAT_GOOD)
1985                         skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1986                 else {
1987                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1988                                 pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
1989                                          skdev->name, __func__, __LINE__,
1990                                          skdev->state);
1991                                 return;
1992                         }
1993                         pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
1994                                  skdev->name, __func__, __LINE__);
1995                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
1996                 }
1997                 break;
1998
1999         case READ_BUFFER:
2000                 if (status == SAM_STAT_GOOD) {
2001                         if (skd_chk_read_buf(skdev, skspcl) == 0)
2002                                 skd_send_internal_skspcl(skdev, skspcl,
2003                                                          READ_CAPACITY);
2004                         else {
2005                                 pr_err("(%s):*** W/R Buffer mismatch %d ***\n",
2006                                        skd_name(skdev), skdev->connect_retries);
2007                                 if (skdev->connect_retries <
2008                                     SKD_MAX_CONNECT_RETRIES) {
2009                                         skdev->connect_retries++;
2010                                         skd_soft_reset(skdev);
2011                                 } else {
2012                                         pr_err("(%s): W/R Buffer Connect Error\n",
2013                                                skd_name(skdev));
2014                                         return;
2015                                 }
2016                         }
2017
2018                 } else {
2019                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2020                                 pr_debug("%s:%s:%d "
2021                                          "read buffer failed, don't send anymore state 0x%x\n",
2022                                          skdev->name, __func__, __LINE__,
2023                                          skdev->state);
2024                                 return;
2025                         }
2026                         pr_debug("%s:%s:%d "
2027                                  "**** read buffer failed, retry skerr\n",
2028                                  skdev->name, __func__, __LINE__);
2029                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
2030                 }
2031                 break;
2032
2033         case READ_CAPACITY:
2034                 skdev->read_cap_is_valid = 0;
2035                 if (status == SAM_STAT_GOOD) {
2036                         skdev->read_cap_last_lba =
2037                                 (buf[0] << 24) | (buf[1] << 16) |
2038                                 (buf[2] << 8) | buf[3];
2039                         skdev->read_cap_blocksize =
2040                                 (buf[4] << 24) | (buf[5] << 16) |
2041                                 (buf[6] << 8) | buf[7];
2042
2043                         pr_debug("%s:%s:%d last lba %d, bs %d\n",
2044                                  skdev->name, __func__, __LINE__,
2045                                  skdev->read_cap_last_lba,
2046                                  skdev->read_cap_blocksize);
2047
2048                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2049
2050                         skdev->read_cap_is_valid = 1;
2051
2052                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2053                 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2054                            (skerr->key == MEDIUM_ERROR)) {
2055                         skdev->read_cap_last_lba = ~0;
2056                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2057                         pr_debug("%s:%s:%d "
2058                                  "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2059                                  skdev->name, __func__, __LINE__);
2060                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2061                 } else {
2062                         pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2063                                  skdev->name, __func__, __LINE__);
2064                         skd_send_internal_skspcl(skdev, skspcl,
2065                                                  TEST_UNIT_READY);
2066                 }
2067                 break;
2068
2069         case INQUIRY:
2070                 skdev->inquiry_is_valid = 0;
2071                 if (status == SAM_STAT_GOOD) {
2072                         skdev->inquiry_is_valid = 1;
2073
2074                         for (i = 0; i < 12; i++)
2075                                 skdev->inq_serial_num[i] = buf[i + 4];
2076                         skdev->inq_serial_num[12] = 0;
2077                 }
2078
2079                 if (skd_unquiesce_dev(skdev) < 0)
2080                         pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
2081                                  skdev->name, __func__, __LINE__);
2082                  /* connection is complete */
2083                 skdev->connect_retries = 0;
2084                 break;
2085
2086         case SYNCHRONIZE_CACHE:
2087                 if (status == SAM_STAT_GOOD)
2088                         skdev->sync_done = 1;
2089                 else
2090                         skdev->sync_done = -1;
2091                 wake_up_interruptible(&skdev->waitq);
2092                 break;
2093
2094         default:
2095                 SKD_ASSERT("we didn't send this");
2096         }
2097 }
2098
2099 /*
2100  *****************************************************************************
2101  * FIT MESSAGES
2102  *****************************************************************************
2103  */
2104
2105 static void skd_send_fitmsg(struct skd_device *skdev,
2106                             struct skd_fitmsg_context *skmsg)
2107 {
2108         u64 qcmd;
2109         struct fit_msg_hdr *fmh;
2110
2111         pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2112                  skdev->name, __func__, __LINE__,
2113                  skmsg->mb_dma_address, skdev->in_flight);
2114         pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2115                  skdev->name, __func__, __LINE__,
2116                  skmsg->msg_buf, skmsg->offset);
2117
2118         qcmd = skmsg->mb_dma_address;
2119         qcmd |= FIT_QCMD_QID_NORMAL;
2120
2121         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2122         skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2123
2124         if (unlikely(skdev->dbg_level > 1)) {
2125                 u8 *bp = (u8 *)skmsg->msg_buf;
2126                 int i;
2127                 for (i = 0; i < skmsg->length; i += 8) {
2128                         pr_debug("%s:%s:%d msg[%2d] %8ph\n",
2129                                  skdev->name, __func__, __LINE__, i, &bp[i]);
2130                         if (i == 0)
2131                                 i = 64 - 8;
2132                 }
2133         }
2134
2135         if (skmsg->length > 256)
2136                 qcmd |= FIT_QCMD_MSGSIZE_512;
2137         else if (skmsg->length > 128)
2138                 qcmd |= FIT_QCMD_MSGSIZE_256;
2139         else if (skmsg->length > 64)
2140                 qcmd |= FIT_QCMD_MSGSIZE_128;
2141         else
2142                 /*
2143                  * This makes no sense because the FIT msg header is
2144                  * 64 bytes. If the msg is only 64 bytes long it has
2145                  * no payload.
2146                  */
2147                 qcmd |= FIT_QCMD_MSGSIZE_64;
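        /*
         * The FIT_QCMD_* bits are OR'ed into the low-order bits of the
         * message's DMA address, so (assuming the buffer alignment keeps
         * those bits clear) a single 64-bit write to FIT_Q_COMMAND carries
         * the address, queue id, and message-size hint together.
         */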
2148
2149         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2150         smp_wmb();
2151
2152         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2153 }
2154
2155 static void skd_send_special_fitmsg(struct skd_device *skdev,
2156                                     struct skd_special_context *skspcl)
2157 {
2158         u64 qcmd;
2159
2160         if (unlikely(skdev->dbg_level > 1)) {
2161                 u8 *bp = (u8 *)skspcl->msg_buf;
2162                 int i;
2163
2164                 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2165                         pr_debug("%s:%s:%d  spcl[%2d] %8ph\n",
2166                                  skdev->name, __func__, __LINE__, i, &bp[i]);
2167                         if (i == 0)
2168                                 i = 64 - 8;
2169                 }
2170
2171                 pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2172                          skdev->name, __func__, __LINE__,
2173                          skspcl, skspcl->req.id, skspcl->req.sksg_list,
2174                          skspcl->req.sksg_dma_address);
2175                 for (i = 0; i < skspcl->req.n_sg; i++) {
2176                         struct fit_sg_descriptor *sgd =
2177                                 &skspcl->req.sksg_list[i];
2178
2179                         pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
2180                                  "addr=0x%llx next=0x%llx\n",
2181                                  skdev->name, __func__, __LINE__,
2182                                  i, sgd->byte_count, sgd->control,
2183                                  sgd->host_side_addr, sgd->next_desc_ptr);
2184                 }
2185         }
2186
2187         /*
2188          * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2189          * and one 64-byte SSDI command.
2190          */
2191         qcmd = skspcl->mb_dma_address;
2192         qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2193
2194         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2195         smp_wmb();
2196
2197         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2198 }
2199
2200 /*
2201  *****************************************************************************
2202  * COMPLETION QUEUE
2203  *****************************************************************************
2204  */
2205
2206 static void skd_complete_other(struct skd_device *skdev,
2207                                volatile struct fit_completion_entry_v1 *skcomp,
2208                                volatile struct fit_comp_error_info *skerr);
2209
2210 struct sns_info {
2211         u8 type;
2212         u8 stat;
2213         u8 key;
2214         u8 asc;
2215         u8 ascq;
2216         u8 mask;
2217         enum skd_check_status_action action;
2218 };
2219
2220 static struct sns_info skd_chkstat_table[] = {
2221         /* Good */
2222         { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
2223           SKD_CHECK_STATUS_REPORT_GOOD },
2224
2225         /* Smart alerts */
2226         { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
2227           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2228         { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
2229           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2230         { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
2231           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2232
2233         /* Retry (with limits) */
2234         { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
2235           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2236         { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
2237           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2238         { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
2239           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2240         { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
2241           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2242
2243         /* Busy (or about to be) */
2244         { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
2245           SKD_CHECK_STATUS_BUSY_IMMINENT },
2246 };
2247
2248 /*
2249  * Look up status and sense data to decide how to handle the error
2250  * from the device.
2251  * mask says which fields must match, e.g. mask=0x18 means check
2252  * type and stat, ignore key, asc, ascq.
2253  */
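/*
 * Mask bits, matching the checks below: 0x10 = type, 0x08 = stat,
 * 0x04 = key, 0x02 = asc, 0x01 = ascq. So 0x1C compares type/stat/key
 * only, and 0x1F requires all five fields to match.
 */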
2254
2255 static enum skd_check_status_action
2256 skd_check_status(struct skd_device *skdev,
2257                  u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2258 {
2259         int i, n;
2260
2261         pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2262                skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2263                skerr->fruc);
2264
2265         pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2266                  skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2267                  skerr->key, skerr->code, skerr->qual, skerr->fruc);
2268
2269         /* Does the info match an entry in the good category? */
2270         n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2271         for (i = 0; i < n; i++) {
2272                 struct sns_info *sns = &skd_chkstat_table[i];
2273
2274                 if (sns->mask & 0x10)
2275                         if (skerr->type != sns->type)
2276                                 continue;
2277
2278                 if (sns->mask & 0x08)
2279                         if (cmp_status != sns->stat)
2280                                 continue;
2281
2282                 if (sns->mask & 0x04)
2283                         if (skerr->key != sns->key)
2284                                 continue;
2285
2286                 if (sns->mask & 0x02)
2287                         if (skerr->code != sns->asc)
2288                                 continue;
2289
2290                 if (sns->mask & 0x01)
2291                         if (skerr->qual != sns->ascq)
2292                                 continue;
2293
2294                 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2295                         pr_err("(%s): SMART Alert: sense key/asc/ascq "
2296                                "%02x/%02x/%02x\n",
2297                                skd_name(skdev), skerr->key,
2298                                skerr->code, skerr->qual);
2299                 }
2300                 return sns->action;
2301         }
2302
2303         /* No other match, so nonzero status means error,
2304          * zero status means good
2305          */
2306         if (cmp_status) {
2307                 pr_debug("%s:%s:%d status check: error\n",
2308                          skdev->name, __func__, __LINE__);
2309                 return SKD_CHECK_STATUS_REPORT_ERROR;
2310         }
2311
2312         pr_debug("%s:%s:%d status check good default\n",
2313                  skdev->name, __func__, __LINE__);
2314         return SKD_CHECK_STATUS_REPORT_GOOD;
2315 }
2316
2317 static void skd_resolve_req_exception(struct skd_device *skdev,
2318                                       struct skd_request_context *skreq)
2319 {
2320         u8 cmp_status = skreq->completion.status;
2321
2322         switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2323         case SKD_CHECK_STATUS_REPORT_GOOD:
2324         case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2325                 skd_end_request(skdev, skreq, BLK_STS_OK);
2326                 break;
2327
2328         case SKD_CHECK_STATUS_BUSY_IMMINENT:
2329                 skd_log_skreq(skdev, skreq, "retry(busy)");
2330                 blk_requeue_request(skdev->queue, skreq->req);
2331                 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2332                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2333                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2334                 skd_quiesce_dev(skdev);
2335                 break;
2336
2337         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2338                 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2339                         skd_log_skreq(skdev, skreq, "retry");
2340                         blk_requeue_request(skdev->queue, skreq->req);
2341                         break;
2342                 }
2343                 /* fall through */
2344
2345         case SKD_CHECK_STATUS_REPORT_ERROR:
2346         default:
2347                 skd_end_request(skdev, skreq, BLK_STS_IOERR);
2348                 break;
2349         }
2350 }
2351
2352 /* assume spinlock is already held */
2353 static void skd_release_skreq(struct skd_device *skdev,
2354                               struct skd_request_context *skreq)
2355 {
2356         u32 msg_slot;
2357         struct skd_fitmsg_context *skmsg;
2358
2359         u32 timo_slot;
2360
2361         /*
2362          * Reclaim the FIT msg buffer if this is
2363          * the first of the requests it carried to
2364          * be completed. The FIT msg buffer used to
2365          * send this request cannot be reused until
2366          * we are sure the s1120 card has copied
2367          * it to its memory. The FIT msg might have
2368          * contained several requests. As soon as
2369          * any of them are completed we know that
2370          * the entire FIT msg was transferred.
2371          * Only the first completed request will
2372          * match the FIT msg buffer id. The FIT
2373          * msg buffer id is immediately updated.
2374          * When subsequent requests complete the FIT
2375          * msg buffer id won't match, so we know
2376          * quite cheaply that it is already done.
2377          */
2378         msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2379         SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2380
2381         skmsg = &skdev->skmsg_table[msg_slot];
2382         if (skmsg->id == skreq->fitmsg_id) {
2383                 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2384                 SKD_ASSERT(skmsg->outstanding > 0);
2385                 skmsg->outstanding--;
2386                 if (skmsg->outstanding == 0) {
2387                         skmsg->state = SKD_MSG_STATE_IDLE;
2388                         skmsg->id += SKD_ID_INCR;
2389                         skmsg->next = skdev->skmsg_free_list;
2390                         skdev->skmsg_free_list = skmsg;
2391                 }
2392         }
2393
2394         /*
2395          * Decrease the number of active requests.
2396          * Also decrements the count in the timeout slot.
2397          */
2398         SKD_ASSERT(skdev->in_flight > 0);
2399         skdev->in_flight -= 1;
2400
2401         timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2402         SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2403         skdev->timeout_slot[timo_slot] -= 1;
2404
2405         /*
2406          * Reset backpointer
2407          */
2408         skreq->req = NULL;
2409
2410         /*
2411          * Reclaim the skd_request_context
2412          */
2413         skreq->state = SKD_REQ_STATE_IDLE;
2414         skreq->id += SKD_ID_INCR;
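        /*
         * Bumping the uniquifier bits of the id means a stale completion
         * still carrying the old id fails the id check in
         * skd_isr_completion_posted() instead of touching a recycled slot.
         */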
2415         skreq->next = skdev->skreq_free_list;
2416         skdev->skreq_free_list = skreq;
2417 }
2418
2419 #define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
2420
2421 static void skd_do_inq_page_00(struct skd_device *skdev,
2422                                volatile struct fit_completion_entry_v1 *skcomp,
2423                                volatile struct fit_comp_error_info *skerr,
2424                                uint8_t *cdb, uint8_t *buf)
2425 {
2426         uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2427
2428         /* Caller requested "supported pages".  The driver needs to insert
2429          * its page.
2430          */
2431         pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2432                  skdev->name, __func__, __LINE__);
2433
2434         /* If the device rejected the request because the CDB was
2435          * improperly formed, then just leave.
2436          */
2437         if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2438             skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2439                 return;
2440
2441         /* Get the amount of space the caller allocated */
2442         max_bytes = (cdb[3] << 8) | cdb[4];
2443
2444         /* Get the number of pages actually returned by the device */
2445         drive_pages = (buf[2] << 8) | buf[3];
2446         drive_bytes = drive_pages + 4;
2447         new_size = drive_pages + 1;
2448
2449         /* Supported pages must be in numerical order, so find where
2450          * the driver page needs to be inserted into the list of
2451          * pages returned by the device.
2452          */
2453         for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2454                 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2455                         return; /* Device is already using this page code; abort */
2456                 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2457                         break;
2458         }
2459
2460         if (insert_pt < max_bytes) {
2461                 uint16_t u;
2462
2463                 /* Shift everything up one byte to make room. */
2464                 for (u = new_size + 3; u > insert_pt; u--)
2465                         buf[u] = buf[u - 1];
2466                 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2467
2468                 /* SCSI byte order increment of num_returned_bytes by 1 */
2469                 skcomp->num_returned_bytes =
2470                         be32_to_cpu(skcomp->num_returned_bytes) + 1;
2471                 skcomp->num_returned_bytes =
2472                         be32_to_cpu(skcomp->num_returned_bytes);
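                /*
                 * be32_to_cpu() and cpu_to_be32() perform the same byte
                 * swap, so the two statements above convert to CPU order,
                 * add one, and convert back, leaving num_returned_bytes
                 * big-endian as before.
                 */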
2473         }
2474
2475         /* update page length field to reflect the driver's page too */
2476         buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2477         buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2478 }
2479
2480 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2481 {
2482         int pcie_reg;
2483         u16 pci_bus_speed;
2484         u8 pci_lanes;
2485
2486         pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2487         if (pcie_reg) {
2488                 u16 linksta;
2489                 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2490
2491                 pci_bus_speed = linksta & 0xF;
2492                 pci_lanes = (linksta & 0x3F0) >> 4;
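                /*
                 * PCI_EXP_LNKSTA: bits 3:0 encode the current link speed
                 * (1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s) and bits 9:4 the
                 * negotiated link width, mapped to STEC_LINK_* values below.
                 */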
2493         } else {
2494                 *speed = STEC_LINK_UNKNOWN;
2495                 *width = 0xFF;
2496                 return;
2497         }
2498
2499         switch (pci_bus_speed) {
2500         case 1:
2501                 *speed = STEC_LINK_2_5GTS;
2502                 break;
2503         case 2:
2504                 *speed = STEC_LINK_5GTS;
2505                 break;
2506         case 3:
2507                 *speed = STEC_LINK_8GTS;
2508                 break;
2509         default:
2510                 *speed = STEC_LINK_UNKNOWN;
2511                 break;
2512         }
2513
2514         if (pci_lanes <= 0x20)
2515                 *width = pci_lanes;
2516         else
2517                 *width = 0xFF;
2518 }
2519
2520 static void skd_do_inq_page_da(struct skd_device *skdev,
2521                                volatile struct fit_completion_entry_v1 *skcomp,
2522                                volatile struct fit_comp_error_info *skerr,
2523                                uint8_t *cdb, uint8_t *buf)
2524 {
2525         struct pci_dev *pdev = skdev->pdev;
2526         unsigned max_bytes;
2527         struct driver_inquiry_data inq;
2528         u16 val;
2529
2530         pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2531                  skdev->name, __func__, __LINE__);
2532
2533         memset(&inq, 0, sizeof(inq));
2534
2535         inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2536
2537         skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2538         inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2539         inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2540         inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2541
2542         pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2543         inq.pcie_vendor_id = cpu_to_be16(val);
2544
2545         pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2546         inq.pcie_device_id = cpu_to_be16(val);
2547
2548         pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2549         inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2550
2551         pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2552         inq.pcie_subsystem_device_id = cpu_to_be16(val);
2553
2554         /* Driver version, fixed length, padded with spaces on the right */
2555         inq.driver_version_length = sizeof(inq.driver_version);
2556         memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2557         memcpy(inq.driver_version, DRV_VER_COMPL,
2558                min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2559
2560         inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2561
2562         /* Clear the error set by the device */
2563         skcomp->status = SAM_STAT_GOOD;
2564         memset((void *)skerr, 0, sizeof(*skerr));
2565
2566         /* copy response into output buffer */
2567         max_bytes = (cdb[3] << 8) | cdb[4];
2568         memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2569
2570         skcomp->num_returned_bytes =
2571                 be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
2572 }
2573
2574 static void skd_do_driver_inq(struct skd_device *skdev,
2575                               volatile struct fit_completion_entry_v1 *skcomp,
2576                               volatile struct fit_comp_error_info *skerr,
2577                               uint8_t *cdb, uint8_t *buf)
2578 {
2579         if (!buf)
2580                 return;
2581         else if (cdb[0] != INQUIRY)
2582                 return;         /* Not an INQUIRY */
2583         else if ((cdb[1] & 1) == 0)
2584                 return;         /* EVPD not set */
2585         else if (cdb[2] == 0)
2586                 /* Need to add driver's page to supported pages list */
2587                 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2588         else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2589                 /* Caller requested driver's page */
2590                 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2591 }
2592
2593 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2594 {
2595         if (!sg)
2596                 return NULL;
2597         if (!sg_page(sg))
2598                 return NULL;
2599         return sg_virt(sg);
2600 }
2601
2602 static void skd_process_scsi_inq(struct skd_device *skdev,
2603                                  volatile struct fit_completion_entry_v1
2604                                  *skcomp,
2605                                  volatile struct fit_comp_error_info *skerr,
2606                                  struct skd_special_context *skspcl)
2607 {
2608         uint8_t *buf;
2609         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2610         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2611
2612         dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2613                             skspcl->req.sg_data_dir);
2614         buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2615
2616         if (buf)
2617                 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2618 }
2619
2620 static int skd_isr_completion_posted(struct skd_device *skdev,
2621                                         int limit, int *enqueued)
2622 {
2623         volatile struct fit_completion_entry_v1 *skcmp = NULL;
2624         volatile struct fit_comp_error_info *skerr;
2625         u16 req_id;
2626         u32 req_slot;
2627         struct skd_request_context *skreq;
2628         u16 cmp_cntxt = 0;
2629         u8 cmp_status = 0;
2630         u8 cmp_cycle = 0;
2631         u32 cmp_bytes = 0;
2632         int rc = 0;
2633         int processed = 0;
2634
2635         for (;; ) {
2636                 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2637
2638                 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2639                 cmp_cycle = skcmp->cycle;
2640                 cmp_cntxt = skcmp->tag;
2641                 cmp_status = skcmp->status;
2642                 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2643
2644                 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2645
2646                 pr_debug("%s:%s:%d "
2647                          "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
2648                          "busy=%d rbytes=0x%x proto=%d\n",
2649                          skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
2650                          skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
2651                          skdev->in_flight, cmp_bytes, skdev->proto_ver);
2652
2653                 if (cmp_cycle != skdev->skcomp_cycle) {
2654                         pr_debug("%s:%s:%d end of completions\n",
2655                                  skdev->name, __func__, __LINE__);
2656                         break;
2657                 }
2658                 /*
2659                  * Update the completion queue head index and possibly
2660                  * the completion cycle count. 8-bit wrap-around.
2661                  */
2662                 skdev->skcomp_ix++;
2663                 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2664                         skdev->skcomp_ix = 0;
2665                         skdev->skcomp_cycle++;
2666                 }
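                /*
                 * Completion entries are consumed only while their cycle
                 * value matches skcomp_cycle (checked above); bumping the
                 * expected cycle on wrap keeps leftover entries from the
                 * previous pass from being re-processed.
                 */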
2667
2668                 /*
2669                  * The command context is a unique 32-bit ID. The low order
2670                  * bits help locate the request. The request is usually a
2671                  * r/w request (see skd_request_fn() above) or a special request.
2672                  */
2673                 req_id = cmp_cntxt;
2674                 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2675
2676                 /* Is this other than a r/w request? */
2677                 if (req_slot >= skdev->num_req_context) {
2678                         /*
2679                          * This is not a completion for a r/w request.
2680                          */
2681                         skd_complete_other(skdev, skcmp, skerr);
2682                         continue;
2683                 }
2684
2685                 skreq = &skdev->skreq_table[req_slot];
2686
2687                 /*
2688                  * Make sure the request ID for the slot matches.
2689                  */
2690                 if (skreq->id != req_id) {
2691                         pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
2692                                  skdev->name, __func__, __LINE__,
2693                                  req_id, skreq->id);
2694                         {
2695                                 u16 new_id = cmp_cntxt;
2696                                 pr_err("(%s): Completion mismatch "
2697                                        "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2698                                        skd_name(skdev), req_id,
2699                                        skreq->id, new_id);
2700
2701                                 continue;
2702                         }
2703                 }
2704
2705                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2706
2707                 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2708                         pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
2709                                  skdev->name, __func__, __LINE__,
2710                                  skreq, skreq->id);
2711                         /* a previously timed out command can
2712                          * now be cleaned up */
2713                         skd_release_skreq(skdev, skreq);
2714                         continue;
2715                 }
2716
2717                 skreq->completion = *skcmp;
2718                 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2719                         skreq->err_info = *skerr;
2720                         skd_log_check_status(skdev, cmp_status, skerr->key,
2721                                              skerr->code, skerr->qual,
2722                                              skerr->fruc);
2723                 }
2724                 /* Release DMA resources for the request. */
2725                 if (skreq->n_sg > 0)
2726                         skd_postop_sg_list(skdev, skreq);
2727
2728                 if (!skreq->req) {
2729                         pr_debug("%s:%s:%d NULL backptr skdreq %p, "
2730                                  "req=0x%x req_id=0x%x\n",
2731                                  skdev->name, __func__, __LINE__,
2732                                  skreq, skreq->id, req_id);
2733                 } else {
2734                         /*
2735                          * Capture the outcome and post it back to the
2736                          * native request.
2737                          */
2738                         if (likely(cmp_status == SAM_STAT_GOOD))
2739                                 skd_end_request(skdev, skreq, BLK_STS_OK);
2740                         else
2741                                 skd_resolve_req_exception(skdev, skreq);
2742                 }
2743
2744                 /*
2745                  * Release the skreq, its FIT msg (if one), timeout slot,
2746                  * and queue depth.
2747                  */
2748                 skd_release_skreq(skdev, skreq);
2749
2750                 /* A skd_isr_comp_limit of zero means no limit */
2751                 if (limit) {
2752                         if (++processed >= limit) {
2753                                 rc = 1;
2754                                 break;
2755                         }
2756                 }
2757         }
2758
2759         if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2760                 && (skdev->in_flight) == 0) {
2761                 skdev->state = SKD_DRVR_STATE_PAUSED;
2762                 wake_up_interruptible(&skdev->waitq);
2763         }
2764
2765         return rc;
2766 }
2767
2768 static void skd_complete_other(struct skd_device *skdev,
2769                                volatile struct fit_completion_entry_v1 *skcomp,
2770                                volatile struct fit_comp_error_info *skerr)
2771 {
2772         u32 req_id = 0;
2773         u32 req_table;
2774         u32 req_slot;
2775         struct skd_special_context *skspcl;
2776
2777         req_id = skcomp->tag;
2778         req_table = req_id & SKD_ID_TABLE_MASK;
2779         req_slot = req_id & SKD_ID_SLOT_MASK;
2780
2781         pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
2782                  skdev->name, __func__, __LINE__,
2783                  req_table, req_id, req_slot);
2784
2785         /*
2786          * Based on the request id, determine how to dispatch this completion.
2787  * This switch/case finds the good cases and forwards the
2788          * completion entry. Errors are reported below the switch.
2789          */
2790         switch (req_table) {
2791         case SKD_ID_RW_REQUEST:
2792                 /*
2793                  * The caller, skd_isr_completion_posted() above,
2794                  * handles r/w requests. The only way we get here
2795                  * is if the req_slot is out of bounds.
2796                  */
2797                 break;
2798
2799         case SKD_ID_SPECIAL_REQUEST:
2800                 /*
2801                  * Make sure the req_slot is in bounds and that the id
2802                  * matches.
2803                  */
2804                 if (req_slot < skdev->n_special) {
2805                         skspcl = &skdev->skspcl_table[req_slot];
2806                         if (skspcl->req.id == req_id &&
2807                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2808                                 skd_complete_special(skdev,
2809                                                      skcomp, skerr, skspcl);
2810                                 return;
2811                         }
2812                 }
2813                 break;
2814
2815         case SKD_ID_INTERNAL:
2816                 if (req_slot == 0) {
2817                         skspcl = &skdev->internal_skspcl;
2818                         if (skspcl->req.id == req_id &&
2819                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2820                                 skd_complete_internal(skdev,
2821                                                       skcomp, skerr, skspcl);
2822                                 return;
2823                         }
2824                 }
2825                 break;
2826
2827         case SKD_ID_FIT_MSG:
2828                 /*
2829                  * These ids should never appear in a completion record.
2830                  */
2831                 break;
2832
2833         default:
2834                 /*
2835                  * These ids should never appear anywhere.
2836                  */
2837                 break;
2838         }
2839
2840         /*
2841          * If we get here it is a bad or stale id.
2842          */
2843 }
2844
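/*
 * Finish a special request: record the completion and error info and wake
 * any waiter. An orphaned request (its issuer has already given up on it)
 * is simply released.
 */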
2845 static void skd_complete_special(struct skd_device *skdev,
2846                                  volatile struct fit_completion_entry_v1
2847                                  *skcomp,
2848                                  volatile struct fit_comp_error_info *skerr,
2849                                  struct skd_special_context *skspcl)
2850 {
2851         pr_debug("%s:%s:%d  completing special request %p\n",
2852                  skdev->name, __func__, __LINE__, skspcl);
2853         if (skspcl->orphaned) {
2854                 /* Discard orphaned request */
2855                 /* ?: Can this release directly or does it need
2856                  * to use a worker? */
2857                 pr_debug("%s:%s:%d release orphaned %p\n",
2858                          skdev->name, __func__, __LINE__, skspcl);
2859                 skd_release_special(skdev, skspcl);
2860                 return;
2861         }
2862
2863         skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2864
2865         skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2866         skspcl->req.completion = *skcomp;
2867         skspcl->req.err_info = *skerr;
2868
2869         skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2870                              skerr->code, skerr->qual, skerr->fruc);
2871
2872         wake_up_interruptible(&skdev->waitq);
2873 }
2874
2875 /* assume spinlock is already held */
2876 static void skd_release_special(struct skd_device *skdev,
2877                                 struct skd_special_context *skspcl)
2878 {
2879         int i, was_depleted;
2880
2881         for (i = 0; i < skspcl->req.n_sg; i++) {
2882                 struct page *page = sg_page(&skspcl->req.sg[i]);
2883                 __free_page(page);
2884         }
2885
2886         was_depleted = (skdev->skspcl_free_list == NULL);
2887
2888         skspcl->req.state = SKD_REQ_STATE_IDLE;
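        /*
         * Bump the uniquifier bits of the id (e.g. 0x0205 + SKD_ID_INCR ->
         * 0x0605) so a late completion carrying the old tag no longer
         * matches this context and is discarded as stale; the slot and
         * table bits are unchanged.
         */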
2889         skspcl->req.id += SKD_ID_INCR;
2890         skspcl->req.next =
2891                 (struct skd_request_context *)skdev->skspcl_free_list;
2892         skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2893
2894         if (was_depleted) {
2895                 pr_debug("%s:%s:%d skspcl was depleted\n",
2896                          skdev->name, __func__, __LINE__);
2897                 /* Free list was depleted. There might be waiters. */
2898                 wake_up_interruptible(&skdev->waitq);
2899         }
2900 }
2901
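/*
 * Reset the host-side view of the completion queue: zero every completion
 * and error entry and restart consumption at index 0 with cycle bit 1.
 * Called from the FIT_MTD_SET_COMPQ_ADDR step of the start-up handshake.
 */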
2902 static void skd_reset_skcomp(struct skd_device *skdev)
2903 {
2904         u32 nbytes;
2905         struct fit_completion_entry_v1 *skcomp;
2906
2907         nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2908         nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2909
2910         memset(skdev->skcomp_table, 0, nbytes);
2911
2912         skdev->skcomp_ix = 0;
2913         skdev->skcomp_cycle = 1;
2914 }
2915
2916 /*
2917  *****************************************************************************
2918  * INTERRUPTS
2919  *****************************************************************************
2920  */
2921 static void skd_completion_worker(struct work_struct *work)
2922 {
2923         struct skd_device *skdev =
2924                 container_of(work, struct skd_device, completion_worker);
2925         unsigned long flags;
2926         int flush_enqueued = 0;
2927
2928         spin_lock_irqsave(&skdev->lock, flags);
2929
2930         /*
2931          * Pass in limit=0, which means no limit:
2932          * process everything in the completion queue.
2933          */
2934         skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2935         skd_request_fn(skdev->queue);
2936
2937         spin_unlock_irqrestore(&skdev->lock, flags);
2938 }
2939
2940 static void skd_isr_msg_from_dev(struct skd_device *skdev);
2941
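/*
 * Legacy/MSI interrupt handler. Loop acking and servicing every pending
 * interrupt source; completion processing either runs inline (bounded by
 * skd_isr_comp_limit) or is deferred to the completion worker when that
 * limit is reached or nothing was pending.
 */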
2942 static irqreturn_t
2943 skd_isr(int irq, void *ptr)
2944 {
2945         struct skd_device *skdev;
2946         u32 intstat;
2947         u32 ack;
2948         int rc = 0;
2949         int deferred = 0;
2950         int flush_enqueued = 0;
2951
2952         skdev = (struct skd_device *)ptr;
2953         spin_lock(&skdev->lock);
2954
2955         for (;;) {
2956                 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2957
2958                 ack = FIT_INT_DEF_MASK;
2959                 ack &= intstat;
2960
2961                 pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
2962                          skdev->name, __func__, __LINE__, intstat, ack);
2963
2964                 /* As long as there is an interrupt pending on the device,
2965                  * keep running the loop.  When none remain, get out, but if
2966                  * we've never done any processing, call the completion handler?
2967                  */
2968                 if (ack == 0) {
2969                         /* No interrupts on device, but run the completion
2970                          * processor anyway?
2971                          */
2972                         if (rc == 0)
2973                                 if (likely(skdev->state
2974                                         == SKD_DRVR_STATE_ONLINE))
2975                                         deferred = 1;
2976                         break;
2977                 }
2978
2979                 rc = IRQ_HANDLED;
2980
2981                 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2982
2983                 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
2984                            (skdev->state != SKD_DRVR_STATE_STOPPING))) {
2985                         if (intstat & FIT_ISH_COMPLETION_POSTED) {
2986                                 /*
2987                                  * If we have already deferred completion
2988                                  * processing, don't bother running it again
2989                                  */
2990                                 if (deferred == 0)
2991                                         deferred =
2992                                                 skd_isr_completion_posted(skdev,
2993                                                 skd_isr_comp_limit, &flush_enqueued);
2994                         }
2995
2996                         if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2997                                 skd_isr_fwstate(skdev);
2998                                 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2999                                     skdev->state ==
3000                                     SKD_DRVR_STATE_DISAPPEARED) {
3001                                         spin_unlock(&skdev->lock);
3002                                         return rc;
3003                                 }
3004                         }
3005
3006                         if (intstat & FIT_ISH_MSG_FROM_DEV)
3007                                 skd_isr_msg_from_dev(skdev);
3008                 }
3009         }
3010
3011         if (unlikely(flush_enqueued))
3012                 skd_request_fn(skdev->queue);
3013
3014         if (deferred)
3015                 schedule_work(&skdev->completion_worker);
3016         else if (!flush_enqueued)
3017                 skd_request_fn(skdev->queue);
3018
3019         spin_unlock(&skdev->lock);
3020
3021         return rc;
3022 }
3023
3024 static void skd_drive_fault(struct skd_device *skdev)
3025 {
3026         skdev->state = SKD_DRVR_STATE_FAULT;
3027         pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3028 }
3029
3030 static void skd_drive_disappeared(struct skd_device *skdev)
3031 {
3032         skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3033         pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3034 }
3035
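/*
 * Handle a firmware/drive state-change interrupt: read the new drive
 * state from FIT_STATUS and advance the driver state machine to match.
 */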
3036 static void skd_isr_fwstate(struct skd_device *skdev)
3037 {
3038         u32 sense;
3039         u32 state;
3040         u32 mtd;
3041         int prev_driver_state = skdev->state;
3042
3043         sense = SKD_READL(skdev, FIT_STATUS);
3044         state = sense & FIT_SR_DRIVE_STATE_MASK;
3045
3046         pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3047                skd_name(skdev),
3048                skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3049                skd_drive_state_to_str(state), state);
3050
3051         skdev->drive_state = state;
3052
3053         switch (skdev->drive_state) {
3054         case FIT_SR_DRIVE_INIT:
3055                 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3056                         skd_disable_interrupts(skdev);
3057                         break;
3058                 }
3059                 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3060                         skd_recover_requests(skdev, 0);
3061                 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3062                         skdev->timer_countdown = SKD_STARTING_TIMO;
3063                         skdev->state = SKD_DRVR_STATE_STARTING;
3064                         skd_soft_reset(skdev);
3065                         break;
3066                 }
3067                 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3068                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3069                 skdev->last_mtd = mtd;
3070                 break;
3071
3072         case FIT_SR_DRIVE_ONLINE:
3073                 skdev->cur_max_queue_depth = skd_max_queue_depth;
3074                 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3075                         skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3076
3077                 skdev->queue_low_water_mark =
3078                         skdev->cur_max_queue_depth * 2 / 3 + 1;
3079                 if (skdev->queue_low_water_mark < 1)
3080                         skdev->queue_low_water_mark = 1;
3081                 pr_info("(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3082                        skd_name(skdev),
3083                        skdev->cur_max_queue_depth,
3084                        skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3085
3086                 skd_refresh_device_data(skdev);
3087                 break;
3088
3089         case FIT_SR_DRIVE_BUSY:
3090                 skdev->state = SKD_DRVR_STATE_BUSY;
3091                 skdev->timer_countdown = SKD_BUSY_TIMO;
3092                 skd_quiesce_dev(skdev);
3093                 break;
3094         case FIT_SR_DRIVE_BUSY_SANITIZE:
3095                 /* Set timer for 3 seconds; we'll abort any unfinished
3096                  * commands after it expires.
3097                  */
3098                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3099                 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3100                 blk_start_queue(skdev->queue);
3101                 break;
3102         case FIT_SR_DRIVE_BUSY_ERASE:
3103                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3104                 skdev->timer_countdown = SKD_BUSY_TIMO;
3105                 break;
3106         case FIT_SR_DRIVE_OFFLINE:
3107                 skdev->state = SKD_DRVR_STATE_IDLE;
3108                 break;
3109         case FIT_SR_DRIVE_SOFT_RESET:
3110                 switch (skdev->state) {
3111                 case SKD_DRVR_STATE_STARTING:
3112                 case SKD_DRVR_STATE_RESTARTING:
3113                         /* Expected by a caller of skd_soft_reset() */
3114                         break;
3115                 default:
3116                         skdev->state = SKD_DRVR_STATE_RESTARTING;
3117                         break;
3118                 }
3119                 break;
3120         case FIT_SR_DRIVE_FW_BOOTING:
3121                 pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3122                          skdev->name, __func__, __LINE__, skdev->name);
3123                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3124                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3125                 break;
3126
3127         case FIT_SR_DRIVE_DEGRADED:
3128         case FIT_SR_PCIE_LINK_DOWN:
3129         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3130                 break;
3131
3132         case FIT_SR_DRIVE_FAULT:
3133                 skd_drive_fault(skdev);
3134                 skd_recover_requests(skdev, 0);
3135                 blk_start_queue(skdev->queue);
3136                 break;
3137
3138         /* PCIe bus returned all Fs? */
3139         case 0xFF:
3140                 pr_info("(%s): state=0x%x sense=0x%x\n",
3141                        skd_name(skdev), state, sense);
3142                 skd_drive_disappeared(skdev);
3143                 skd_recover_requests(skdev, 0);
3144                 blk_start_queue(skdev->queue);
3145                 break;
3146         default:
3147                 /*
3148                  * Unknown FW state. Wait for a state we recognize.
3149                  */
3150                 break;
3151         }
3152         pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3153                skd_name(skdev),
3154                skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3155                skd_skdev_state_to_str(skdev->state), skdev->state);
3156 }
3157
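/*
 * Abort (or optionally requeue) every outstanding request and rebuild the
 * skreq, skmsg and skspcl free lists from scratch. Used when the drive
 * faults, disappears, or is reset while commands are in flight.
 */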
3158 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3159 {
3160         int i;
3161
3162         for (i = 0; i < skdev->num_req_context; i++) {
3163                 struct skd_request_context *skreq = &skdev->skreq_table[i];
3164
3165                 if (skreq->state == SKD_REQ_STATE_BUSY) {
3166                         skd_log_skreq(skdev, skreq, "recover");
3167
3168                         SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3169                         SKD_ASSERT(skreq->req != NULL);
3170
3171                         /* Release DMA resources for the request. */
3172                         if (skreq->n_sg > 0)
3173                                 skd_postop_sg_list(skdev, skreq);
3174
3175                         if (requeue &&
3176                             (unsigned long) ++skreq->req->special <
3177                             SKD_MAX_RETRIES)
3178                                 blk_requeue_request(skdev->queue, skreq->req);
3179                         else
3180                                 skd_end_request(skdev, skreq, BLK_STS_IOERR);
3181
3182                         skreq->req = NULL;
3183
3184                         skreq->state = SKD_REQ_STATE_IDLE;
3185                         skreq->id += SKD_ID_INCR;
3186                 }
3187                 if (i > 0)
3188                         skreq[-1].next = skreq;
3189                 skreq->next = NULL;
3190         }
3191         skdev->skreq_free_list = skdev->skreq_table;
3192
3193         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3194                 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3195
3196                 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3197                         skd_log_skmsg(skdev, skmsg, "salvaged");
3198                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3199                         skmsg->state = SKD_MSG_STATE_IDLE;
3200                         skmsg->id += SKD_ID_INCR;
3201                 }
3202                 if (i > 0)
3203                         skmsg[-1].next = skmsg;
3204                 skmsg->next = NULL;
3205         }
3206         skdev->skmsg_free_list = skdev->skmsg_table;
3207
3208         for (i = 0; i < skdev->n_special; i++) {
3209                 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3210
3211                 /* If orphaned, reclaim it because it has already been reported
3212                  * to the process as an error (it was just waiting for
3213                  * a completion that didn't come, and now it will never come).
3214                  * If busy, change to a state that will cause it to error
3215                  * out in the wait routine and let it do the normal
3216                  * reporting and reclaiming.
3217                  */
3218                 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3219                         if (skspcl->orphaned) {
3220                                 pr_debug("%s:%s:%d orphaned %p\n",
3221                                          skdev->name, __func__, __LINE__,
3222                                          skspcl);
3223                                 skd_release_special(skdev, skspcl);
3224                         } else {
3225                                 pr_debug("%s:%s:%d not orphaned %p\n",
3226                                          skdev->name, __func__, __LINE__,
3227                                          skspcl);
3228                                 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3229                         }
3230                 }
3231         }
3232         skdev->skspcl_free_list = skdev->skspcl_table;
3233
3234         for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3235                 skdev->timeout_slot[i] = 0;
3236
3237         skdev->in_flight = 0;
3238 }
3239
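/*
 * Process a message from the device. Start-up is a chain of
 * message/acknowledge exchanges: each case below consumes the ack for the
 * previous FIT_MTD_* request and issues the next one, ending with
 * FIT_MTD_ARM_QUEUE once the completion queue and time stamps are set up.
 */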
3240 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3241 {
3242         u32 mfd;
3243         u32 mtd;
3244         u32 data;
3245
3246         mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3247
3248         pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3249                  skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3250
3251         /* ignore any mtd that is an ack for something we didn't send */
3252         if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3253                 return;
3254
3255         switch (FIT_MXD_TYPE(mfd)) {
3256         case FIT_MTD_FITFW_INIT:
3257                 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3258
3259                 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3260                         pr_err("(%s): protocol mismatch\n",
3261                                skdev->name);
3262                         pr_err("(%s):   got=%d support=%d\n",
3263                                skdev->name, skdev->proto_ver,
3264                                FIT_PROTOCOL_VERSION_1);
3265                         pr_err("(%s):   please upgrade driver\n",
3266                                skdev->name);
3267                         skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3268                         skd_soft_reset(skdev);
3269                         break;
3270                 }
3271                 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3272                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3273                 skdev->last_mtd = mtd;
3274                 break;
3275
3276         case FIT_MTD_GET_CMDQ_DEPTH:
3277                 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3278                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3279                                    SKD_N_COMPLETION_ENTRY);
3280                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3281                 skdev->last_mtd = mtd;
3282                 break;
3283
3284         case FIT_MTD_SET_COMPQ_DEPTH:
3285                 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3286                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3287                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3288                 skdev->last_mtd = mtd;
3289                 break;
3290
3291         case FIT_MTD_SET_COMPQ_ADDR:
3292                 skd_reset_skcomp(skdev);
3293                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3294                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3295                 skdev->last_mtd = mtd;
3296                 break;
3297
3298         case FIT_MTD_CMD_LOG_HOST_ID:
3299                 skdev->connect_time_stamp = get_seconds();
3300                 data = skdev->connect_time_stamp & 0xFFFF;
3301                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3302                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3303                 skdev->last_mtd = mtd;
3304                 break;
3305
3306         case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3307                 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3308                 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3309                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3310                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3311                 skdev->last_mtd = mtd;
3312                 break;
3313
3314         case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3315                 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3316                 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3317                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3318                 skdev->last_mtd = mtd;
3319
3320                 pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3321                        skd_name(skdev),
3322                        skdev->connect_time_stamp, skdev->drive_jiffies);
3323                 break;
3324
3325         case FIT_MTD_ARM_QUEUE:
3326                 skdev->last_mtd = 0;
3327                 /*
3328                  * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3329                  */
3330                 break;
3331
3332         default:
3333                 break;
3334         }
3335 }
3336
3337 static void skd_disable_interrupts(struct skd_device *skdev)
3338 {
3339         u32 sense;
3340
3341         sense = SKD_READL(skdev, FIT_CONTROL);
3342         sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3343         SKD_WRITEL(skdev, sense, FIT_CONTROL);
3344         pr_debug("%s:%s:%d sense 0x%x\n",
3345                  skdev->name, __func__, __LINE__, sense);
3346
3347         /* Note that all 1s are written. A 1-bit means
3348          * disable, a 0-bit means enable.
3349          */
3350         SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3351 }
3352
3353 static void skd_enable_interrupts(struct skd_device *skdev)
3354 {
3355         u32 val;
3356
3357         /* unmask interrupts first */
3358         val = FIT_ISH_FW_STATE_CHANGE +
3359               FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3360
3361         /* Note that the complement of the mask is written. A 1-bit means
3362          * disable, a 0-bit means enable. */
3363         SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3364         pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3365                  skdev->name, __func__, __LINE__, ~val);
3366
3367         val = SKD_READL(skdev, FIT_CONTROL);
3368         val |= FIT_CR_ENABLE_INTERRUPTS;
3369         pr_debug("%s:%s:%d control=0x%x\n",
3370                  skdev->name, __func__, __LINE__, val);
3371         SKD_WRITEL(skdev, val, FIT_CONTROL);
3372 }
3373
3374 /*
3375  *****************************************************************************
3376  * START, STOP, RESTART, QUIESCE, UNQUIESCE
3377  *****************************************************************************
3378  */
3379
3380 static void skd_soft_reset(struct skd_device *skdev)
3381 {
3382         u32 val;
3383
3384         val = SKD_READL(skdev, FIT_CONTROL);
3385         val |= (FIT_CR_SOFT_RESET);
3386         pr_debug("%s:%s:%d control=0x%x\n",
3387                  skdev->name, __func__, __LINE__, val);
3388         SKD_WRITEL(skdev, val, FIT_CONTROL);
3389 }
3390
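/*
 * Bring the device up: ack any stale interrupts, sample the current drive
 * state, enable interrupts, and start the sequence appropriate to the
 * state the firmware reports.
 */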
3391 static void skd_start_device(struct skd_device *skdev)
3392 {
3393         unsigned long flags;
3394         u32 sense;
3395         u32 state;
3396
3397         spin_lock_irqsave(&skdev->lock, flags);
3398
3399         /* ack all ghost interrupts */
3400         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3401
3402         sense = SKD_READL(skdev, FIT_STATUS);
3403
3404         pr_debug("%s:%s:%d initial status=0x%x\n",
3405                  skdev->name, __func__, __LINE__, sense);
3406
3407         state = sense & FIT_SR_DRIVE_STATE_MASK;
3408         skdev->drive_state = state;
3409         skdev->last_mtd = 0;
3410
3411         skdev->state = SKD_DRVR_STATE_STARTING;
3412         skdev->timer_countdown = SKD_STARTING_TIMO;
3413
3414         skd_enable_interrupts(skdev);
3415
3416         switch (skdev->drive_state) {
3417         case FIT_SR_DRIVE_OFFLINE:
3418                 pr_err("(%s): Drive offline...\n", skd_name(skdev));
3419                 break;
3420
3421         case FIT_SR_DRIVE_FW_BOOTING:
3422                 pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3423                          skdev->name, __func__, __LINE__, skdev->name);
3424                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3425                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3426                 break;
3427
3428         case FIT_SR_DRIVE_BUSY_SANITIZE:
3429                 pr_info("(%s): Start: BUSY_SANITIZE\n",
3430                        skd_name(skdev));
3431                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3432                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3433                 break;
3434
3435         case FIT_SR_DRIVE_BUSY_ERASE:
3436                 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3437                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3438                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3439                 break;
3440
3441         case FIT_SR_DRIVE_INIT:
3442         case FIT_SR_DRIVE_ONLINE:
3443                 skd_soft_reset(skdev);
3444                 break;
3445
3446         case FIT_SR_DRIVE_BUSY:
3447                 pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3448                 skdev->state = SKD_DRVR_STATE_BUSY;
3449                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3450                 break;
3451
3452         case FIT_SR_DRIVE_SOFT_RESET:
3453                 pr_err("(%s) drive soft reset in prog\n",
3454                        skd_name(skdev));
3455                 break;
3456
3457         case FIT_SR_DRIVE_FAULT:
3458                 /* Fault state is bad...soft reset won't do it...
3459                  * Hard reset, maybe, but does it work on device?
3460                  * For now, just fault so the system doesn't hang.
3461                  */
3462                 skd_drive_fault(skdev);
3463                 /* start the queue so we can respond with error to requests */
3464                 pr_debug("%s:%s:%d starting %s queue\n",
3465                          skdev->name, __func__, __LINE__, skdev->name);
3466                 blk_start_queue(skdev->queue);
3467                 skdev->gendisk_on = -1;
3468                 wake_up_interruptible(&skdev->waitq);
3469                 break;
3470
3471         case 0xFF:
3472                 /* Most likely the device isn't there or isn't responding
3473                  * to the BAR1 addresses. */
3474                 skd_drive_disappeared(skdev);
3475                 /* start the queue so we can respond with error to requests */
3476                 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3477                          skdev->name, __func__, __LINE__, skdev->name);
3478                 blk_start_queue(skdev->queue);
3479                 skdev->gendisk_on = -1;
3480                 wake_up_interruptible(&skdev->waitq);
3481                 break;
3482
3483         default:
3484                 pr_err("(%s) Start: unknown state %x\n",
3485                        skd_name(skdev), skdev->drive_state);
3486                 break;
3487         }
3488
3489         state = SKD_READL(skdev, FIT_CONTROL);
3490         pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3491                  skdev->name, __func__, __LINE__, state);
3492
3493         state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3494         pr_debug("%s:%s:%d Intr Status=0x%x\n",
3495                  skdev->name, __func__, __LINE__, state);
3496
3497         state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3498         pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3499                  skdev->name, __func__, __LINE__, state);
3500
3501         state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3502         pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3503                  skdev->name, __func__, __LINE__, state);
3504
3505         state = SKD_READL(skdev, FIT_HW_VERSION);
3506         pr_debug("%s:%s:%d HW version=0x%x\n",
3507                  skdev->name, __func__, __LINE__, state);
3508
3509         spin_unlock_irqrestore(&skdev->lock, flags);
3510 }
3511
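/*
 * Quiesce the device for unload: flush the write cache with an internal
 * SYNCHRONIZE CACHE command (waiting up to 10 seconds for it), then
 * disable interrupts and soft-reset the drive back to FIT_SR_DRIVE_INIT.
 */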
3512 static void skd_stop_device(struct skd_device *skdev)
3513 {
3514         unsigned long flags;
3515         struct skd_special_context *skspcl = &skdev->internal_skspcl;
3516         u32 dev_state;
3517         int i;
3518
3519         spin_lock_irqsave(&skdev->lock, flags);
3520
3521         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3522                 pr_err("(%s): skd_stop_device not online no sync\n",
3523                        skd_name(skdev));
3524                 goto stop_out;
3525         }
3526
3527         if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3528                 pr_err("(%s): skd_stop_device no special\n",
3529                        skd_name(skdev));
3530                 goto stop_out;
3531         }
3532
3533         skdev->state = SKD_DRVR_STATE_SYNCING;
3534         skdev->sync_done = 0;
3535
3536         skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3537
3538         spin_unlock_irqrestore(&skdev->lock, flags);
3539
3540         wait_event_interruptible_timeout(skdev->waitq,
3541                                          (skdev->sync_done), (10 * HZ));
3542
3543         spin_lock_irqsave(&skdev->lock, flags);
3544
3545         switch (skdev->sync_done) {
3546         case 0:
3547                 pr_err("(%s): skd_stop_device no sync\n",
3548                        skd_name(skdev));
3549                 break;
3550         case 1:
3551                 pr_err("(%s): skd_stop_device sync done\n",
3552                        skd_name(skdev));
3553                 break;
3554         default:
3555                 pr_err("(%s): skd_stop_device sync error\n",
3556                        skd_name(skdev));
3557         }
3558
3559 stop_out:
3560         skdev->state = SKD_DRVR_STATE_STOPPING;
3561         spin_unlock_irqrestore(&skdev->lock, flags);
3562
3563         skd_kill_timer(skdev);
3564
3565         spin_lock_irqsave(&skdev->lock, flags);
3566         skd_disable_interrupts(skdev);
3567
3568         /* ensure all ints on device are cleared */
3569         /* soft reset the device to unload with a clean slate */
3570         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3571         SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3572
3573         spin_unlock_irqrestore(&skdev->lock, flags);
3574
3575         /* poll every 100ms, 1 second timeout */
3576         for (i = 0; i < 10; i++) {
3577                 dev_state =
3578                         SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3579                 if (dev_state == FIT_SR_DRIVE_INIT)
3580                         break;
3581                 set_current_state(TASK_INTERRUPTIBLE);
3582                 schedule_timeout(msecs_to_jiffies(100));
3583         }
3584
3585         if (dev_state != FIT_SR_DRIVE_INIT)
3586                 pr_err("(%s): skd_stop_device state error 0x%02x\n",
3587                        skd_name(skdev), dev_state);
3588 }
3589
3590 /* assume spinlock is held */
3591 static void skd_restart_device(struct skd_device *skdev)
3592 {
3593         u32 state;
3594
3595         /* ack all ghost interrupts */
3596         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3597
3598         state = SKD_READL(skdev, FIT_STATUS);
3599
3600         pr_debug("%s:%s:%d drive status=0x%x\n",
3601                  skdev->name, __func__, __LINE__, state);
3602
3603         state &= FIT_SR_DRIVE_STATE_MASK;
3604         skdev->drive_state = state;
3605         skdev->last_mtd = 0;
3606
3607         skdev->state = SKD_DRVR_STATE_RESTARTING;
3608         skdev->timer_countdown = SKD_RESTARTING_TIMO;
3609
3610         skd_soft_reset(skdev);
3611 }
3612
3613 /* assume spinlock is held */
3614 static int skd_quiesce_dev(struct skd_device *skdev)
3615 {
3616         int rc = 0;
3617
3618         switch (skdev->state) {
3619         case SKD_DRVR_STATE_BUSY:
3620         case SKD_DRVR_STATE_BUSY_IMMINENT:
3621                 pr_debug("%s:%s:%d stopping %s queue\n",
3622                          skdev->name, __func__, __LINE__, skdev->name);
3623                 blk_stop_queue(skdev->queue);
3624                 break;
3625         case SKD_DRVR_STATE_ONLINE:
3626         case SKD_DRVR_STATE_STOPPING:
3627         case SKD_DRVR_STATE_SYNCING:
3628         case SKD_DRVR_STATE_PAUSING:
3629         case SKD_DRVR_STATE_PAUSED:
3630         case SKD_DRVR_STATE_STARTING:
3631         case SKD_DRVR_STATE_RESTARTING:
3632         case SKD_DRVR_STATE_RESUMING:
3633         default:
3634                 rc = -EINVAL;
3635                 pr_debug("%s:%s:%d state [%d] not implemented\n",
3636                          skdev->name, __func__, __LINE__, skdev->state);
3637         }
3638         return rc;
3639 }
3640
3641 /* assume spinlock is held */
3642 static int skd_unquiesce_dev(struct skd_device *skdev)
3643 {
3644         int prev_driver_state = skdev->state;
3645
3646         skd_log_skdev(skdev, "unquiesce");
3647         if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3648                 pr_debug("%s:%s:%d **** device already ONLINE\n",
3649                          skdev->name, __func__, __LINE__);
3650                 return 0;
3651         }
3652         if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3653                 /*
3654                  * If there has been a state change to other than
3655                  * ONLINE, we will rely on a controller state change
3656                  * to come back online and restart the queue.
3657                  * The BUSY state means that the driver is ready to
3658                  * continue normal processing but is waiting for the
3659                  * controller to become available.
3660                  */
3661                 skdev->state = SKD_DRVR_STATE_BUSY;
3662                 pr_debug("%s:%s:%d drive BUSY state\n",
3663                          skdev->name, __func__, __LINE__);
3664                 return 0;
3665         }
3666
3667         /*
3668          * The drive has just come online; the driver is either in startup,
3669          * paused performing a task, or busy waiting for hardware.
3670          */
3671         switch (skdev->state) {
3672         case SKD_DRVR_STATE_PAUSED:
3673         case SKD_DRVR_STATE_BUSY:
3674         case SKD_DRVR_STATE_BUSY_IMMINENT:
3675         case SKD_DRVR_STATE_BUSY_ERASE:
3676         case SKD_DRVR_STATE_STARTING:
3677         case SKD_DRVR_STATE_RESTARTING:
3678         case SKD_DRVR_STATE_FAULT:
3679         case SKD_DRVR_STATE_IDLE:
3680         case SKD_DRVR_STATE_LOAD:
3681                 skdev->state = SKD_DRVR_STATE_ONLINE;
3682                 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3683                        skd_name(skdev),
3684                        skd_skdev_state_to_str(prev_driver_state),
3685                        prev_driver_state, skd_skdev_state_to_str(skdev->state),
3686                        skdev->state);
3687                 pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
3688                          skdev->name, __func__, __LINE__);
3689                 pr_debug("%s:%s:%d starting %s queue\n",
3690                          skdev->name, __func__, __LINE__, skdev->name);
3691                 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
3692                 blk_start_queue(skdev->queue);
3693                 skdev->gendisk_on = 1;
3694                 wake_up_interruptible(&skdev->waitq);
3695                 break;
3696
3697         case SKD_DRVR_STATE_DISAPPEARED:
3698         default:
3699                 pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
3700                          skdev->name, __func__, __LINE__,
3701                          skdev->state);
3702                 return -EBUSY;
3703         }
3704         return 0;
3705 }
3706
3707 /*
3708  *****************************************************************************
3709  * PCIe MSI/MSI-X INTERRUPT HANDLERS
3710  *****************************************************************************
3711  */
3712
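/*
 * With MSI-X each interrupt source has its own vector (see msix_entries[]
 * below), so each handler acks and services exactly one condition instead
 * of the polling loop used by skd_isr().
 */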
3713 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3714 {
3715         struct skd_device *skdev = skd_host_data;
3716         unsigned long flags;
3717
3718         spin_lock_irqsave(&skdev->lock, flags);
3719         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3720                  skdev->name, __func__, __LINE__,
3721                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3722         pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
3723                irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
3724         SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3725         spin_unlock_irqrestore(&skdev->lock, flags);
3726         return IRQ_HANDLED;
3727 }
3728
3729 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3730 {
3731         struct skd_device *skdev = skd_host_data;
3732         unsigned long flags;
3733
3734         spin_lock_irqsave(&skdev->lock, flags);
3735         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3736                  skdev->name, __func__, __LINE__,
3737                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3738         SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3739         skd_isr_fwstate(skdev);
3740         spin_unlock_irqrestore(&skdev->lock, flags);
3741         return IRQ_HANDLED;
3742 }
3743
3744 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3745 {
3746         struct skd_device *skdev = skd_host_data;
3747         unsigned long flags;
3748         int flush_enqueued = 0;
3749         int deferred;
3750
3751         spin_lock_irqsave(&skdev->lock, flags);
3752         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3753                  skdev->name, __func__, __LINE__,
3754                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3755         SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3756         deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3757                                                 &flush_enqueued);
3758         if (flush_enqueued)
3759                 skd_request_fn(skdev->queue);
3760
3761         if (deferred)
3762                 schedule_work(&skdev->completion_worker);
3763         else if (!flush_enqueued)
3764                 skd_request_fn(skdev->queue);
3765
3766         spin_unlock_irqrestore(&skdev->lock, flags);
3767
3768         return IRQ_HANDLED;
3769 }
3770
3771 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3772 {
3773         struct skd_device *skdev = skd_host_data;
3774         unsigned long flags;
3775
3776         spin_lock_irqsave(&skdev->lock, flags);
3777         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3778                  skdev->name, __func__, __LINE__,
3779                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3780         SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3781         skd_isr_msg_from_dev(skdev);
3782         spin_unlock_irqrestore(&skdev->lock, flags);
3783         return IRQ_HANDLED;
3784 }
3785
3786 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3787 {
3788         struct skd_device *skdev = skd_host_data;
3789         unsigned long flags;
3790
3791         spin_lock_irqsave(&skdev->lock, flags);
3792         pr_debug("%s:%s:%d MSIX = 0x%x\n",
3793                  skdev->name, __func__, __LINE__,
3794                  SKD_READL(skdev, FIT_INT_STATUS_HOST));
3795         SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3796         spin_unlock_irqrestore(&skdev->lock, flags);
3797         return IRQ_HANDLED;
3798 }
3799
3800 /*
3801  *****************************************************************************
3802  * PCIe MSI/MSI-X SETUP
3803  *****************************************************************************
3804  */
3805
3806 struct skd_msix_entry {
3807         char isr_name[30];
3808 };
3809
3810 struct skd_init_msix_entry {
3811         const char *name;
3812         irq_handler_t handler;
3813 };
3814
3815 #define SKD_MAX_MSIX_COUNT              13
3816 #define SKD_MIN_MSIX_COUNT              7
3817 #define SKD_BASE_MSIX_IRQ               4
3818
3819 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3820         { "(DMA 0)",        skd_reserved_isr },
3821         { "(DMA 1)",        skd_reserved_isr },
3822         { "(DMA 2)",        skd_reserved_isr },
3823         { "(DMA 3)",        skd_reserved_isr },
3824         { "(State Change)", skd_statec_isr   },
3825         { "(COMPL_Q)",      skd_comp_q       },
3826         { "(MSG)",          skd_msg_isr      },
3827         { "(Reserved)",     skd_reserved_isr },
3828         { "(Reserved)",     skd_reserved_isr },
3829         { "(Queue Full 0)", skd_qfull_isr    },
3830         { "(Queue Full 1)", skd_qfull_isr    },
3831         { "(Queue Full 2)", skd_qfull_isr    },
3832         { "(Queue Full 3)", skd_qfull_isr    },
3833 };
3834
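/*
 * Allocate the full set of SKD_MAX_MSIX_COUNT MSI-X vectors and register
 * one handler per vector from the msix_entries[] table above. This is
 * all-or-nothing: on any failure everything acquired so far is released
 * and the caller falls back to MSI or legacy interrupts.
 */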
3835 static int skd_acquire_msix(struct skd_device *skdev)
3836 {
3837         int i, rc;
3838         struct pci_dev *pdev = skdev->pdev;
3839
3840         rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
3841                         PCI_IRQ_MSIX);
3842         if (rc < 0) {
3843                 pr_err("(%s): failed to enable MSI-X %d\n",
3844                        skd_name(skdev), rc);
3845                 goto out;
3846         }
3847
3848         skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
3849                         sizeof(struct skd_msix_entry), GFP_KERNEL);
3850         if (!skdev->msix_entries) {
3851                 rc = -ENOMEM;
3852                 pr_err("(%s): msix table allocation error\n",
3853                        skd_name(skdev));
3854                 goto out;
3855         }
3856
3857         /* Enable MSI-X vectors for the base queue */
3858         for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3859                 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
3860
3861                 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3862                          "%s%d-msix %s", DRV_NAME, skdev->devno,
3863                          msix_entries[i].name);
3864
3865                 rc = devm_request_irq(&skdev->pdev->dev,
3866                                 pci_irq_vector(skdev->pdev, i),
3867                                 msix_entries[i].handler, 0,
3868                                 qentry->isr_name, skdev);
3869                 if (rc) {
3870                         pr_err("(%s): Unable to register(%d) MSI-X "
3871                                "handler %d: %s\n",
3872                                skd_name(skdev), rc, i, qentry->isr_name);
3873                         goto msix_out;
3874                 }
3875         }
3876
3877         pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
3878                  skdev->name, __func__, __LINE__,
3879                  pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
3880         return 0;
3881
3882 msix_out:
3883         while (--i >= 0)
3884                 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
3885 out:
3886         kfree(skdev->msix_entries);
3887         skdev->msix_entries = NULL;
3888         return rc;
3889 }
3890
3891 static int skd_acquire_irq(struct skd_device *skdev)
3892 {
3893         struct pci_dev *pdev = skdev->pdev;
3894         unsigned int irq_flag = PCI_IRQ_LEGACY;
3895         int rc;
3896
3897         if (skd_isr_type == SKD_IRQ_MSIX) {
3898                 rc = skd_acquire_msix(skdev);
3899                 if (!rc)
3900                         return 0;
3901
3902                 pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
3903                        skd_name(skdev), rc);
3904         }
3905
3906         snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
3907                         skdev->devno);
3908
3909         if (skd_isr_type != SKD_IRQ_LEGACY)
3910                 irq_flag |= PCI_IRQ_MSI;
3911         rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
3912         if (rc < 0) {
3913                 pr_err("(%s): failed to allocate the MSI interrupt %d\n",
3914                         skd_name(skdev), rc);
3915                 return rc;
3916         }
3917
3918         rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
3919                         pdev->msi_enabled ? 0 : IRQF_SHARED,
3920                         skdev->isr_name, skdev);
3921         if (rc) {
3922                 pci_free_irq_vectors(pdev);
3923                 pr_err("(%s): failed to allocate interrupt %d\n",
3924                         skd_name(skdev), rc);
3925                 return rc;
3926         }
3927
3928         return 0;
3929 }
3930
3931 static void skd_release_irq(struct skd_device *skdev)
3932 {
3933         struct pci_dev *pdev = skdev->pdev;
3934
3935         if (skdev->msix_entries) {
3936                 int i;
3937
3938                 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3939                         devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
3940                                         skdev);
3941                 }
3942
3943                 kfree(skdev->msix_entries);
3944                 skdev->msix_entries = NULL;
3945         } else {
3946                 devm_free_irq(&pdev->dev, pdev->irq, skdev);
3947         }
3948
3949         pci_free_irq_vectors(pdev);
3950 }
3951
3952 /*
3953  *****************************************************************************
3954  * CONSTRUCT
3955  *****************************************************************************
3956  */
3957
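/*
 * Allocate one DMA-coherent region that holds the completion ring
 * (SKD_N_COMPLETION_ENTRY entries) immediately followed by the matching
 * error-info entries; skerr_table just points past the completion ring.
 */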
3958 static int skd_cons_skcomp(struct skd_device *skdev)
3959 {
3960         int rc = 0;
3961         struct fit_completion_entry_v1 *skcomp;
3962         u32 nbytes;
3963
3964         nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
3965         nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3966
3967         pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
3968                  skdev->name, __func__, __LINE__,
3969                  nbytes, SKD_N_COMPLETION_ENTRY);
3970
3971         skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
3972                                        &skdev->cq_dma_address);
3973
3974         if (skcomp == NULL) {
3975                 rc = -ENOMEM;
3976                 goto err_out;
3977         }
3978
3979         skdev->skcomp_table = skcomp;
3980         skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
3981                                                            sizeof(*skcomp) *
3982                                                            SKD_N_COMPLETION_ENTRY);
3983
3984 err_out:
3985         return rc;
3986 }
3987
3988 static int skd_cons_skmsg(struct skd_device *skdev)
3989 {
3990         int rc = 0;
3991         u32 i;
3992
3993         pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
3994                  skdev->name, __func__, __LINE__,
3995                  sizeof(struct skd_fitmsg_context),
3996                  skdev->num_fitmsg_context,
3997                  sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
3998
3999         skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
4000                                      *skdev->num_fitmsg_context, GFP_KERNEL);
4001         if (skdev->skmsg_table == NULL) {
4002                 rc = -ENOMEM;
4003                 goto err_out;
4004         }
4005
4006         for (i = 0; i < skdev->num_fitmsg_context; i++) {
4007                 struct skd_fitmsg_context *skmsg;
4008
4009                 skmsg = &skdev->skmsg_table[i];
4010
4011                 skmsg->id = i + SKD_ID_FIT_MSG;
4012
4013                 skmsg->state = SKD_MSG_STATE_IDLE;
4014                 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4015                                                       SKD_N_FITMSG_BYTES + 64,
4016                                                       &skmsg->mb_dma_address);
4017
4018                 if (skmsg->msg_buf == NULL) {
4019                         rc = -ENOMEM;
4020                         goto err_out;
4021                 }
4022
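                /*
                 * Align the message buffer and its DMA address to the
                 * FIT_QCMD base-address boundary; the extra 64 bytes
                 * allocated above leave room for the round-up, and the
                 * discarded low bits are kept in skmsg->offset.
                 */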
4023                 skmsg->offset = (u32)((u64)skmsg->msg_buf &
4024                                       (~FIT_QCMD_BASE_ADDRESS_MASK));
4025                 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4026                 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4027                                        FIT_QCMD_BASE_ADDRESS_MASK);
4028                 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4029                 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4030                 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4031
4032                 skmsg->next = &skmsg[1];
4033         }
4034
4035         /* Free list is in order starting with the 0th entry. */
4036         skdev->skmsg_table[i - 1].next = NULL;
4037         skdev->skmsg_free_list = skdev->skmsg_table;
4038
4039 err_out:
4040         return rc;
4041 }
4042
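/*
 * Allocate a DMA-coherent array of n_sg FIT SG descriptors and pre-link
 * them through next_desc_ptr so the device can walk the chain; the final
 * descriptor is terminated with a null pointer.
 */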
4043 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4044                                                   u32 n_sg,
4045                                                   dma_addr_t *ret_dma_addr)
4046 {
4047         struct fit_sg_descriptor *sg_list;
4048         u32 nbytes;
4049
4050         nbytes = sizeof(*sg_list) * n_sg;
4051
4052         sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4053
4054         if (sg_list != NULL) {
4055                 uint64_t dma_address = *ret_dma_addr;
4056                 u32 i;
4057
4058                 memset(sg_list, 0, nbytes);
4059
4060                 for (i = 0; i < n_sg - 1; i++) {
4061                         uint64_t ndp_off;
4062                         ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4063
4064                         sg_list[i].next_desc_ptr = dma_address + ndp_off;
4065                 }
4066                 sg_list[i].next_desc_ptr = 0LL;
4067         }
4068
4069         return sg_list;
4070 }
4071
4072 static int skd_cons_skreq(struct skd_device *skdev)
4073 {
4074         int rc = 0;
4075         u32 i;
4076
4077         pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4078                  skdev->name, __func__, __LINE__,
4079                  sizeof(struct skd_request_context),
4080                  skdev->num_req_context,
4081                  sizeof(struct skd_request_context) * skdev->num_req_context);
4082
4083         skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4084                                      * skdev->num_req_context, GFP_KERNEL);
4085         if (skdev->skreq_table == NULL) {
4086                 rc = -ENOMEM;
4087                 goto err_out;
4088         }
4089
4090         pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4091                  skdev->name, __func__, __LINE__,
4092                  skdev->sgs_per_request, sizeof(struct scatterlist),
4093                  skdev->sgs_per_request * sizeof(struct scatterlist));
4094
4095         for (i = 0; i < skdev->num_req_context; i++) {
4096                 struct skd_request_context *skreq;
4097
4098                 skreq = &skdev->skreq_table[i];
4099
4100                 skreq->id = i + SKD_ID_RW_REQUEST;
4101                 skreq->state = SKD_REQ_STATE_IDLE;
4102
4103                 skreq->sg = kzalloc(sizeof(struct scatterlist) *
4104                                     skdev->sgs_per_request, GFP_KERNEL);
4105                 if (skreq->sg == NULL) {
4106                         rc = -ENOMEM;
4107                         goto err_out;
4108                 }
4109                 sg_init_table(skreq->sg, skdev->sgs_per_request);
4110
4111                 skreq->sksg_list = skd_cons_sg_list(skdev,
4112                                                     skdev->sgs_per_request,
4113                                                     &skreq->sksg_dma_address);
4114
4115                 if (skreq->sksg_list == NULL) {
4116                         rc = -ENOMEM;
4117                         goto err_out;
4118                 }
4119
4120                 skreq->next = &skreq[1];
4121         }
4122
4123         /* Free list is in order starting with the 0th entry. */
4124         skdev->skreq_table[i - 1].next = NULL;
4125         skdev->skreq_free_list = skdev->skreq_table;
4126
4127 err_out:
4128         return rc;
4129 }
4130
4131 static int skd_cons_skspcl(struct skd_device *skdev)
4132 {
4133         int rc = 0;
4134         u32 i, nbytes;
4135
4136         pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4137                  skdev->name, __func__, __LINE__,
4138                  sizeof(struct skd_special_context),
4139                  skdev->n_special,
4140                  sizeof(struct skd_special_context) * skdev->n_special);
4141
4142         skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4143                                       * skdev->n_special, GFP_KERNEL);
4144         if (skdev->skspcl_table == NULL) {
4145                 rc = -ENOMEM;
4146                 goto err_out;
4147         }
4148
4149         for (i = 0; i < skdev->n_special; i++) {
4150                 struct skd_special_context *skspcl;
4151
4152                 skspcl = &skdev->skspcl_table[i];
4153
4154                 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4155                 skspcl->req.state = SKD_REQ_STATE_IDLE;
4156
4157                 skspcl->req.next = &skspcl[1].req;
4158
4159                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4160
4161                 skspcl->msg_buf =
4162                         pci_zalloc_consistent(skdev->pdev, nbytes,
4163                                               &skspcl->mb_dma_address);
4164                 if (skspcl->msg_buf == NULL) {
4165                         rc = -ENOMEM;
4166                         goto err_out;
4167                 }
4168
4169                 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4170                                          SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4171                 if (skspcl->req.sg == NULL) {
4172                         rc = -ENOMEM;
4173                         goto err_out;
4174                 }
4175
4176                 skspcl->req.sksg_list =
4177                         skd_cons_sg_list(skdev, SKD_N_SG_PER_SPECIAL,
4178                                           &skspcl->req.sksg_dma_address);
4180                 if (skspcl->req.sksg_list == NULL) {
4181                         rc = -ENOMEM;
4182                         goto err_out;
4183                 }
4184         }
4185
4186         /* Free list is in order starting with the 0th entry. */
4187         skdev->skspcl_table[i - 1].req.next = NULL;
4188         skdev->skspcl_free_list = skdev->skspcl_table;
4189
4190         return rc;
4191
4192 err_out:
4193         return rc;
4194 }
4195
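     /*
      * Construct the single internal special context ("sksb") used for
      * driver-generated commands (e.g. the internal inquiry/test-unit-ready/
      * read-capacity sequence built by skd_format_internal_skspcl()).
      */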
4196 static int skd_cons_sksb(struct skd_device *skdev)
4197 {
4198         int rc = 0;
4199         struct skd_special_context *skspcl;
4200         u32 nbytes;
4201
4202         skspcl = &skdev->internal_skspcl;
4203
4204         skspcl->req.id = 0 + SKD_ID_INTERNAL;
4205         skspcl->req.state = SKD_REQ_STATE_IDLE;
4206
4207         nbytes = SKD_N_INTERNAL_BYTES;
4208
4209         skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4210                                                  &skspcl->db_dma_address);
4211         if (skspcl->data_buf == NULL) {
4212                 rc = -ENOMEM;
4213                 goto err_out;
4214         }
4215
4216         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4217         skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4218                                                 &skspcl->mb_dma_address);
4219         if (skspcl->msg_buf == NULL) {
4220                 rc = -ENOMEM;
4221                 goto err_out;
4222         }
4223
4224         skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4225                                                  &skspcl->req.sksg_dma_address);
4226         if (skspcl->req.sksg_list == NULL) {
4227                 rc = -ENOMEM;
4228                 goto err_out;
4229         }
4230
4231         if (!skd_format_internal_skspcl(skdev)) {
4232                 rc = -EINVAL;
4233                 goto err_out;
4234         }
4235
4236 err_out:
4237         return rc;
4238 }
4239
4240 static int skd_cons_disk(struct skd_device *skdev)
4241 {
4242         int rc = 0;
4243         struct gendisk *disk;
4244         struct request_queue *q;
4245         unsigned long flags;
4246
4247         disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4248         if (!disk) {
4249                 rc = -ENOMEM;
4250                 goto err_out;
4251         }
4252
4253         skdev->disk = disk;
4254         sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4255
4256         disk->major = skdev->major;
4257         disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4258         disk->fops = &skd_blockdev_ops;
4259         disk->private_data = skdev;
4260
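             /*
              * Legacy (single-queue) block layer: skd_request_fn() is called
              * to drain the request queue under skdev->lock.
              */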
4261         q = blk_init_queue(skd_request_fn, &skdev->lock);
4262         if (!q) {
4263                 rc = -ENOMEM;
4264                 goto err_out;
4265         }
4266         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
4267
4268         skdev->queue = q;
4269         disk->queue = q;
4270         q->queuedata = skdev;
4271
4272         blk_queue_write_cache(q, true, true);
4273         blk_queue_max_segments(q, skdev->sgs_per_request);
4274         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4275
4276         /* set optimal I/O size to 8KB */
4277         blk_queue_io_opt(q, 8192);
4278
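             /* SSD: not rotational, and don't feed the entropy pool. */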
4279         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4280         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4281
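             /*
              * Keep the queue stopped until the drive reports it is online;
              * the driver state machine restarts it later.
              */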
4282         spin_lock_irqsave(&skdev->lock, flags);
4283         pr_debug("%s:%s:%d stopping %s queue\n",
4284                  skdev->name, __func__, __LINE__, skdev->name);
4285         blk_stop_queue(skdev->queue);
4286         spin_unlock_irqrestore(&skdev->lock, flags);
4287
4288 err_out:
4289         return rc;
4290 }
4291
4292 #define SKD_N_DEV_TABLE         16u
4293 static u32 skd_next_devno;
4294
4295 static struct skd_device *skd_construct(struct pci_dev *pdev)
4296 {
4297         struct skd_device *skdev;
4298         int blk_major = skd_major;
4299         int rc;
4300
4301         skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4302
4303         if (!skdev) {
4304                 pr_err(PFX "(%s): memory alloc failure\n",
4305                        pci_name(pdev));
4306                 return NULL;
4307         }
4308
4309         skdev->state = SKD_DRVR_STATE_LOAD;
4310         skdev->pdev = pdev;
4311         skdev->devno = skd_next_devno++;
4312         skdev->major = blk_major;
4313         sprintf(skdev->name, DRV_NAME "%u", skdev->devno);
4314         skdev->dev_max_queue_depth = 0;
4315
4316         skdev->num_req_context = skd_max_queue_depth;
4317         skdev->num_fitmsg_context = skd_max_queue_depth;
4318         skdev->n_special = skd_max_pass_thru;
4319         skdev->cur_max_queue_depth = 1;
4320         skdev->queue_low_water_mark = 1;
4321         skdev->proto_ver = 99;
4322         skdev->sgs_per_request = skd_sgs_per_request;
4323         skdev->dbg_level = skd_dbg_level;
4324
4325         atomic_set(&skdev->device_count, 0);
4326
4327         spin_lock_init(&skdev->lock);
4328
4329         INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4330
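             /*
              * Construct the per-device resources in order.  If any step
              * fails, skd_destruct() below tears down whatever was built.
              */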
4331         pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4332         rc = skd_cons_skcomp(skdev);
4333         if (rc < 0)
4334                 goto err_out;
4335
4336         pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4337         rc = skd_cons_skmsg(skdev);
4338         if (rc < 0)
4339                 goto err_out;
4340
4341         pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4342         rc = skd_cons_skreq(skdev);
4343         if (rc < 0)
4344                 goto err_out;
4345
4346         pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4347         rc = skd_cons_skspcl(skdev);
4348         if (rc < 0)
4349                 goto err_out;
4350
4351         pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4352         rc = skd_cons_sksb(skdev);
4353         if (rc < 0)
4354                 goto err_out;
4355
4356         pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4357         rc = skd_cons_disk(skdev);
4358         if (rc < 0)
4359                 goto err_out;
4360
4361         pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
4362         return skdev;
4363
4364 err_out:
4365         pr_debug("%s:%s:%d construct failed\n",
4366                  skdev->name, __func__, __LINE__);
4367         skd_destruct(skdev);
4368         return NULL;
4369 }
4370
4371 /*
4372  *****************************************************************************
4373  * DESTRUCT (FREE)
4374  *****************************************************************************
4375  */
4376
4377 static void skd_free_skcomp(struct skd_device *skdev)
4378 {
4379         if (skdev->skcomp_table != NULL) {
4380                 u32 nbytes;
4381
4382                 nbytes = sizeof(skdev->skcomp_table[0]) *
4383                          SKD_N_COMPLETION_ENTRY;
4384                 pci_free_consistent(skdev->pdev, nbytes,
4385                                     skdev->skcomp_table, skdev->cq_dma_address);
4386         }
4387
4388         skdev->skcomp_table = NULL;
4389         skdev->cq_dma_address = 0;
4390 }
4391
4392 static void skd_free_skmsg(struct skd_device *skdev)
4393 {
4394         u32 i;
4395
4396         if (skdev->skmsg_table == NULL)
4397                 return;
4398
4399         for (i = 0; i < skdev->num_fitmsg_context; i++) {
4400                 struct skd_fitmsg_context *skmsg;
4401
4402                 skmsg = &skdev->skmsg_table[i];
4403
4404                 if (skmsg->msg_buf != NULL) {
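                             /*
                              * The message buffer was aligned when it was
                              * allocated; add the saved offset back so the
                              * original allocation is handed to
                              * pci_free_consistent().
                              */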
4405                         skmsg->msg_buf += skmsg->offset;
4406                         skmsg->mb_dma_address += skmsg->offset;
4407                         pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4408                                             skmsg->msg_buf,
4409                                             skmsg->mb_dma_address);
4410                 }
4411                 skmsg->msg_buf = NULL;
4412                 skmsg->mb_dma_address = 0;
4413         }
4414
4415         kfree(skdev->skmsg_table);
4416         skdev->skmsg_table = NULL;
4417 }
4418
4419 static void skd_free_sg_list(struct skd_device *skdev,
4420                              struct fit_sg_descriptor *sg_list,
4421                              u32 n_sg, dma_addr_t dma_addr)
4422 {
4423         if (sg_list != NULL) {
4424                 u32 nbytes;
4425
4426                 nbytes = sizeof(*sg_list) * n_sg;
4427
4428                 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4429         }
4430 }
4431
4432 static void skd_free_skreq(struct skd_device *skdev)
4433 {
4434         u32 i;
4435
4436         if (skdev->skreq_table == NULL)
4437                 return;
4438
4439         for (i = 0; i < skdev->num_req_context; i++) {
4440                 struct skd_request_context *skreq;
4441
4442                 skreq = &skdev->skreq_table[i];
4443
4444                 skd_free_sg_list(skdev, skreq->sksg_list,
4445                                  skdev->sgs_per_request,
4446                                  skreq->sksg_dma_address);
4447
4448                 skreq->sksg_list = NULL;
4449                 skreq->sksg_dma_address = 0;
4450
4451                 kfree(skreq->sg);
4452         }
4453
4454         kfree(skdev->skreq_table);
4455         skdev->skreq_table = NULL;
4456 }
4457
4458 static void skd_free_skspcl(struct skd_device *skdev)
4459 {
4460         u32 i;
4461         u32 nbytes;
4462
4463         if (skdev->skspcl_table == NULL)
4464                 return;
4465
4466         for (i = 0; i < skdev->n_special; i++) {
4467                 struct skd_special_context *skspcl;
4468
4469                 skspcl = &skdev->skspcl_table[i];
4470
4471                 if (skspcl->msg_buf != NULL) {
4472                         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4473                         pci_free_consistent(skdev->pdev, nbytes,
4474                                             skspcl->msg_buf,
4475                                             skspcl->mb_dma_address);
4476                 }
4477
4478                 skspcl->msg_buf = NULL;
4479                 skspcl->mb_dma_address = 0;
4480
4481                 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4482                                  SKD_N_SG_PER_SPECIAL,
4483                                  skspcl->req.sksg_dma_address);
4484
4485                 skspcl->req.sksg_list = NULL;
4486                 skspcl->req.sksg_dma_address = 0;
4487
4488                 kfree(skspcl->req.sg);
4489         }
4490
4491         kfree(skdev->skspcl_table);
4492         skdev->skspcl_table = NULL;
4493 }
4494
4495 static void skd_free_sksb(struct skd_device *skdev)
4496 {
4497         struct skd_special_context *skspcl;
4498         u32 nbytes;
4499
4500         skspcl = &skdev->internal_skspcl;
4501
4502         if (skspcl->data_buf != NULL) {
4503                 nbytes = SKD_N_INTERNAL_BYTES;
4504
4505                 pci_free_consistent(skdev->pdev, nbytes,
4506                                     skspcl->data_buf, skspcl->db_dma_address);
4507         }
4508
4509         skspcl->data_buf = NULL;
4510         skspcl->db_dma_address = 0;
4511
4512         if (skspcl->msg_buf != NULL) {
4513                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4514                 pci_free_consistent(skdev->pdev, nbytes,
4515                                     skspcl->msg_buf, skspcl->mb_dma_address);
4516         }
4517
4518         skspcl->msg_buf = NULL;
4519         skspcl->mb_dma_address = 0;
4520
4521         skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4522                          skspcl->req.sksg_dma_address);
4523
4524         skspcl->req.sksg_list = NULL;
4525         skspcl->req.sksg_dma_address = 0;
4526 }
4527
4528 static void skd_free_disk(struct skd_device *skdev)
4529 {
4530         struct gendisk *disk = skdev->disk;
4531
4532         if (disk && (disk->flags & GENHD_FL_UP))
4533                 del_gendisk(disk);
4534
4535         if (skdev->queue) {
4536                 blk_cleanup_queue(skdev->queue);
4537                 skdev->queue = NULL;
4538                 disk->queue = NULL;
4539         }
4540
4541         put_disk(disk);
4542         skdev->disk = NULL;
4543 }
4544
4545 static void skd_destruct(struct skd_device *skdev)
4546 {
4547         if (skdev == NULL)
4548                 return;
4549
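             /*
              * Free in reverse order of construction; each helper copes with
              * a partially constructed device (NULL checks throughout).
              */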
4550         pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4551         skd_free_disk(skdev);
4552
4553         pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4554         skd_free_sksb(skdev);
4555
4556         pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4557         skd_free_skspcl(skdev);
4558
4559         pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4560         skd_free_skreq(skdev);
4561
4562         pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4563         skd_free_skmsg(skdev);
4564
4565         pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4566         skd_free_skcomp(skdev);
4567
4568         pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
4569         kfree(skdev);
4570 }
4571
4572 /*
4573  *****************************************************************************
4574  * BLOCK DEVICE (BDEV) GLUE
4575  *****************************************************************************
4576  */
4577
4578 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4579 {
4580         struct skd_device *skdev;
4581         u64 capacity;
4582
4583         skdev = bdev->bd_disk->private_data;
4584
4585         pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
4586                  skdev->name, __func__, __LINE__,
4587                  bdev->bd_disk->disk_name, current->comm);
4588
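             /*
              * Report a synthetic CHS geometry (64 heads, 255 sectors/track)
              * derived from the capacity, for legacy partitioning tools.
              */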
4589         if (skdev->read_cap_is_valid) {
4590                 capacity = get_capacity(skdev->disk);
4591                 geo->heads = 64;
4592                 geo->sectors = 255;
4593                 geo->cylinders = (capacity) / (255 * 64);
4594
4595                 return 0;
4596         }
4597         return -EIO;
4598 }
4599
4600 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
4601 {
4602         pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
4603         device_add_disk(parent, skdev->disk);
4604         return 0;
4605 }
4606
4607 static const struct block_device_operations skd_blockdev_ops = {
4608         .owner          = THIS_MODULE,
4609         .ioctl          = skd_bdev_ioctl,
4610         .getgeo         = skd_bdev_getgeo,
4611 };
4612
4613 /*
4614  *****************************************************************************
4615  * PCIe DRIVER GLUE
4616  *****************************************************************************
4617  */
4618
4619 static const struct pci_device_id skd_pci_tbl[] = {
4620         { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4621           PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4622         { 0 }                     /* terminate list */
4623 };
4624
4625 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4626
4627 static char *skd_pci_info(struct skd_device *skdev, char *str)
4628 {
4629         int pcie_reg;
4630
4631         strcpy(str, "PCIe (");
4632         pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4633
4634         if (pcie_reg) {
4636                 char lwstr[6];
4637                 uint16_t pcie_lstat, lspeed, lwidth;
4638
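                     /*
                      * Offset 0x12 in the PCIe capability is the Link Status
                      * register (PCI_EXP_LNKSTA): bits 3:0 hold the link
                      * speed, bits 9:4 the negotiated link width.
                      */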
4639                 pcie_reg += 0x12;
4640                 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4641                 lspeed = pcie_lstat & (0xF);
4642                 lwidth = (pcie_lstat & 0x3F0) >> 4;
4643
4644                 if (lspeed == 1)
4645                         strcat(str, "2.5GT/s ");
4646                 else if (lspeed == 2)
4647                         strcat(str, "5.0GT/s ");
4648                 else
4649                         strcat(str, "<unknown> ");
4650                 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4651                 strcat(str, lwstr);
4652         }
4653         return str;
4654 }
4655
4656 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4657 {
4658         int i;
4659         int rc = 0;
4660         char pci_str[32];
4661         struct skd_device *skdev;
4662
4663         pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
4664                DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4665         pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
4666                pci_name(pdev), pdev->vendor, pdev->device);
4667
4668         rc = pci_enable_device(pdev);
4669         if (rc)
4670                 return rc;
4671         rc = pci_request_regions(pdev, DRV_NAME);
4672         if (rc)
4673                 goto err_out;
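             /* Prefer 64-bit DMA; fall back to a 32-bit mask if needed. */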
4674         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4675         if (!rc) {
4676                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4677                 if (rc)
4678                         pr_err("(%s): consistent DMA mask error %d\n",
4679                                pci_name(pdev), rc);
4681         } else {
4682                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4683                 if (rc) {
4685                         pr_err("(%s): DMA mask error %d\n",
4686                                pci_name(pdev), rc);
4687                         goto err_out_regions;
4688                 }
4689         }
4690
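             /*
              * Allocate a dynamic block-device major on the first probe;
              * register_blkdev(0, ...) returns the allocated major, which is
              * shared by all skd devices.
              */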
4691         if (!skd_major) {
4692                 rc = register_blkdev(0, DRV_NAME);
4693                 if (rc < 0)
4694                         goto err_out_regions;
4695                 BUG_ON(!rc);
4696                 skd_major = rc;
4697         }
4698
4699         skdev = skd_construct(pdev);
4700         if (skdev == NULL) {
4701                 rc = -ENOMEM;
4702                 goto err_out_regions;
4703         }
4704
4705         skd_pci_info(skdev, pci_str);
4706         pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
4707
4708         pci_set_master(pdev);
4709         rc = pci_enable_pcie_error_reporting(pdev);
4710         if (rc) {
4711                 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
4712                        skd_name(skdev), rc);
4713                 skdev->pcie_error_reporting_is_enabled = 0;
4714         } else
4715                 skdev->pcie_error_reporting_is_enabled = 1;
4716
4717         pci_set_drvdata(pdev, skdev);
4718
4719         for (i = 0; i < SKD_MAX_BARS; i++) {
4720                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4721                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4722                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4723                                             skdev->mem_size[i]);
4724                 if (!skdev->mem_map[i]) {
4725                         pr_err("(%s): Unable to map adapter memory!\n",
4726                                skd_name(skdev));
4727                         rc = -ENODEV;
4728                         goto err_out_iounmap;
4729                 }
4730                 pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%u\n",
4731                          skdev->name, __func__, __LINE__,
4732                          skdev->mem_map[i],
4733                          (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4734         }
4735
4736         rc = skd_acquire_irq(skdev);
4737         if (rc) {
4738                 pr_err("(%s): interrupt resource error %d\n",
4739                        skd_name(skdev), rc);
4740                 goto err_out_iounmap;
4741         }
4742
4743         rc = skd_start_timer(skdev);
4744         if (rc)
4745                 goto err_out_timer;
4746
4747         init_waitqueue_head(&skdev->waitq);
4748
4749         skd_start_device(skdev);
4750
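             /*
              * Wait for the device bring-up to finish; the state machine sets
              * gendisk_on and wakes waitq once the drive comes online.
              */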
4751         rc = wait_event_interruptible_timeout(skdev->waitq,
4752                                               (skdev->gendisk_on),
4753                                               (SKD_START_WAIT_SECONDS * HZ));
4754         if (skdev->gendisk_on > 0) {
4755                 /* device came on-line after reset */
4756                 skd_bdev_attach(&pdev->dev, skdev);
4757                 rc = 0;
4758         } else {
4759                 /* we timed out, something is wrong with the device,
4760                 /* We timed out; something is wrong with the device,
4761                  * so don't add the disk structure. */
4762                        skd_name(skdev), rc);
4763                 /* in case of no error; we timeout with ENXIO */
4764                 /* If the wait simply timed out (rc == 0), report -ENXIO. */
4765                         rc = -ENXIO;
4766                 goto err_out_timer;
4767         }
4768
4769         return rc;
4770
4771 err_out_timer:
4772         skd_stop_device(skdev);
4773         skd_release_irq(skdev);
4774
4775 err_out_iounmap:
4776         for (i = 0; i < SKD_MAX_BARS; i++)
4777                 if (skdev->mem_map[i])
4778                         iounmap(skdev->mem_map[i]);
4779
4780         if (skdev->pcie_error_reporting_is_enabled)
4781                 pci_disable_pcie_error_reporting(pdev);
4782
4783         skd_destruct(skdev);
4784
4785 err_out_regions:
4786         pci_release_regions(pdev);
4787
4788 err_out:
4789         pci_disable_device(pdev);
4790         pci_set_drvdata(pdev, NULL);
4791         return rc;
4792 }
4793
4794 static void skd_pci_remove(struct pci_dev *pdev)
4795 {
4796         int i;
4797         struct skd_device *skdev;
4798
4799         skdev = pci_get_drvdata(pdev);
4800         if (!skdev) {
4801                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4802                 return;
4803         }
4804         skd_stop_device(skdev);
4805         skd_release_irq(skdev);
4806
4807         for (i = 0; i < SKD_MAX_BARS; i++)
4808                 if (skdev->mem_map[i])
4809                         iounmap(skdev->mem_map[i]);
4810
4811         if (skdev->pcie_error_reporting_is_enabled)
4812                 pci_disable_pcie_error_reporting(pdev);
4813
4814         skd_destruct(skdev);
4815
4816         pci_release_regions(pdev);
4817         pci_disable_device(pdev);
4818         pci_set_drvdata(pdev, NULL);
4819
4822
4823 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4824 {
4825         int i;
4826         struct skd_device *skdev;
4827
4828         skdev = pci_get_drvdata(pdev);
4829         if (!skdev) {
4830                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4831                 return -EIO;
4832         }
4833
4834         skd_stop_device(skdev);
4835
4836         skd_release_irq(skdev);
4837
4838         for (i = 0; i < SKD_MAX_BARS; i++)
4839                 if (skdev->mem_map[i])
4840                         iounmap(skdev->mem_map[i]);
4841
4842         if (skdev->pcie_error_reporting_is_enabled)
4843                 pci_disable_pcie_error_reporting(pdev);
4844
4845         pci_release_regions(pdev);
4846         pci_save_state(pdev);
4847         pci_disable_device(pdev);
4848         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4849         return 0;
4850 }
4851
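     /*
      * Resume redoes most of the probe-time setup: re-enable the device,
      * remap the BARs, reacquire interrupts and the timer, then restart
      * the device state machine.
      */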
4852 static int skd_pci_resume(struct pci_dev *pdev)
4853 {
4854         int i;
4855         int rc = 0;
4856         struct skd_device *skdev;
4857
4858         skdev = pci_get_drvdata(pdev);
4859         if (!skdev) {
4860                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4861                 return -EIO;
4862         }
4863
4864         pci_set_power_state(pdev, PCI_D0);
4865         pci_enable_wake(pdev, PCI_D0, 0);
4866         pci_restore_state(pdev);
4867
4868         rc = pci_enable_device(pdev);
4869         if (rc)
4870                 return rc;
4871         rc = pci_request_regions(pdev, DRV_NAME);
4872         if (rc)
4873                 goto err_out;
4874         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4875         if (!rc) {
4876                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4877                 if (rc)
4878                         pr_err("(%s): consistent DMA mask error %d\n",
4879                                pci_name(pdev), rc);
4881         } else {
4882                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4883                 if (rc) {
4885                         pr_err("(%s): DMA mask error %d\n",
4886                                pci_name(pdev), rc);
4887                         goto err_out_regions;
4888                 }
4889         }
4890
4891         pci_set_master(pdev);
4892         rc = pci_enable_pcie_error_reporting(pdev);
4893         if (rc) {
4894                 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
4895                        skdev->name, rc);
4896                 skdev->pcie_error_reporting_is_enabled = 0;
4897         } else
4898                 skdev->pcie_error_reporting_is_enabled = 1;
4899
4900         for (i = 0; i < SKD_MAX_BARS; i++) {
4902                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4903                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4904                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4905                                             skdev->mem_size[i]);
4906                 if (!skdev->mem_map[i]) {
4907                         pr_err("(%s): Unable to map adapter memory!\n",
4908                                skd_name(skdev));
4909                         rc = -ENODEV;
4910                         goto err_out_iounmap;
4911                 }
4912                 pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%u\n",
4913                          skdev->name, __func__, __LINE__,
4914                          skdev->mem_map[i],
4915                          (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
4916         }
4917         rc = skd_acquire_irq(skdev);
4918         if (rc) {
4920                 pr_err("(%s): interrupt resource error %d\n",
4921                        pci_name(pdev), rc);
4922                 goto err_out_iounmap;
4923         }
4924
4925         rc = skd_start_timer(skdev);
4926         if (rc)
4927                 goto err_out_timer;
4928
4929         init_waitqueue_head(&skdev->waitq);
4930
4931         skd_start_device(skdev);
4932
4933         return rc;
4934
4935 err_out_timer:
4936         skd_stop_device(skdev);
4937         skd_release_irq(skdev);
4938
4939 err_out_iounmap:
4940         for (i = 0; i < SKD_MAX_BARS; i++)
4941                 if (skdev->mem_map[i])
4942                         iounmap(skdev->mem_map[i]);
4943
4944         if (skdev->pcie_error_reporting_is_enabled)
4945                 pci_disable_pcie_error_reporting(pdev);
4946
4947 err_out_regions:
4948         pci_release_regions(pdev);
4949
4950 err_out:
4951         pci_disable_device(pdev);
4952         return rc;
4953 }
4954
4955 static void skd_pci_shutdown(struct pci_dev *pdev)
4956 {
4957         struct skd_device *skdev;
4958
4959         pr_err("skd_pci_shutdown called\n");
4960
4961         skdev = pci_get_drvdata(pdev);
4962         if (!skdev) {
4963                 pr_err("%s: no device data for PCI\n", pci_name(pdev));
4964                 return;
4965         }
4966
4967         pr_err("%s: calling stop\n", skd_name(skdev));
4968         skd_stop_device(skdev);
4969 }
4970
4971 static struct pci_driver skd_driver = {
4972         .name           = DRV_NAME,
4973         .id_table       = skd_pci_tbl,
4974         .probe          = skd_pci_probe,
4975         .remove         = skd_pci_remove,
4976         .suspend        = skd_pci_suspend,
4977         .resume         = skd_pci_resume,
4978         .shutdown       = skd_pci_shutdown,
4979 };
4980
4981 /*
4982  *****************************************************************************
4983  * LOGGING SUPPORT
4984  *****************************************************************************
4985  */
4986
4987 static const char *skd_name(struct skd_device *skdev)
4988 {
4989         memset(skdev->id_str, 0, sizeof(skdev->id_str));
4990
4991         if (skdev->inquiry_is_valid)
4992                 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
4993                          skdev->name, skdev->inq_serial_num,
4994                          pci_name(skdev->pdev));
4995         else
4996                 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
4997                          skdev->name, pci_name(skdev->pdev));
4998
4999         return skdev->id_str;
5000 }
5001
5002 const char *skd_drive_state_to_str(int state)
5003 {
5004         switch (state) {
5005         case FIT_SR_DRIVE_OFFLINE:
5006                 return "OFFLINE";
5007         case FIT_SR_DRIVE_INIT:
5008                 return "INIT";
5009         case FIT_SR_DRIVE_ONLINE:
5010                 return "ONLINE";
5011         case FIT_SR_DRIVE_BUSY:
5012                 return "BUSY";
5013         case FIT_SR_DRIVE_FAULT:
5014                 return "FAULT";
5015         case FIT_SR_DRIVE_DEGRADED:
5016                 return "DEGRADED";
5017         case FIT_SR_PCIE_LINK_DOWN:
5018                 return "LINK_DOWN";
5019         case FIT_SR_DRIVE_SOFT_RESET:
5020                 return "SOFT_RESET";
5021         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
5022                 return "NEED_FW";
5023         case FIT_SR_DRIVE_INIT_FAULT:
5024                 return "INIT_FAULT";
5025         case FIT_SR_DRIVE_BUSY_SANITIZE:
5026                 return "BUSY_SANITIZE";
5027         case FIT_SR_DRIVE_BUSY_ERASE:
5028                 return "BUSY_ERASE";
5029         case FIT_SR_DRIVE_FW_BOOTING:
5030                 return "FW_BOOTING";
5031         default:
5032                 return "???";
5033         }
5034 }
5035
5036 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5037 {
5038         switch (state) {
5039         case SKD_DRVR_STATE_LOAD:
5040                 return "LOAD";
5041         case SKD_DRVR_STATE_IDLE:
5042                 return "IDLE";
5043         case SKD_DRVR_STATE_BUSY:
5044                 return "BUSY";
5045         case SKD_DRVR_STATE_STARTING:
5046                 return "STARTING";
5047         case SKD_DRVR_STATE_ONLINE:
5048                 return "ONLINE";
5049         case SKD_DRVR_STATE_PAUSING:
5050                 return "PAUSING";
5051         case SKD_DRVR_STATE_PAUSED:
5052                 return "PAUSED";
5053         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
5054                 return "DRAINING_TIMEOUT";
5055         case SKD_DRVR_STATE_RESTARTING:
5056                 return "RESTARTING";
5057         case SKD_DRVR_STATE_RESUMING:
5058                 return "RESUMING";
5059         case SKD_DRVR_STATE_STOPPING:
5060                 return "STOPPING";
5061         case SKD_DRVR_STATE_SYNCING:
5062                 return "SYNCING";
5063         case SKD_DRVR_STATE_FAULT:
5064                 return "FAULT";
5065         case SKD_DRVR_STATE_DISAPPEARED:
5066                 return "DISAPPEARED";
5067         case SKD_DRVR_STATE_BUSY_ERASE:
5068                 return "BUSY_ERASE";
5069         case SKD_DRVR_STATE_BUSY_SANITIZE:
5070                 return "BUSY_SANITIZE";
5071         case SKD_DRVR_STATE_BUSY_IMMINENT:
5072                 return "BUSY_IMMINENT";
5073         case SKD_DRVR_STATE_WAIT_BOOT:
5074                 return "WAIT_BOOT";
5075
5076         default:
5077                 return "???";
5078         }
5079 }
5080
5081 static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5082 {
5083         switch (state) {
5084         case SKD_MSG_STATE_IDLE:
5085                 return "IDLE";
5086         case SKD_MSG_STATE_BUSY:
5087                 return "BUSY";
5088         default:
5089                 return "???";
5090         }
5091 }
5092
5093 static const char *skd_skreq_state_to_str(enum skd_req_state state)
5094 {
5095         switch (state) {
5096         case SKD_REQ_STATE_IDLE:
5097                 return "IDLE";
5098         case SKD_REQ_STATE_SETUP:
5099                 return "SETUP";
5100         case SKD_REQ_STATE_BUSY:
5101                 return "BUSY";
5102         case SKD_REQ_STATE_COMPLETED:
5103                 return "COMPLETED";
5104         case SKD_REQ_STATE_TIMEOUT:
5105                 return "TIMEOUT";
5106         case SKD_REQ_STATE_ABORTED:
5107                 return "ABORTED";
5108         default:
5109                 return "???";
5110         }
5111 }
5112
5113 static void skd_log_skdev(struct skd_device *skdev, const char *event)
5114 {
5115         pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
5116                  skdev->name, __func__, __LINE__, skdev->name, skdev, event);
5117         pr_debug("%s:%s:%d   drive_state=%s(%d) driver_state=%s(%d)\n",
5118                  skdev->name, __func__, __LINE__,
5119                  skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
5120                  skd_skdev_state_to_str(skdev->state), skdev->state);
5121         pr_debug("%s:%s:%d   busy=%d limit=%d dev=%d lowat=%d\n",
5122                  skdev->name, __func__, __LINE__,
5123                  skdev->in_flight, skdev->cur_max_queue_depth,
5124                  skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
5125         pr_debug("%s:%s:%d   timestamp=0x%x cycle=%d cycle_ix=%d\n",
5126                  skdev->name, __func__, __LINE__,
5127                  skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
5128 }
5129
5130 static void skd_log_skmsg(struct skd_device *skdev,
5131                           struct skd_fitmsg_context *skmsg, const char *event)
5132 {
5133         pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
5134                  skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
5135         pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x length=%d\n",
5136                  skdev->name, __func__, __LINE__,
5137                  skd_skmsg_state_to_str(skmsg->state), skmsg->state,
5138                  skmsg->id, skmsg->length);
5139 }
5140
5141 static void skd_log_skreq(struct skd_device *skdev,
5142                           struct skd_request_context *skreq, const char *event)
5143 {
5144         pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
5145                  skdev->name, __func__, __LINE__, skdev->name, skreq, event);
5146         pr_debug("%s:%s:%d   state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
5147                  skdev->name, __func__, __LINE__,
5148                  skd_skreq_state_to_str(skreq->state), skreq->state,
5149                  skreq->id, skreq->fitmsg_id);
5150         pr_debug("%s:%s:%d   timo=0x%x sg_dir=%d n_sg=%d\n",
5151                  skdev->name, __func__, __LINE__,
5152                  skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
5153
5154         if (skreq->req != NULL) {
5155                 struct request *req = skreq->req;
5156                 u32 lba = (u32)blk_rq_pos(req);
5157                 u32 count = blk_rq_sectors(req);
5158
5159                 pr_debug("%s:%s:%d "
5160                          "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5161                          skdev->name, __func__, __LINE__,
5162                          req, lba, lba, count, count,
5163                          (int)rq_data_dir(req));
5164         } else
5165                 pr_debug("%s:%s:%d req=NULL\n",
5166                          skdev->name, __func__, __LINE__);
5167 }
5168
5169 /*
5170  *****************************************************************************
5171  * MODULE GLUE
5172  *****************************************************************************
5173  */
5174
5175 static int __init skd_init(void)
5176 {
5177         pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5178
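             /*
              * Validate module parameters and reset out-of-range values to
              * their defaults before registering the PCI driver.
              */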
5179         switch (skd_isr_type) {
5180         case SKD_IRQ_LEGACY:
5181         case SKD_IRQ_MSI:
5182         case SKD_IRQ_MSIX:
5183                 break;
5184         default:
5185                 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
5186                        skd_isr_type, SKD_IRQ_DEFAULT);
5187                 skd_isr_type = SKD_IRQ_DEFAULT;
5188         }
5189
5190         if (skd_max_queue_depth < 1 ||
5191             skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5192                 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
5193                        skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5194                 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5195         }
5196
5197         if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
5198                 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
5199                        skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5200                 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5201         }
5202
5203         if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5204                 pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
5205                        skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5206                 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5207         }
5208
5209         if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5210                 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
5211                        skd_dbg_level, 0);
5212                 skd_dbg_level = 0;
5213         }
5214
5215         if (skd_isr_comp_limit < 0) {
5216                 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
5217                        skd_isr_comp_limit, 0);
5218                 skd_isr_comp_limit = 0;
5219         }
5220
5221         if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5222                 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
5223                        skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5224                 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5225         }
5226
5227         return pci_register_driver(&skd_driver);
5228 }
5229
5230 static void __exit skd_exit(void)
5231 {
5232         pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5233
5234         pci_unregister_driver(&skd_driver);
5235
5236         if (skd_major)
5237                 unregister_blkdev(skd_major, DRV_NAME);
5238 }
5239
5240 module_init(skd_init);
5241 module_exit(skd_exit);