1 /*
2  * Driver for sTec s1120 PCIe SSDs. sTec was acquired by HGST in 2013; HGST
3  * itself had been acquired by Western Digital in 2012.
4  *
5  * Copyright 2012 sTec, Inc.
6  * Copyright (c) 2017 Western Digital Corporation or its affiliates.
7  *
8  * This file is part of the Linux kernel, and is made available under
9  * the terms of the GNU General Public License version 2.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/pci.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/blkdev.h>
19 #include <linux/sched.h>
20 #include <linux/interrupt.h>
21 #include <linux/compiler.h>
22 #include <linux/workqueue.h>
23 #include <linux/delay.h>
24 #include <linux/time.h>
25 #include <linux/hdreg.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/completion.h>
28 #include <linux/scatterlist.h>
29 #include <linux/version.h>
30 #include <linux/err.h>
31 #include <linux/aer.h>
32 #include <linux/wait.h>
33 #include <linux/uio.h>
34 #include <linux/stringify.h>
35 #include <scsi/scsi.h>
36 #include <scsi/sg.h>
37 #include <linux/io.h>
38 #include <linux/uaccess.h>
39 #include <asm/unaligned.h>
40
41 #include "skd_s1120.h"
42
43 static int skd_dbg_level;
44 static int skd_isr_comp_limit = 4;
45
46 enum {
47         STEC_LINK_2_5GTS = 0,
48         STEC_LINK_5GTS = 1,
49         STEC_LINK_8GTS = 2,
50         STEC_LINK_UNKNOWN = 0xFF
51 };
52
53 enum {
54         SKD_FLUSH_INITIALIZER,
55         SKD_FLUSH_ZERO_SIZE_FIRST,
56         SKD_FLUSH_DATA_SECOND,
57 };
58
59 #define SKD_ASSERT(expr) \
60         do { \
61                 if (unlikely(!(expr))) { \
62                         pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
63                                # expr, __FILE__, __func__, __LINE__); \
64                 } \
65         } while (0)
66
67 #define DRV_NAME "skd"
68 #define DRV_VERSION "2.2.1"
69 #define DRV_BUILD_ID "0260"
70 #define PFX DRV_NAME ": "
71 #define DRV_BIN_VERSION 0x100
72 #define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID
73
74 MODULE_LICENSE("GPL");
75
76 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
77 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
78
79 #define PCI_VENDOR_ID_STEC      0x1B39
80 #define PCI_DEVICE_ID_S1120     0x0001
81
82 #define SKD_FUA_NV              (1 << 1)
83 #define SKD_MINORS_PER_DEVICE   16
84
85 #define SKD_MAX_QUEUE_DEPTH     200u
86
87 #define SKD_PAUSE_TIMEOUT       (5 * 1000)
88
89 #define SKD_N_FITMSG_BYTES      (512u)
90 #define SKD_MAX_REQ_PER_MSG     14
91
92 #define SKD_N_SPECIAL_CONTEXT   32u
93 #define SKD_N_SPECIAL_FITMSG_BYTES      (128u)
94
95 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
96  * 128KB limit.  That allows 4096*4K = 16M xfer size
97  */
98 #define SKD_N_SG_PER_REQ_DEFAULT 256u
99 #define SKD_N_SG_PER_SPECIAL    256u
100
101 #define SKD_N_COMPLETION_ENTRY  256u
102 #define SKD_N_READ_CAP_BYTES    (8u)
103
104 #define SKD_N_INTERNAL_BYTES    (512u)
105
106 #define SKD_SKCOMP_SIZE                                                 \
107         ((sizeof(struct fit_completion_entry_v1) +                      \
108           sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
109
110 /* 5 bits of uniquifier, 0xF800 */
111 #define SKD_ID_INCR             (0x400)
112 #define SKD_ID_TABLE_MASK       (3u << 8u)
113 #define  SKD_ID_RW_REQUEST      (0u << 8u)
114 #define  SKD_ID_INTERNAL        (1u << 8u)
115 #define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
116 #define  SKD_ID_FIT_MSG         (3u << 8u)
117 #define SKD_ID_SLOT_MASK        0x00FFu
118 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
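/*
 * For example, an id of 0x0123 decodes via the masks above as table
 * SKD_ID_INTERNAL (0x100) and slot 0x23; the bits above the table field
 * advance by SKD_ID_INCR each time a context is reused, distinguishing
 * successive uses of the same slot.
 */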
119
120 #define SKD_N_TIMEOUT_SLOT      4u
121 #define SKD_TIMEOUT_SLOT_MASK   3u
122
123 #define SKD_N_MAX_SECTORS 2048u
124
125 #define SKD_MAX_RETRIES 2u
126
127 #define SKD_TIMER_SECONDS(seconds) (seconds)
128 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
129
130 #define INQ_STD_NBYTES 36
131
132 enum skd_drvr_state {
133         SKD_DRVR_STATE_LOAD,
134         SKD_DRVR_STATE_IDLE,
135         SKD_DRVR_STATE_BUSY,
136         SKD_DRVR_STATE_STARTING,
137         SKD_DRVR_STATE_ONLINE,
138         SKD_DRVR_STATE_PAUSING,
139         SKD_DRVR_STATE_PAUSED,
140         SKD_DRVR_STATE_DRAINING_TIMEOUT,
141         SKD_DRVR_STATE_RESTARTING,
142         SKD_DRVR_STATE_RESUMING,
143         SKD_DRVR_STATE_STOPPING,
144         SKD_DRVR_STATE_FAULT,
145         SKD_DRVR_STATE_DISAPPEARED,
146         SKD_DRVR_STATE_PROTOCOL_MISMATCH,
147         SKD_DRVR_STATE_BUSY_ERASE,
148         SKD_DRVR_STATE_BUSY_SANITIZE,
149         SKD_DRVR_STATE_BUSY_IMMINENT,
150         SKD_DRVR_STATE_WAIT_BOOT,
151         SKD_DRVR_STATE_SYNCING,
152 };
153
154 #define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
155 #define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
156 #define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
157 #define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
158 #define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
159 #define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
160 #define SKD_START_WAIT_SECONDS  90u
161
162 enum skd_req_state {
163         SKD_REQ_STATE_IDLE,
164         SKD_REQ_STATE_SETUP,
165         SKD_REQ_STATE_BUSY,
166         SKD_REQ_STATE_COMPLETED,
167         SKD_REQ_STATE_TIMEOUT,
168         SKD_REQ_STATE_ABORTED,
169 };
170
171 enum skd_fit_msg_state {
172         SKD_MSG_STATE_IDLE,
173         SKD_MSG_STATE_BUSY,
174 };
175
176 enum skd_check_status_action {
177         SKD_CHECK_STATUS_REPORT_GOOD,
178         SKD_CHECK_STATUS_REPORT_SMART_ALERT,
179         SKD_CHECK_STATUS_REQUEUE_REQUEST,
180         SKD_CHECK_STATUS_REPORT_ERROR,
181         SKD_CHECK_STATUS_BUSY_IMMINENT,
182 };
183
184 struct skd_fitmsg_context {
185         enum skd_fit_msg_state state;
186
187         struct skd_fitmsg_context *next;
188
189         u32 id;
190         u16 outstanding;
191
192         u32 length;
193         u32 offset;
194
195         u8 *msg_buf;
196         dma_addr_t mb_dma_address;
197 };
198
199 struct skd_request_context {
200         enum skd_req_state state;
201
202         struct skd_request_context *next;
203
204         u16 id;
205         u32 fitmsg_id;
206
207         struct request *req;
208         u8 flush_cmd;
209
210         u32 timeout_stamp;
211         u8 sg_data_dir;
212         struct scatterlist *sg;
213         u32 n_sg;
214         u32 sg_byte_count;
215
216         struct fit_sg_descriptor *sksg_list;
217         dma_addr_t sksg_dma_address;
218
219         struct fit_completion_entry_v1 completion;
220
221         struct fit_comp_error_info err_info;
222
223 };
224 #define SKD_DATA_DIR_HOST_TO_CARD       1
225 #define SKD_DATA_DIR_CARD_TO_HOST       2
226
227 struct skd_special_context {
228         struct skd_request_context req;
229
230         u8 orphaned;
231
232         void *data_buf;
233         dma_addr_t db_dma_address;
234
235         u8 *msg_buf;
236         dma_addr_t mb_dma_address;
237 };
238
239 struct skd_sg_io {
240         fmode_t mode;
241         void __user *argp;
242
243         struct sg_io_hdr sg;
244
245         u8 cdb[16];
246
247         u32 dxfer_len;
248         u32 iovcnt;
249         struct sg_iovec *iov;
250         struct sg_iovec no_iov_iov;
251
252         struct skd_special_context *skspcl;
253 };
254
255 typedef enum skd_irq_type {
256         SKD_IRQ_LEGACY,
257         SKD_IRQ_MSI,
258         SKD_IRQ_MSIX
259 } skd_irq_type_t;
260
261 #define SKD_MAX_BARS                    2
262
263 struct skd_device {
264         volatile void __iomem *mem_map[SKD_MAX_BARS];
265         resource_size_t mem_phys[SKD_MAX_BARS];
266         u32 mem_size[SKD_MAX_BARS];
267
268         struct skd_msix_entry *msix_entries;
269
270         struct pci_dev *pdev;
271         int pcie_error_reporting_is_enabled;
272
273         spinlock_t lock;
274         struct gendisk *disk;
275         struct request_queue *queue;
276         struct device *class_dev;
277         int gendisk_on;
278         int sync_done;
279
280         u32 devno;
281         u32 major;
282         char isr_name[30];
283
284         enum skd_drvr_state state;
285         u32 drive_state;
286
287         u32 in_flight;
288         u32 cur_max_queue_depth;
289         u32 queue_low_water_mark;
290         u32 dev_max_queue_depth;
291
292         u32 num_fitmsg_context;
293         u32 num_req_context;
294
295         u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
296         u32 timeout_stamp;
297         struct skd_fitmsg_context *skmsg_free_list;
298         struct skd_fitmsg_context *skmsg_table;
299
300         struct skd_request_context *skreq_free_list;
301         struct skd_request_context *skreq_table;
302
303         struct skd_special_context *skspcl_free_list;
304         struct skd_special_context *skspcl_table;
305
306         struct skd_special_context internal_skspcl;
307         u32 read_cap_blocksize;
308         u32 read_cap_last_lba;
309         int read_cap_is_valid;
310         int inquiry_is_valid;
311         u8 inq_serial_num[13];  /*12 chars plus null term */
312
313         u8 skcomp_cycle;
314         u32 skcomp_ix;
315         struct fit_completion_entry_v1 *skcomp_table;
316         struct fit_comp_error_info *skerr_table;
317         dma_addr_t cq_dma_address;
318
319         wait_queue_head_t waitq;
320
321         struct timer_list timer;
322         u32 timer_countdown;
323         u32 timer_substate;
324
325         int n_special;
326         int sgs_per_request;
327         u32 last_mtd;
328
329         u32 proto_ver;
330
331         int dbg_level;
332         u32 connect_time_stamp;
333         int connect_retries;
334 #define SKD_MAX_CONNECT_RETRIES 16
335         u32 drive_jiffies;
336
337         u32 timo_slot;
338
339         struct work_struct completion_worker;
340 };
341
342 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
343 #define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
344 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
345
346 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
347 {
348         u32 val = readl(skdev->mem_map[1] + offset);
349
350         if (unlikely(skdev->dbg_level >= 2))
351                 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
352         return val;
353 }
354
355 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
356                                    u32 offset)
357 {
358         writel(val, skdev->mem_map[1] + offset);
359         if (unlikely(skdev->dbg_level >= 2))
360                 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
361 }
362
363 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
364                                    u32 offset)
365 {
366         writeq(val, skdev->mem_map[1] + offset);
367         if (unlikely(skdev->dbg_level >= 2))
368                 dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
369                         val);
370 }
371
372
373 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
374 static int skd_isr_type = SKD_IRQ_DEFAULT;
375
376 module_param(skd_isr_type, int, 0444);
377 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
378                  " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
379
380 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
381 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
382
383 module_param(skd_max_req_per_msg, int, 0444);
384 MODULE_PARM_DESC(skd_max_req_per_msg,
385                  "Maximum SCSI requests packed in a single message."
386                  " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
387
388 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
389 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
390 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
391
392 module_param(skd_max_queue_depth, int, 0444);
393 MODULE_PARM_DESC(skd_max_queue_depth,
394                  "Maximum SCSI requests issued to s1120."
395                  " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
396
397 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
398 module_param(skd_sgs_per_request, int, 0444);
399 MODULE_PARM_DESC(skd_sgs_per_request,
400                  "Maximum SG elements per block request."
401                  " (1-4096, default==256)");
402
403 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
404 module_param(skd_max_pass_thru, int, 0444);
405 MODULE_PARM_DESC(skd_max_pass_thru,
406                  "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
407
408 module_param(skd_dbg_level, int, 0444);
409 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
410
411 module_param(skd_isr_comp_limit, int, 0444);
412 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
413
414 /* Major device number dynamically assigned. */
415 static u32 skd_major;
416
417 static void skd_destruct(struct skd_device *skdev);
418 static const struct block_device_operations skd_blockdev_ops;
419 static void skd_send_fitmsg(struct skd_device *skdev,
420                             struct skd_fitmsg_context *skmsg);
421 static void skd_send_special_fitmsg(struct skd_device *skdev,
422                                     struct skd_special_context *skspcl);
423 static void skd_request_fn(struct request_queue *rq);
424 static void skd_end_request(struct skd_device *skdev,
425                 struct skd_request_context *skreq, blk_status_t status);
426 static bool skd_preop_sg_list(struct skd_device *skdev,
427                              struct skd_request_context *skreq);
428 static void skd_postop_sg_list(struct skd_device *skdev,
429                                struct skd_request_context *skreq);
430
431 static void skd_restart_device(struct skd_device *skdev);
432 static int skd_quiesce_dev(struct skd_device *skdev);
433 static int skd_unquiesce_dev(struct skd_device *skdev);
434 static void skd_release_special(struct skd_device *skdev,
435                                 struct skd_special_context *skspcl);
436 static void skd_disable_interrupts(struct skd_device *skdev);
437 static void skd_isr_fwstate(struct skd_device *skdev);
438 static void skd_recover_requests(struct skd_device *skdev, int requeue);
439 static void skd_soft_reset(struct skd_device *skdev);
440
441 const char *skd_drive_state_to_str(int state);
442 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
443 static void skd_log_skdev(struct skd_device *skdev, const char *event);
444 static void skd_log_skmsg(struct skd_device *skdev,
445                           struct skd_fitmsg_context *skmsg, const char *event);
446 static void skd_log_skreq(struct skd_device *skdev,
447                           struct skd_request_context *skreq, const char *event);
448
449 /*
450  *****************************************************************************
451  * READ/WRITE REQUESTS
452  *****************************************************************************
453  */
454 static void skd_fail_all_pending(struct skd_device *skdev)
455 {
456         struct request_queue *q = skdev->queue;
457         struct request *req;
458
459         for (;; ) {
460                 req = blk_peek_request(q);
461                 if (req == NULL)
462                         break;
463                 blk_start_request(req);
464                 __blk_end_request_all(req, BLK_STS_IOERR);
465         }
466 }
467
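/*
 * Build a READ(10) (0x28) or WRITE(10) (0x2a) CDB with the LBA and
 * transfer length packed big-endian.
 */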
468 static void
469 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
470                 int data_dir, unsigned lba,
471                 unsigned count)
472 {
473         if (data_dir == READ)
474                 scsi_req->cdb[0] = 0x28;
475         else
476                 scsi_req->cdb[0] = 0x2a;
477
478         scsi_req->cdb[1] = 0;
479         scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
480         scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
481         scsi_req->cdb[4] = (lba & 0xff00) >> 8;
482         scsi_req->cdb[5] = (lba & 0xff);
483         scsi_req->cdb[6] = 0;
484         scsi_req->cdb[7] = (count & 0xff00) >> 8;
485         scsi_req->cdb[8] = count & 0xff;
486         scsi_req->cdb[9] = 0;
487 }
488
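/*
 * Build a SYNCHRONIZE CACHE(10) CDB (opcode 0x35) with a zero block count,
 * used to service REQ_OP_FLUSH, and mark the request as a flush command.
 */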
489 static void
490 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
491                             struct skd_request_context *skreq)
492 {
493         skreq->flush_cmd = 1;
494
495         scsi_req->cdb[0] = 0x35;
496         scsi_req->cdb[1] = 0;
497         scsi_req->cdb[2] = 0;
498         scsi_req->cdb[3] = 0;
499         scsi_req->cdb[4] = 0;
500         scsi_req->cdb[5] = 0;
501         scsi_req->cdb[6] = 0;
502         scsi_req->cdb[7] = 0;
503         scsi_req->cdb[8] = 0;
504         scsi_req->cdb[9] = 0;
505 }
506
507 static void skd_request_fn_not_online(struct request_queue *q);
508
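/*
 * Legacy (single-queue) request_fn: pulls native block requests off the
 * queue, transcodes each into a SoFIT command, and packs up to
 * skd_max_req_per_msg commands into one FIT message before sending it.
 */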
509 static void skd_request_fn(struct request_queue *q)
510 {
511         struct skd_device *skdev = q->queuedata;
512         struct skd_fitmsg_context *skmsg = NULL;
513         struct fit_msg_hdr *fmh = NULL;
514         struct skd_request_context *skreq;
515         struct request *req = NULL;
516         struct skd_scsi_request *scsi_req;
517         unsigned long io_flags;
518         u32 lba;
519         u32 count;
520         int data_dir;
521         __be64 be_dmaa;
522         u64 cmdctxt;
523         u32 timo_slot;
524         void *cmd_ptr;
525         int flush, fua;
526
527         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
528                 skd_request_fn_not_online(q);
529                 return;
530         }
531
532         if (blk_queue_stopped(skdev->queue)) {
533                 if (skdev->skmsg_free_list == NULL ||
534                     skdev->skreq_free_list == NULL ||
535                     skdev->in_flight >= skdev->queue_low_water_mark)
536                         /* There is still some kind of shortage */
537                         return;
538
539                 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
540         }
541
542         /*
543          * Stop conditions:
544          *  - There are no more native requests
545          *  - There are already the maximum number of requests in progress
546          *  - There are no more skd_request_context entries
547          *  - There are no more FIT msg buffers
548          */
549         for (;; ) {
550
551                 flush = fua = 0;
552
553                 req = blk_peek_request(q);
554
555                 /* Are there any native requests to start? */
556                 if (req == NULL)
557                         break;
558
559                 lba = (u32)blk_rq_pos(req);
560                 count = blk_rq_sectors(req);
561                 data_dir = rq_data_dir(req);
562                 io_flags = req->cmd_flags;
563
564                 if (req_op(req) == REQ_OP_FLUSH)
565                         flush++;
566
567                 if (io_flags & REQ_FUA)
568                         fua++;
569
570                 dev_dbg(&skdev->pdev->dev,
571                         "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
572                         req, lba, lba, count, count, data_dir);
573
574                 /* At this point we know there is a request */
575
576                 /* Are too many requests already in progress? */
577                 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
578                         dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
579                                 skdev->in_flight, skdev->cur_max_queue_depth);
580                         break;
581                 }
582
583                 /* Is a skd_request_context available? */
584                 skreq = skdev->skreq_free_list;
585                 if (skreq == NULL) {
586                         dev_dbg(&skdev->pdev->dev, "Out of req=%p\n", q);
587                         break;
588                 }
589                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
590                 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
591
592                 /* Now we check to see if we can get a fit msg */
593                 if (skmsg == NULL) {
594                         if (skdev->skmsg_free_list == NULL) {
595                                 dev_dbg(&skdev->pdev->dev, "Out of msg\n");
596                                 break;
597                         }
598                 }
599
600                 skreq->flush_cmd = 0;
601                 skreq->n_sg = 0;
602                 skreq->sg_byte_count = 0;
603
604                 /*
605                  * OK to now dequeue request from q.
606                  *
607                  * At this point we are committed to either start or reject
608                  * the native request. Note that skd_request_context is
609                  * available but is still at the head of the free list.
610                  */
611                 blk_start_request(req);
612                 skreq->req = req;
613                 skreq->fitmsg_id = 0;
614
615                 /* Either a FIT msg is in progress or we have to start one. */
616                 if (skmsg == NULL) {
617                         /* Are there any FIT msg buffers available? */
618                         skmsg = skdev->skmsg_free_list;
619                         if (skmsg == NULL) {
620                                 dev_dbg(&skdev->pdev->dev,
621                                         "Out of msg skdev=%p\n",
622                                         skdev);
623                                 break;
624                         }
625                         SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
626                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
627
628                         skdev->skmsg_free_list = skmsg->next;
629
630                         skmsg->state = SKD_MSG_STATE_BUSY;
631                         skmsg->id += SKD_ID_INCR;
632
633                         /* Initialize the FIT msg header */
634                         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
635                         memset(fmh, 0, sizeof(*fmh));
636                         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
637                         skmsg->length = sizeof(*fmh);
638                 }
639
640                 skreq->fitmsg_id = skmsg->id;
641
642                 /*
643                  * Note that a FIT msg may have just been started
644                  * but contains no SoFIT requests yet.
645                  */
646
647                 /*
648                  * Transcode the request, checking as we go. The outcome of
649                  * the transcoding is represented by the error variable.
650                  */
651                 cmd_ptr = &skmsg->msg_buf[skmsg->length];
652                 memset(cmd_ptr, 0, 32);
653
654                 be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
655                 cmdctxt = skreq->id + SKD_ID_INCR;
656
657                 scsi_req = cmd_ptr;
658                 scsi_req->hdr.tag = cmdctxt;
659                 scsi_req->hdr.sg_list_dma_address = be_dmaa;
660
661                 if (data_dir == READ)
662                         skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
663                 else
664                         skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
665
666                 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
667                         skd_prep_zerosize_flush_cdb(scsi_req, skreq);
668                         SKD_ASSERT(skreq->flush_cmd == 1);
669                 } else {
670                         skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
671                 }
672
673                 if (fua)
674                         scsi_req->cdb[1] |= SKD_FUA_NV;
675
676                 if (!req->bio)
677                         goto skip_sg;
678
679                 if (!skd_preop_sg_list(skdev, skreq)) {
680                         /*
681                          * Complete the native request with error.
682                          * Note that the request context is still at the
683                          * head of the free list, and that the SoFIT request
684                          * was encoded into the FIT msg buffer but the FIT
685                          * msg length has not been updated. In short, the
686                          * only resource that has been allocated but might
687                          * not be used is that the FIT msg could be empty.
688                          */
689                         dev_dbg(&skdev->pdev->dev, "error Out\n");
690                         skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
691                         continue;
692                 }
693
694 skip_sg:
695                 scsi_req->hdr.sg_list_len_bytes =
696                         cpu_to_be32(skreq->sg_byte_count);
697
698                 /* Complete resource allocations. */
699                 skdev->skreq_free_list = skreq->next;
700                 skreq->state = SKD_REQ_STATE_BUSY;
701                 skreq->id += SKD_ID_INCR;
702
703                 skmsg->length += sizeof(struct skd_scsi_request);
704                 fmh->num_protocol_cmds_coalesced++;
705
706                 /*
707                  * Update the active request counts.
708                  * Capture the timeout timestamp.
709                  */
710                 skreq->timeout_stamp = skdev->timeout_stamp;
711                 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
712                 skdev->timeout_slot[timo_slot]++;
713                 skdev->in_flight++;
714                 dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
715                         skdev->in_flight);
716
717                 /*
718                  * If the FIT msg buffer is full send it.
719                  */
720                 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
721                     fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
722                         skd_send_fitmsg(skdev, skmsg);
723                         skmsg = NULL;
724                         fmh = NULL;
725                 }
726         }
727
728         /*
729          * Is a FIT msg in progress? If it is empty put the buffer back
730          * on the free list. If it is non-empty send what we got.
731          * This minimizes latency when there are fewer requests than
732          * what fits in a FIT msg.
733          */
734         if (skmsg != NULL) {
735                 /* Bigger than just a FIT msg header? */
736                 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
737                         dev_dbg(&skdev->pdev->dev, "sending msg=%p, len %d\n",
738                                 skmsg, skmsg->length);
739                         skd_send_fitmsg(skdev, skmsg);
740                 } else {
741                         /*
742                          * The FIT msg is empty. It means we got started
743                          * on the msg, but the requests were rejected.
744                          */
745                         skmsg->state = SKD_MSG_STATE_IDLE;
746                         skmsg->id += SKD_ID_INCR;
747                         skmsg->next = skdev->skmsg_free_list;
748                         skdev->skmsg_free_list = skmsg;
749                 }
750                 skmsg = NULL;
751                 fmh = NULL;
752         }
753
754         /*
755          * If req is non-NULL it means there is something to do but
756          * we are out of a resource.
757          */
758         if (req)
759                 blk_stop_queue(skdev->queue);
760 }
761
762 static void skd_end_request(struct skd_device *skdev,
763                 struct skd_request_context *skreq, blk_status_t error)
764 {
765         if (unlikely(error)) {
766                 struct request *req = skreq->req;
767                 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
768                 u32 lba = (u32)blk_rq_pos(req);
769                 u32 count = blk_rq_sectors(req);
770
771                 dev_err(&skdev->pdev->dev,
772                         "Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
773                         count, skreq->id);
774         } else
775                 dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", skreq->id,
776                         error);
777
778         __blk_end_request_all(skreq->req, error);
779 }
780
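/*
 * Map the request's bio segments into the per-request scatterlist, DMA-map
 * them in the appropriate direction, and mirror the result into the FIT SG
 * descriptor list walked by the device's DMA engine.
 */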
781 static bool skd_preop_sg_list(struct skd_device *skdev,
782                              struct skd_request_context *skreq)
783 {
784         struct request *req = skreq->req;
785         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
786         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
787         struct scatterlist *sg = &skreq->sg[0];
788         int n_sg;
789         int i;
790
791         skreq->sg_byte_count = 0;
792
793         /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
794                    skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
795
796         n_sg = blk_rq_map_sg(skdev->queue, req, sg);
797         if (n_sg <= 0)
798                 return false;
799
800         /*
801          * Map scatterlist to PCI bus addresses.
802          * Note PCI might change the number of entries.
803          */
804         n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
805         if (n_sg <= 0)
806                 return false;
807
808         SKD_ASSERT(n_sg <= skdev->sgs_per_request);
809
810         skreq->n_sg = n_sg;
811
812         for (i = 0; i < n_sg; i++) {
813                 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
814                 u32 cnt = sg_dma_len(&sg[i]);
815                 uint64_t dma_addr = sg_dma_address(&sg[i]);
816
817                 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
818                 sgd->byte_count = cnt;
819                 skreq->sg_byte_count += cnt;
820                 sgd->host_side_addr = dma_addr;
821                 sgd->dev_side_addr = 0;
822         }
823
824         skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
825         skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
826
827         if (unlikely(skdev->dbg_level > 1)) {
828                 dev_dbg(&skdev->pdev->dev,
829                         "skreq=%x sksg_list=%p sksg_dma=%llx\n",
830                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
831                 for (i = 0; i < n_sg; i++) {
832                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
833
834                         dev_dbg(&skdev->pdev->dev,
835                                 "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
836                                 i, sgd->byte_count, sgd->control,
837                                 sgd->host_side_addr, sgd->next_desc_ptr);
838                 }
839         }
840
841         return true;
842 }
843
844 static void skd_postop_sg_list(struct skd_device *skdev,
845                                struct skd_request_context *skreq)
846 {
847         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
848         int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
849
850         /*
851          * restore the next ptr for next IO request so we
852          * don't have to set it every time.
853          */
854         skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
855                 skreq->sksg_dma_address +
856                 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
857         pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
858 }
859
860 static void skd_request_fn_not_online(struct request_queue *q)
861 {
862         struct skd_device *skdev = q->queuedata;
863
864         SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
865
866         skd_log_skdev(skdev, "req_not_online");
867         switch (skdev->state) {
868         case SKD_DRVR_STATE_PAUSING:
869         case SKD_DRVR_STATE_PAUSED:
870         case SKD_DRVR_STATE_STARTING:
871         case SKD_DRVR_STATE_RESTARTING:
872         case SKD_DRVR_STATE_WAIT_BOOT:
873         /* In case of starting, we haven't started the queue,
874          * so we can't get here... but requests are
875          * possibly hanging out waiting for us because we
876          * reported the dev/skd0 already.  They'll wait
877          * forever if connect doesn't complete.
878          * What to do??? delay dev/skd0 ??
879          */
880         case SKD_DRVR_STATE_BUSY:
881         case SKD_DRVR_STATE_BUSY_IMMINENT:
882         case SKD_DRVR_STATE_BUSY_ERASE:
883         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
884                 return;
885
886         case SKD_DRVR_STATE_BUSY_SANITIZE:
887         case SKD_DRVR_STATE_STOPPING:
888         case SKD_DRVR_STATE_SYNCING:
889         case SKD_DRVR_STATE_FAULT:
890         case SKD_DRVR_STATE_DISAPPEARED:
891         default:
892                 break;
893         }
894
895         /* If we get here, terminate all pending block requests
896          * with EIO and any scsi pass thru with appropriate sense
897          */
898
899         skd_fail_all_pending(skdev);
900 }
901
902 /*
903  *****************************************************************************
904  * TIMER
905  *****************************************************************************
906  */
907
908 static void skd_timer_tick_not_online(struct skd_device *skdev);
909
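/*
 * One-second housekeeping timer: re-reads the drive state, advances the
 * timeout stamp, and drains the queue when a timeout slot still holds
 * requests from its previous use.
 */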
910 static void skd_timer_tick(ulong arg)
911 {
912         struct skd_device *skdev = (struct skd_device *)arg;
913
914         u32 timo_slot;
915         unsigned long reqflags;
916         u32 state;
917
918         if (skdev->state == SKD_DRVR_STATE_FAULT)
919                 /* The driver has declared fault, and we want it to
920                  * stay that way until driver is reloaded.
921                  */
922                 return;
923
924         spin_lock_irqsave(&skdev->lock, reqflags);
925
926         state = SKD_READL(skdev, FIT_STATUS);
927         state &= FIT_SR_DRIVE_STATE_MASK;
928         if (state != skdev->drive_state)
929                 skd_isr_fwstate(skdev);
930
931         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
932                 skd_timer_tick_not_online(skdev);
933                 goto timer_func_out;
934         }
935         skdev->timeout_stamp++;
936         timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
937
938                 /*
939                  * All requests that happened during the previous use of
940                  * this slot should be done by now; the previous use was
941                  * SKD_N_TIMEOUT_SLOT one-second ticks ago.
942                  */
943         if (skdev->timeout_slot[timo_slot] == 0)
944                 goto timer_func_out;
945
946         /* Something is overdue */
947         dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
948                 skdev->timeout_slot[timo_slot], skdev->in_flight);
949         dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
950                 skdev->timeout_slot[timo_slot], skdev->in_flight);
951
952         skdev->timer_countdown = SKD_DRAINING_TIMO;
953         skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
954         skdev->timo_slot = timo_slot;
955         blk_stop_queue(skdev->queue);
956
957 timer_func_out:
958         mod_timer(&skdev->timer, (jiffies + HZ));
959
960         spin_unlock_irqrestore(&skdev->lock, reqflags);
961 }
962
963 static void skd_timer_tick_not_online(struct skd_device *skdev)
964 {
965         switch (skdev->state) {
966         case SKD_DRVR_STATE_IDLE:
967         case SKD_DRVR_STATE_LOAD:
968                 break;
969         case SKD_DRVR_STATE_BUSY_SANITIZE:
970                 dev_dbg(&skdev->pdev->dev,
971                         "drive busy sanitize[%x], driver[%x]\n",
972                         skdev->drive_state, skdev->state);
973                 /* If we've been in sanitize for 3 seconds, we figure we're not
974                  * going to get any more completions, so recover requests now
975                  */
976                 if (skdev->timer_countdown > 0) {
977                         skdev->timer_countdown--;
978                         return;
979                 }
980                 skd_recover_requests(skdev, 0);
981                 break;
982
983         case SKD_DRVR_STATE_BUSY:
984         case SKD_DRVR_STATE_BUSY_IMMINENT:
985         case SKD_DRVR_STATE_BUSY_ERASE:
986                 dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
987                         skdev->state, skdev->timer_countdown);
988                 if (skdev->timer_countdown > 0) {
989                         skdev->timer_countdown--;
990                         return;
991                 }
992                 dev_dbg(&skdev->pdev->dev,
993                         "busy[%x], timedout=%d, restarting device.",
994                         skdev->state, skdev->timer_countdown);
995                 skd_restart_device(skdev);
996                 break;
997
998         case SKD_DRVR_STATE_WAIT_BOOT:
999         case SKD_DRVR_STATE_STARTING:
1000                 if (skdev->timer_countdown > 0) {
1001                         skdev->timer_countdown--;
1002                         return;
1003                 }
1004                 /* For now, we fault the drive.  Could attempt resets to
1005                  * recover at some point. */
1006                 skdev->state = SKD_DRVR_STATE_FAULT;
1007
1008                 dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
1009                         skdev->drive_state);
1010
1011                 /* start the queue so we can respond with error to requests */
1012                 /* wake up anyone waiting for startup complete */
1013                 blk_start_queue(skdev->queue);
1014                 skdev->gendisk_on = -1;
1015                 wake_up_interruptible(&skdev->waitq);
1016                 break;
1017
1018         case SKD_DRVR_STATE_ONLINE:
1019                 /* shouldn't get here. */
1020                 break;
1021
1022         case SKD_DRVR_STATE_PAUSING:
1023         case SKD_DRVR_STATE_PAUSED:
1024                 break;
1025
1026         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1027                 dev_dbg(&skdev->pdev->dev,
1028                         "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1029                         skdev->timo_slot, skdev->timer_countdown,
1030                         skdev->in_flight,
1031                         skdev->timeout_slot[skdev->timo_slot]);
1032                 /* if the slot has cleared we can let the I/O continue */
1033                 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1034                         dev_dbg(&skdev->pdev->dev,
1035                                 "Slot drained, starting queue.\n");
1036                         skdev->state = SKD_DRVR_STATE_ONLINE;
1037                         blk_start_queue(skdev->queue);
1038                         return;
1039                 }
1040                 if (skdev->timer_countdown > 0) {
1041                         skdev->timer_countdown--;
1042                         return;
1043                 }
1044                 skd_restart_device(skdev);
1045                 break;
1046
1047         case SKD_DRVR_STATE_RESTARTING:
1048                 if (skdev->timer_countdown > 0) {
1049                         skdev->timer_countdown--;
1050                         return;
1051                 }
1052                 /* For now, we fault the drive. Could attempt resets to
1053                  * recover at some point. */
1054                 skdev->state = SKD_DRVR_STATE_FAULT;
1055                 dev_err(&skdev->pdev->dev,
1056                         "DriveFault Reconnect Timeout (%x)\n",
1057                         skdev->drive_state);
1058
1059                 /*
1060                  * Recovering does two things:
1061                  * 1. completes IO with error
1062                  * 2. reclaims dma resources
1063                  * When is it safe to recover requests?
1064                  * - if the drive state is faulted
1065                  * - if the state is still soft reset after our timeout
1066                  * - if the drive registers are dead (state = FF)
1067                  * If it is "unsafe", we still need to recover, so we will
1068                  * disable pci bus mastering and disable our interrupts.
1069                  */
1070
1071                 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1072                     (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1073                     (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1074                         /* It never came out of soft reset. Try to
1075                          * recover the requests and then let them
1076                          * fail. This is to mitigate hung processes. */
1077                         skd_recover_requests(skdev, 0);
1078                 else {
1079                         dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
1080                                 skdev->drive_state);
1081                         pci_disable_device(skdev->pdev);
1082                         skd_disable_interrupts(skdev);
1083                         skd_recover_requests(skdev, 0);
1084                 }
1085
1086                 /* start the queue so we can respond with error to requests */
1087                 /* wake up anyone waiting for startup complete */
1088                 blk_start_queue(skdev->queue);
1089                 skdev->gendisk_on = -1;
1090                 wake_up_interruptible(&skdev->waitq);
1091                 break;
1092
1093         case SKD_DRVR_STATE_RESUMING:
1094         case SKD_DRVR_STATE_STOPPING:
1095         case SKD_DRVR_STATE_SYNCING:
1096         case SKD_DRVR_STATE_FAULT:
1097         case SKD_DRVR_STATE_DISAPPEARED:
1098         default:
1099                 break;
1100         }
1101 }
1102
1103 static int skd_start_timer(struct skd_device *skdev)
1104 {
1105         int rc;
1106
1107         init_timer(&skdev->timer);
1108         setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1109
1110         rc = mod_timer(&skdev->timer, (jiffies + HZ));
1111         if (rc)
1112                 dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
1113         return rc;
1114 }
1115
1116 static void skd_kill_timer(struct skd_device *skdev)
1117 {
1118         del_timer_sync(&skdev->timer);
1119 }
1120
1121 /*
1122  *****************************************************************************
1123  * IOCTL
1124  *****************************************************************************
1125  */
1126 static int skd_ioctl_sg_io(struct skd_device *skdev,
1127                            fmode_t mode, void __user *argp);
1128 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1129                                         struct skd_sg_io *sksgio);
1130 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1131                                    struct skd_sg_io *sksgio);
1132 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1133                                     struct skd_sg_io *sksgio);
1134 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1135                                  struct skd_sg_io *sksgio, int dxfer_dir);
1136 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1137                                  struct skd_sg_io *sksgio);
1138 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1139 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1140                                     struct skd_sg_io *sksgio);
1141 static int skd_sg_io_put_status(struct skd_device *skdev,
1142                                 struct skd_sg_io *sksgio);
1143
1144 static void skd_complete_special(struct skd_device *skdev,
1145                                  volatile struct fit_completion_entry_v1
1146                                  *skcomp,
1147                                  volatile struct fit_comp_error_info *skerr,
1148                                  struct skd_special_context *skspcl);
1149
1150 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1151                           uint cmd_in, ulong arg)
1152 {
1153         static const int sg_version_num = 30527;
1154         int rc = 0, timeout;
1155         struct gendisk *disk = bdev->bd_disk;
1156         struct skd_device *skdev = disk->private_data;
1157         int __user *p = (int __user *)arg;
1158
1159         dev_dbg(&skdev->pdev->dev,
1160                 "%s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
1161                 disk->disk_name, current->comm, mode, cmd_in, arg);
1162
1163         if (!capable(CAP_SYS_ADMIN))
1164                 return -EPERM;
1165
1166         switch (cmd_in) {
1167         case SG_SET_TIMEOUT:
1168                 rc = get_user(timeout, p);
1169                 if (!rc)
1170                         disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
1171                 break;
1172         case SG_GET_TIMEOUT:
1173                 rc = jiffies_to_clock_t(disk->queue->sg_timeout);
1174                 break;
1175         case SG_GET_VERSION_NUM:
1176                 rc = put_user(sg_version_num, p);
1177                 break;
1178         case SG_IO:
1179                 rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
1180                 break;
1181
1182         default:
1183                 rc = -ENOTTY;
1184                 break;
1185         }
1186
1187         dev_dbg(&skdev->pdev->dev, "%s:  completion rc %d\n", disk->disk_name,
1188                 rc);
1189         return rc;
1190 }
1191
1192 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1193                            void __user *argp)
1194 {
1195         int rc;
1196         struct skd_sg_io sksgio;
1197
1198         memset(&sksgio, 0, sizeof(sksgio));
1199         sksgio.mode = mode;
1200         sksgio.argp = argp;
1201         sksgio.iov = &sksgio.no_iov_iov;
1202
1203         switch (skdev->state) {
1204         case SKD_DRVR_STATE_ONLINE:
1205         case SKD_DRVR_STATE_BUSY_IMMINENT:
1206                 break;
1207
1208         default:
1209                 dev_dbg(&skdev->pdev->dev, "drive not online\n");
1210                 rc = -ENXIO;
1211                 goto out;
1212         }
1213
1214         rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1215         if (rc)
1216                 goto out;
1217
1218         rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1219         if (rc)
1220                 goto out;
1221
1222         rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1223         if (rc)
1224                 goto out;
1225
1226         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1227         if (rc)
1228                 goto out;
1229
1230         rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1231         if (rc)
1232                 goto out;
1233
1234         rc = skd_sg_io_await(skdev, &sksgio);
1235         if (rc)
1236                 goto out;
1237
1238         rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1239         if (rc)
1240                 goto out;
1241
1242         rc = skd_sg_io_put_status(skdev, &sksgio);
1243         if (rc)
1244                 goto out;
1245
1246         rc = 0;
1247
1248 out:
1249         skd_sg_io_release_skspcl(skdev, &sksgio);
1250
1251         if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1252                 kfree(sksgio.iov);
1253         return rc;
1254 }
1255
1256 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1257                                         struct skd_sg_io *sksgio)
1258 {
1259         struct sg_io_hdr *sgp = &sksgio->sg;
1260         int i, __maybe_unused acc;
1261
1262         if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1263                 dev_dbg(&skdev->pdev->dev, "access sg failed %p\n",
1264                         sksgio->argp);
1265                 return -EFAULT;
1266         }
1267
1268         if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1269                 dev_dbg(&skdev->pdev->dev, "copy_from_user sg failed %p\n",
1270                         sksgio->argp);
1271                 return -EFAULT;
1272         }
1273
1274         if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1275                 dev_dbg(&skdev->pdev->dev, "interface_id invalid 0x%x\n",
1276                         sgp->interface_id);
1277                 return -EINVAL;
1278         }
1279
1280         if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1281                 dev_dbg(&skdev->pdev->dev, "cmd_len invalid %d\n",
1282                         sgp->cmd_len);
1283                 return -EINVAL;
1284         }
1285
1286         if (sgp->iovec_count > 256) {
1287                 dev_dbg(&skdev->pdev->dev, "iovec_count invalid %d\n",
1288                         sgp->iovec_count);
1289                 return -EINVAL;
1290         }
1291
1292         if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1293                 dev_dbg(&skdev->pdev->dev, "dxfer_len invalid %d\n",
1294                         sgp->dxfer_len);
1295                 return -EINVAL;
1296         }
1297
1298         switch (sgp->dxfer_direction) {
1299         case SG_DXFER_NONE:
1300                 acc = -1;
1301                 break;
1302
1303         case SG_DXFER_TO_DEV:
1304                 acc = VERIFY_READ;
1305                 break;
1306
1307         case SG_DXFER_FROM_DEV:
1308         case SG_DXFER_TO_FROM_DEV:
1309                 acc = VERIFY_WRITE;
1310                 break;
1311
1312         default:
1313                 dev_dbg(&skdev->pdev->dev, "dxfer_dir invalid %d\n",
1314                         sgp->dxfer_direction);
1315                 return -EINVAL;
1316         }
1317
1318         if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1319                 dev_dbg(&skdev->pdev->dev, "copy_from_user cmdp failed %p\n",
1320                         sgp->cmdp);
1321                 return -EFAULT;
1322         }
1323
1324         if (sgp->mx_sb_len != 0) {
1325                 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1326                         dev_dbg(&skdev->pdev->dev, "access sbp failed %p\n",
1327                                 sgp->sbp);
1328                         return -EFAULT;
1329                 }
1330         }
1331
1332         if (sgp->iovec_count == 0) {
1333                 sksgio->iov[0].iov_base = sgp->dxferp;
1334                 sksgio->iov[0].iov_len = sgp->dxfer_len;
1335                 sksgio->iovcnt = 1;
1336                 sksgio->dxfer_len = sgp->dxfer_len;
1337         } else {
1338                 struct sg_iovec *iov;
1339                 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1340                 size_t iov_data_len;
1341
1342                 iov = kmalloc(nbytes, GFP_KERNEL);
1343                 if (iov == NULL) {
1344                         dev_dbg(&skdev->pdev->dev, "alloc iovec failed %d\n",
1345                                 sgp->iovec_count);
1346                         return -ENOMEM;
1347                 }
1348                 sksgio->iov = iov;
1349                 sksgio->iovcnt = sgp->iovec_count;
1350
1351                 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1352                         dev_dbg(&skdev->pdev->dev,
1353                                 "copy_from_user iovec failed %p\n",
1354                                 sgp->dxferp);
1355                         return -EFAULT;
1356                 }
1357
1358                 /*
1359                  * Sum up the vecs, making sure they don't overflow
1360                  */
1361                 iov_data_len = 0;
1362                 for (i = 0; i < sgp->iovec_count; i++) {
1363                         if (iov_data_len + iov[i].iov_len < iov_data_len)
1364                                 return -EINVAL;
1365                         iov_data_len += iov[i].iov_len;
1366                 }
1367
1368                 /* SG_IO howto says that the shorter of the two wins */
1369                 if (sgp->dxfer_len < iov_data_len) {
1370                         sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1371                                                      sgp->iovec_count,
1372                                                      sgp->dxfer_len);
1373                         sksgio->dxfer_len = sgp->dxfer_len;
1374                 } else
1375                         sksgio->dxfer_len = iov_data_len;
1376         }
1377
1378         if (sgp->dxfer_direction != SG_DXFER_NONE) {
1379                 struct sg_iovec *iov = sksgio->iov;
1380                 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1381                         if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1382                                 dev_dbg(&skdev->pdev->dev,
1383                                         "access data failed %p/%zd\n",
1384                                         iov->iov_base, iov->iov_len);
1385                                 return -EFAULT;
1386                         }
1387                 }
1388         }
1389
1390         return 0;
1391 }
1392
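/*
 * Take a special (pass-through) request context from the free list,
 * sleeping for up to the caller's SG_IO timeout if none is available.
 */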
1393 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1394                                    struct skd_sg_io *sksgio)
1395 {
1396         struct skd_special_context *skspcl = NULL;
1397         int rc;
1398
1399         for (;;) {
1400                 ulong flags;
1401
1402                 spin_lock_irqsave(&skdev->lock, flags);
1403                 skspcl = skdev->skspcl_free_list;
1404                 if (skspcl != NULL) {
1405                         skdev->skspcl_free_list =
1406                                 (struct skd_special_context *)skspcl->req.next;
1407                         skspcl->req.id += SKD_ID_INCR;
1408                         skspcl->req.state = SKD_REQ_STATE_SETUP;
1409                         skspcl->orphaned = 0;
1410                         skspcl->req.n_sg = 0;
1411                 }
1412                 spin_unlock_irqrestore(&skdev->lock, flags);
1413
1414                 if (skspcl != NULL) {
1415                         rc = 0;
1416                         break;
1417                 }
1418
1419                 dev_dbg(&skdev->pdev->dev, "blocking\n");
1420
1421                 rc = wait_event_interruptible_timeout(
1422                                 skdev->waitq,
1423                                 (skdev->skspcl_free_list != NULL),
1424                                 msecs_to_jiffies(sksgio->sg.timeout));
1425
1426                 dev_dbg(&skdev->pdev->dev, "unblocking, rc=%d\n", rc);
1427
1428                 if (rc <= 0) {
1429                         if (rc == 0)
1430                                 rc = -ETIMEDOUT;
1431                         else
1432                                 rc = -EINTR;
1433                         break;
1434                 }
1435                 /*
1436                  * If we get here rc > 0 meaning the timeout to
1437                  * wait_event_interruptible_timeout() had time left, hence the
1438                  * sought event -- non-empty free list -- happened.
1439                  * Retry the allocation.
1440                  */
1441         }
1442         sksgio->skspcl = skspcl;
1443
1444         return rc;
1445 }
1446
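/*
 * Allocate a chain of single-page bounce buffers covering the (4-byte
 * aligned) transfer length and describe it in both the kernel scatterlist
 * and the FIT SG descriptor list.
 */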
1447 static int skd_skreq_prep_buffering(struct skd_device *skdev,
1448                                     struct skd_request_context *skreq,
1449                                     u32 dxfer_len)
1450 {
1451         u32 resid = dxfer_len;
1452
1453         /*
1454          * The DMA engine must have aligned addresses and byte counts.
1455          */
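        /* (-resid) & 3 rounds resid up to the next multiple of 4, e.g. 10 -> 12 */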
1456         resid += (-resid) & 3;
1457         skreq->sg_byte_count = resid;
1458
1459         skreq->n_sg = 0;
1460
1461         while (resid > 0) {
1462                 u32 nbytes = PAGE_SIZE;
1463                 u32 ix = skreq->n_sg;
1464                 struct scatterlist *sg = &skreq->sg[ix];
1465                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1466                 struct page *page;
1467
1468                 if (nbytes > resid)
1469                         nbytes = resid;
1470
1471                 page = alloc_page(GFP_KERNEL);
1472                 if (page == NULL)
1473                         return -ENOMEM;
1474
1475                 sg_set_page(sg, page, nbytes, 0);
1476
1477                 /* TODO: This should be going through a pci_???()
1478                  * routine to do proper mapping. */
1479                 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1480                 sksg->byte_count = nbytes;
1481
1482                 sksg->host_side_addr = sg_phys(sg);
1483
1484                 sksg->dev_side_addr = 0;
1485                 sksg->next_desc_ptr = skreq->sksg_dma_address +
1486                                       (ix + 1) * sizeof(*sksg);
1487
1488                 skreq->n_sg++;
1489                 resid -= nbytes;
1490         }
1491
1492         if (skreq->n_sg > 0) {
1493                 u32 ix = skreq->n_sg - 1;
1494                 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1495
1496                 sksg->control = FIT_SGD_CONTROL_LAST;
1497                 sksg->next_desc_ptr = 0;
1498         }
1499
1500         if (unlikely(skdev->dbg_level > 1)) {
1501                 u32 i;
1502
1503                 dev_dbg(&skdev->pdev->dev,
1504                         "skreq=%x sksg_list=%p sksg_dma=%llx\n",
1505                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1506                 for (i = 0; i < skreq->n_sg; i++) {
1507                         struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1508
1509                         dev_dbg(&skdev->pdev->dev,
1510                                 "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
1511                                 i, sgd->byte_count, sgd->control,
1512                                 sgd->host_side_addr, sgd->next_desc_ptr);
1513                 }
1514         }
1515
1516         return 0;
1517 }
1518
1519 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1520                                     struct skd_sg_io *sksgio)
1521 {
1522         struct skd_special_context *skspcl = sksgio->skspcl;
1523         struct skd_request_context *skreq = &skspcl->req;
1524         u32 dxfer_len = sksgio->dxfer_len;
1525         int rc;
1526
1527         rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1528         /*
1529          * Eventually, errors or not, skd_release_special() is called
1530          * to recover allocations including partial allocations.
1531          */
1532         return rc;
1533 }
1534
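/*
 * Copy data between the caller's iovec list and the per-page bounce
 * buffer in the direction given by dxfer_dir. The copy only proceeds
 * when dxfer_dir matches the request's transfer direction, or for the
 * to-device leg of an SG_DXFER_TO_FROM_DEV request.
 */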
1535 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1536                                  struct skd_sg_io *sksgio, int dxfer_dir)
1537 {
1538         struct skd_special_context *skspcl = sksgio->skspcl;
1539         u32 iov_ix = 0;
1540         struct sg_iovec curiov;
1541         u32 sksg_ix = 0;
1542         u8 *bufp = NULL;
1543         u32 buf_len = 0;
1544         u32 resid = sksgio->dxfer_len;
1545         int rc;
1546
1547         curiov.iov_len = 0;
1548         curiov.iov_base = NULL;
1549
1550         if (dxfer_dir != sksgio->sg.dxfer_direction) {
1551                 if (dxfer_dir != SG_DXFER_TO_DEV ||
1552                     sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1553                         return 0;
1554         }
1555
1556         while (resid > 0) {
1557                 u32 nbytes = PAGE_SIZE;
1558
1559                 if (curiov.iov_len == 0) {
1560                         curiov = sksgio->iov[iov_ix++];
1561                         continue;
1562                 }
1563
1564                 if (buf_len == 0) {
1565                         struct page *page;
1566                         page = sg_page(&skspcl->req.sg[sksg_ix++]);
1567                         bufp = page_address(page);
1568                         buf_len = PAGE_SIZE;
1569                 }
1570
1571                 nbytes = min_t(u32, nbytes, resid);
1572                 nbytes = min_t(u32, nbytes, curiov.iov_len);
1573                 nbytes = min_t(u32, nbytes, buf_len);
1574
1575                 if (dxfer_dir == SG_DXFER_TO_DEV)
1576                         rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1577                 else
1578                         rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1579
1580                 if (rc)
1581                         return -EFAULT;
1582
1583                 resid -= nbytes;
1584                 curiov.iov_len -= nbytes;
1585                 curiov.iov_base += nbytes;
1586                 buf_len -= nbytes;
1587         }
1588
1589         return 0;
1590 }
1591
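/*
 * Build and issue a single-command FIT message for an SG_IO request:
 * a FIT header followed by one SCSI request carrying the caller's CDB,
 * the request tag and, when data is transferred, the SG list address.
 */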
1592 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1593                                  struct skd_sg_io *sksgio)
1594 {
1595         struct skd_special_context *skspcl = sksgio->skspcl;
1596         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1597         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1598
1599         memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1600
1601         /* Initialize the FIT msg header */
1602         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1603         fmh->num_protocol_cmds_coalesced = 1;
1604
1605         /* Initialize the SCSI request */
1606         if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1607                 scsi_req->hdr.sg_list_dma_address =
1608                         cpu_to_be64(skspcl->req.sksg_dma_address);
1609         scsi_req->hdr.tag = skspcl->req.id;
1610         scsi_req->hdr.sg_list_len_bytes =
1611                 cpu_to_be32(skspcl->req.sg_byte_count);
1612         memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1613
1614         skspcl->req.state = SKD_REQ_STATE_BUSY;
1615         skd_send_special_fitmsg(skdev, skspcl);
1616
1617         return 0;
1618 }
1619
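/*
 * Wait for an SG_IO special request to complete. If the request was
 * aborted, fabricate a CHECK CONDITION with ABORTED COMMAND sense
 * data. If it is still busy after a timeout or a signal, mark it
 * orphaned so it is reclaimed when its completion eventually arrives.
 */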
1620 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1621 {
1622         unsigned long flags;
1623         int rc;
1624
1625         rc = wait_event_interruptible_timeout(skdev->waitq,
1626                                               (sksgio->skspcl->req.state !=
1627                                                SKD_REQ_STATE_BUSY),
1628                                               msecs_to_jiffies(
1629                                                       sksgio->sg.timeout));
1630
1631         spin_lock_irqsave(&skdev->lock, flags);
1632
1633         if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1634                 dev_dbg(&skdev->pdev->dev, "skspcl %p aborted\n",
1635                         sksgio->skspcl);
1636
1637                 /* Build a check condition with sense data and let the
1638                  * command finish. For a timeout we must fabricate the
1639                  * completion and sense data ourselves. */
1640                 sksgio->skspcl->req.completion.status =
1641                         SAM_STAT_CHECK_CONDITION;
1642
1643                 memset(&sksgio->skspcl->req.err_info, 0,
1644                        sizeof(sksgio->skspcl->req.err_info));
1645                 sksgio->skspcl->req.err_info.type = 0x70;
1646                 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1647                 sksgio->skspcl->req.err_info.code = 0x44;
1648                 sksgio->skspcl->req.err_info.qual = 0;
1649                 rc = 0;
1650         } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1651                 /* No longer on the adapter. We finish. */
1652                 rc = 0;
1653         else {
1654                 /* Something's gone wrong. Still busy. Timeout or
1655                  * user interrupted (control-C). Mark as an orphan
1656                  * so it will be disposed when completed. */
1657                 sksgio->skspcl->orphaned = 1;
1658                 sksgio->skspcl = NULL;
1659                 if (rc == 0) {
1660                         dev_dbg(&skdev->pdev->dev, "timed out %p (%u ms)\n",
1661                                 sksgio, sksgio->sg.timeout);
1662                         rc = -ETIMEDOUT;
1663                 } else {
1664                         dev_dbg(&skdev->pdev->dev, "cntlc %p\n", sksgio);
1665                         rc = -EINTR;
1666                 }
1667         }
1668
1669         spin_unlock_irqrestore(&skdev->lock, flags);
1670
1671         return rc;
1672 }
1673
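/*
 * Copy the completion status, residual byte count and, on a CHECK
 * CONDITION, the sense data back into the caller's sg_io_hdr.
 */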
1674 static int skd_sg_io_put_status(struct skd_device *skdev,
1675                                 struct skd_sg_io *sksgio)
1676 {
1677         struct sg_io_hdr *sgp = &sksgio->sg;
1678         struct skd_special_context *skspcl = sksgio->skspcl;
1679         int resid = 0;
1680
1681         u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1682
1683         sgp->status = skspcl->req.completion.status;
1684         resid = sksgio->dxfer_len - nb;
1685
1686         sgp->masked_status = sgp->status & STATUS_MASK;
1687         sgp->msg_status = 0;
1688         sgp->host_status = 0;
1689         sgp->driver_status = 0;
1690         sgp->resid = resid;
1691         if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1692                 sgp->info |= SG_INFO_CHECK;
1693
1694         dev_dbg(&skdev->pdev->dev, "status %x masked %x resid 0x%x\n",
1695                 sgp->status, sgp->masked_status, sgp->resid);
1696
1697         if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1698                 if (sgp->mx_sb_len > 0) {
1699                         struct fit_comp_error_info *ei = &skspcl->req.err_info;
1700                         u32 nbytes = sizeof(*ei);
1701
1702                         nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1703
1704                         sgp->sb_len_wr = nbytes;
1705
1706                         if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1707                                 dev_dbg(&skdev->pdev->dev,
1708                                         "copy_to_user sense failed %p\n",
1709                                         sgp->sbp);
1710                                 return -EFAULT;
1711                         }
1712                 }
1713         }
1714
1715         if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1716                 dev_dbg(&skdev->pdev->dev, "copy_to_user sg failed %p\n",
1717                         sksgio->argp);
1718                 return -EFAULT;
1719         }
1720
1721         return 0;
1722 }
1723
1724 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1725                                     struct skd_sg_io *sksgio)
1726 {
1727         struct skd_special_context *skspcl = sksgio->skspcl;
1728
1729         if (skspcl != NULL) {
1730                 ulong flags;
1731
1732                 sksgio->skspcl = NULL;
1733
1734                 spin_lock_irqsave(&skdev->lock, flags);
1735                 skd_release_special(skdev, skspcl);
1736                 spin_unlock_irqrestore(&skdev->lock, flags);
1737         }
1738
1739         return 0;
1740 }
1741
1742 /*
1743  *****************************************************************************
1744  * INTERNAL REQUESTS -- generated by driver itself
1745  *****************************************************************************
1746  */
1747
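/*
 * Pre-format the driver's single internal special context: a FIT
 * header plus one SCSI request whose SG list is a lone descriptor
 * pointing at the context's data buffer. Individual internal commands
 * then only need to fill in the CDB and byte counts.
 */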
1748 static int skd_format_internal_skspcl(struct skd_device *skdev)
1749 {
1750         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1751         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1752         struct fit_msg_hdr *fmh;
1753         uint64_t dma_address;
1754         struct skd_scsi_request *scsi;
1755
1756         fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1757         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1758         fmh->num_protocol_cmds_coalesced = 1;
1759
1760         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1761         memset(scsi, 0, sizeof(*scsi));
1762         dma_address = skspcl->req.sksg_dma_address;
1763         scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1764         sgd->control = FIT_SGD_CONTROL_LAST;
1765         sgd->byte_count = 0;
1766         sgd->host_side_addr = skspcl->db_dma_address;
1767         sgd->dev_side_addr = 0;
1768         sgd->next_desc_ptr = 0LL;
1769
1770         return 1;
1771 }
1772
1773 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1774
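/*
 * Issue one of the driver-internal SCSI commands (TEST UNIT READY,
 * READ CAPACITY, INQUIRY, SYNCHRONIZE CACHE, WRITE/READ BUFFER) using
 * the pre-formatted internal context. If a previous internal command
 * is still in flight, the new request is silently skipped.
 */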
1775 static void skd_send_internal_skspcl(struct skd_device *skdev,
1776                                      struct skd_special_context *skspcl,
1777                                      u8 opcode)
1778 {
1779         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1780         struct skd_scsi_request *scsi;
1781         unsigned char *buf = skspcl->data_buf;
1782         int i;
1783
1784         if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1785                 /*
1786                  * A refresh is already in progress.
1787                  * Just wait for it to finish.
1788                  */
1789                 return;
1790
1791         SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1792         skspcl->req.state = SKD_REQ_STATE_BUSY;
1793         skspcl->req.id += SKD_ID_INCR;
1794
1795         scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1796         scsi->hdr.tag = skspcl->req.id;
1797
1798         memset(scsi->cdb, 0, sizeof(scsi->cdb));
1799
1800         switch (opcode) {
1801         case TEST_UNIT_READY:
1802                 scsi->cdb[0] = TEST_UNIT_READY;
1803                 sgd->byte_count = 0;
1804                 scsi->hdr.sg_list_len_bytes = 0;
1805                 break;
1806
1807         case READ_CAPACITY:
1808                 scsi->cdb[0] = READ_CAPACITY;
1809                 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1810                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1811                 break;
1812
1813         case INQUIRY:
1814                 scsi->cdb[0] = INQUIRY;
1815                 scsi->cdb[1] = 0x01;    /* evpd */
1816                 scsi->cdb[2] = 0x80;    /* serial number page */
1817                 scsi->cdb[4] = 0x10;
1818                 sgd->byte_count = 16;
1819                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1820                 break;
1821
1822         case SYNCHRONIZE_CACHE:
1823                 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1824                 sgd->byte_count = 0;
1825                 scsi->hdr.sg_list_len_bytes = 0;
1826                 break;
1827
1828         case WRITE_BUFFER:
1829                 scsi->cdb[0] = WRITE_BUFFER;
1830                 scsi->cdb[1] = 0x02;
1831                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1832                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1833                 sgd->byte_count = WR_BUF_SIZE;
1834                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1835                 /* fill incrementing byte pattern */
1836                 for (i = 0; i < sgd->byte_count; i++)
1837                         buf[i] = i & 0xFF;
1838                 break;
1839
1840         case READ_BUFFER:
1841                 scsi->cdb[0] = READ_BUFFER;
1842                 scsi->cdb[1] = 0x02;
1843                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1844                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1845                 sgd->byte_count = WR_BUF_SIZE;
1846                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1847                 memset(skspcl->data_buf, 0, sgd->byte_count);
1848                 break;
1849
1850         default:
1851                 SKD_ASSERT(!"Don't know what to send");
1852                 return;
1853
1854         }
1855         skd_send_special_fitmsg(skdev, skspcl);
1856 }
1857
1858 static void skd_refresh_device_data(struct skd_device *skdev)
1859 {
1860         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1861
1862         skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1863 }
1864
1865 static int skd_chk_read_buf(struct skd_device *skdev,
1866                             struct skd_special_context *skspcl)
1867 {
1868         unsigned char *buf = skspcl->data_buf;
1869         int i;
1870
1871         /* check for incrementing byte pattern */
1872         for (i = 0; i < WR_BUF_SIZE; i++)
1873                 if (buf[i] != (i & 0xFF))
1874                         return 1;
1875
1876         return 0;
1877 }
1878
1879 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1880                                  u8 code, u8 qual, u8 fruc)
1881 {
1882         /* If the check condition is of special interest, log a message */
1883         if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1884             && (code == 0x04) && (qual == 0x06)) {
1885                 dev_err(&skdev->pdev->dev,
1886                         "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
1887                         key, code, qual, fruc);
1888         }
1889 }
1890
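/*
 * Completion handler for the internal command sequence that brings a
 * drive online:
 *   TEST UNIT READY -> WRITE BUFFER -> READ BUFFER (pattern check) ->
 *   READ CAPACITY -> INQUIRY (serial number) -> unquiesce.
 * Most failures retry by restarting from TEST UNIT READY; a W/R buffer
 * mismatch triggers a soft reset, bounded by SKD_MAX_CONNECT_RETRIES.
 */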
1891 static void skd_complete_internal(struct skd_device *skdev,
1892                                   volatile struct fit_completion_entry_v1
1893                                   *skcomp,
1894                                   volatile struct fit_comp_error_info *skerr,
1895                                   struct skd_special_context *skspcl)
1896 {
1897         u8 *buf = skspcl->data_buf;
1898         u8 status;
1899         int i;
1900         struct skd_scsi_request *scsi =
1901                 (struct skd_scsi_request *)&skspcl->msg_buf[64];
1902
1903         lockdep_assert_held(&skdev->lock);
1904
1905         SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1906
1907         dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
1908
1909         skspcl->req.completion = *skcomp;
1910         skspcl->req.state = SKD_REQ_STATE_IDLE;
1911         skspcl->req.id += SKD_ID_INCR;
1912
1913         status = skspcl->req.completion.status;
1914
1915         skd_log_check_status(skdev, status, skerr->key, skerr->code,
1916                              skerr->qual, skerr->fruc);
1917
1918         switch (scsi->cdb[0]) {
1919         case TEST_UNIT_READY:
1920                 if (status == SAM_STAT_GOOD)
1921                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1922                 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1923                          (skerr->key == MEDIUM_ERROR))
1924                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1925                 else {
1926                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1927                                 dev_dbg(&skdev->pdev->dev,
1928                                         "TUR failed, don't send anymore state 0x%x\n",
1929                                         skdev->state);
1930                                 return;
1931                         }
1932                         dev_dbg(&skdev->pdev->dev,
1933                                 "**** TUR failed, retry skerr\n");
1934                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
1935                 }
1936                 break;
1937
1938         case WRITE_BUFFER:
1939                 if (status == SAM_STAT_GOOD)
1940                         skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1941                 else {
1942                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1943                                 dev_dbg(&skdev->pdev->dev,
1944                                         "write buffer failed, don't send anymore state 0x%x\n",
1945                                         skdev->state);
1946                                 return;
1947                         }
1948                         dev_dbg(&skdev->pdev->dev,
1949                                 "**** write buffer failed, retry skerr\n");
1950                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
1951                 }
1952                 break;
1953
1954         case READ_BUFFER:
1955                 if (status == SAM_STAT_GOOD) {
1956                         if (skd_chk_read_buf(skdev, skspcl) == 0)
1957                                 skd_send_internal_skspcl(skdev, skspcl,
1958                                                          READ_CAPACITY);
1959                         else {
1960                                 dev_err(&skdev->pdev->dev,
1961                                         "*** W/R Buffer mismatch %d ***\n",
1962                                         skdev->connect_retries);
1963                                 if (skdev->connect_retries <
1964                                     SKD_MAX_CONNECT_RETRIES) {
1965                                         skdev->connect_retries++;
1966                                         skd_soft_reset(skdev);
1967                                 } else {
1968                                         dev_err(&skdev->pdev->dev,
1969                                                 "W/R Buffer Connect Error\n");
1970                                         return;
1971                                 }
1972                         }
1973
1974                 } else {
1975                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1976                                 dev_dbg(&skdev->pdev->dev,
1977                                         "read buffer failed, don't send anymore state 0x%x\n",
1978                                         skdev->state);
1979                                 return;
1980                         }
1981                         dev_dbg(&skdev->pdev->dev,
1982                                 "**** read buffer failed, retry skerr\n");
1983                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
1984                 }
1985                 break;
1986
1987         case READ_CAPACITY:
1988                 skdev->read_cap_is_valid = 0;
1989                 if (status == SAM_STAT_GOOD) {
1990                         skdev->read_cap_last_lba =
1991                                 (buf[0] << 24) | (buf[1] << 16) |
1992                                 (buf[2] << 8) | buf[3];
1993                         skdev->read_cap_blocksize =
1994                                 (buf[4] << 24) | (buf[5] << 16) |
1995                                 (buf[6] << 8) | buf[7];
1996
1997                         dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
1998                                 skdev->read_cap_last_lba,
1999                                 skdev->read_cap_blocksize);
2000
2001                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2002
2003                         skdev->read_cap_is_valid = 1;
2004
2005                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2006                 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2007                            (skerr->key == MEDIUM_ERROR)) {
2008                         skdev->read_cap_last_lba = ~0;
2009                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2010                         dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
2011                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2012                 } else {
2013                         dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
2014                         skd_send_internal_skspcl(skdev, skspcl,
2015                                                  TEST_UNIT_READY);
2016                 }
2017                 break;
2018
2019         case INQUIRY:
2020                 skdev->inquiry_is_valid = 0;
2021                 if (status == SAM_STAT_GOOD) {
2022                         skdev->inquiry_is_valid = 1;
2023
2024                         for (i = 0; i < 12; i++)
2025                                 skdev->inq_serial_num[i] = buf[i + 4];
2026                         skdev->inq_serial_num[12] = 0;
2027                 }
2028
2029                 if (skd_unquiesce_dev(skdev) < 0)
2030                         dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
2031                  /* connection is complete */
2032                 skdev->connect_retries = 0;
2033                 break;
2034
2035         case SYNCHRONIZE_CACHE:
2036                 if (status == SAM_STAT_GOOD)
2037                         skdev->sync_done = 1;
2038                 else
2039                         skdev->sync_done = -1;
2040                 wake_up_interruptible(&skdev->waitq);
2041                 break;
2042
2043         default:
2044                 SKD_ASSERT(!"we didn't send this");
2045         }
2046 }
2047
2048 /*
2049  *****************************************************************************
2050  * FIT MESSAGES
2051  *****************************************************************************
2052  */
2053
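/*
 * Ring the doorbell for a normal (r/w) FIT message: the queue command
 * is the message's DMA address with the queue ID and an encoded
 * message size OR'ed into the low bits.
 */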
2054 static void skd_send_fitmsg(struct skd_device *skdev,
2055                             struct skd_fitmsg_context *skmsg)
2056 {
2057         u64 qcmd;
2058         struct fit_msg_hdr *fmh;
2059
2060         dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
2061                 skmsg->mb_dma_address, skdev->in_flight);
2062         dev_dbg(&skdev->pdev->dev, "msg_buf 0x%p, offset %x\n", skmsg->msg_buf,
2063                 skmsg->offset);
2064
2065         qcmd = skmsg->mb_dma_address;
2066         qcmd |= FIT_QCMD_QID_NORMAL;
2067
2068         fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2069         skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2070
2071         if (unlikely(skdev->dbg_level > 1)) {
2072                 u8 *bp = (u8 *)skmsg->msg_buf;
2073                 int i;
2074                 for (i = 0; i < skmsg->length; i += 8) {
2075                         dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
2076                                 &bp[i]);
2077                         if (i == 0)
2078                                 i = 64 - 8;
2079                 }
2080         }
2081
2082         if (skmsg->length > 256)
2083                 qcmd |= FIT_QCMD_MSGSIZE_512;
2084         else if (skmsg->length > 128)
2085                 qcmd |= FIT_QCMD_MSGSIZE_256;
2086         else if (skmsg->length > 64)
2087                 qcmd |= FIT_QCMD_MSGSIZE_128;
2088         else
2089                 /*
2090                  * This makes no sense because the FIT msg header is
2091                  * 64 bytes. If the msg is only 64 bytes long it has
2092                  * no payload.
2093                  */
2094                 qcmd |= FIT_QCMD_MSGSIZE_64;
2095
2096         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2097         smp_wmb();
2098
2099         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2100 }
2101
2102 static void skd_send_special_fitmsg(struct skd_device *skdev,
2103                                     struct skd_special_context *skspcl)
2104 {
2105         u64 qcmd;
2106
2107         if (unlikely(skdev->dbg_level > 1)) {
2108                 u8 *bp = (u8 *)skspcl->msg_buf;
2109                 int i;
2110
2111                 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2112                         dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
2113                                 &bp[i]);
2114                         if (i == 0)
2115                                 i = 64 - 8;
2116                 }
2117
2118                 dev_dbg(&skdev->pdev->dev,
2119                         "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2120                         skspcl, skspcl->req.id, skspcl->req.sksg_list,
2121                         skspcl->req.sksg_dma_address);
2122                 for (i = 0; i < skspcl->req.n_sg; i++) {
2123                         struct fit_sg_descriptor *sgd =
2124                                 &skspcl->req.sksg_list[i];
2125
2126                         dev_dbg(&skdev->pdev->dev,
2127                                 "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
2128                                 i, sgd->byte_count, sgd->control,
2129                                 sgd->host_side_addr, sgd->next_desc_ptr);
2130                 }
2131         }
2132
2133         /*
2134          * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2135          * and one 64-byte SSDI command.
2136          */
2137         qcmd = skspcl->mb_dma_address;
2138         qcmd |= FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_128;
2139
2140         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2141         smp_wmb();
2142
2143         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2144 }
2145
2146 /*
2147  *****************************************************************************
2148  * COMPLETION QUEUE
2149  *****************************************************************************
2150  */
2151
2152 static void skd_complete_other(struct skd_device *skdev,
2153                                volatile struct fit_completion_entry_v1 *skcomp,
2154                                volatile struct fit_comp_error_info *skerr);
2155
2156 struct sns_info {
2157         u8 type;
2158         u8 stat;
2159         u8 key;
2160         u8 asc;
2161         u8 ascq;
2162         u8 mask;
2163         enum skd_check_status_action action;
2164 };
2165
2166 static struct sns_info skd_chkstat_table[] = {
2167         /* Good */
2168         { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
2169           SKD_CHECK_STATUS_REPORT_GOOD },
2170
2171         /* Smart alerts */
2172         { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
2173           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2174         { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
2175           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2176         { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
2177           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2178
2179         /* Retry (with limits) */
2180         { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
2181           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2182         { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
2183           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2184         { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
2185           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2186         { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
2187           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2188
2189         /* Busy (or about to be) */
2190         { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
2191           SKD_CHECK_STATUS_BUSY_IMMINENT },
2192 };
2193
2194 /*
2195  * Look up status and sense data to decide how to handle the error
2196  * from the device.
2197  * mask says which fields must match, e.g. mask=0x18 means check
2198  * type and stat, ignore key, asc, ascq.
2199  */
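/*
 * For example, the "Good" entry above uses mask 0x1c: type, stat and
 * key must match (0x70 / 0x02 / RECOVERED_ERROR) while asc and ascq
 * are ignored.
 */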
2200
2201 static enum skd_check_status_action
2202 skd_check_status(struct skd_device *skdev,
2203                  u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2204 {
2205         int i, n;
2206
2207         dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2208                 skerr->key, skerr->code, skerr->qual, skerr->fruc);
2209
2210         dev_dbg(&skdev->pdev->dev,
2211                 "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2212                 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
2213                 skerr->fruc);
2214
2215         /* Does the info match an entry in the table? */
2216         n = ARRAY_SIZE(skd_chkstat_table);
2217         for (i = 0; i < n; i++) {
2218                 struct sns_info *sns = &skd_chkstat_table[i];
2219
2220                 if (sns->mask & 0x10)
2221                         if (skerr->type != sns->type)
2222                                 continue;
2223
2224                 if (sns->mask & 0x08)
2225                         if (cmp_status != sns->stat)
2226                                 continue;
2227
2228                 if (sns->mask & 0x04)
2229                         if (skerr->key != sns->key)
2230                                 continue;
2231
2232                 if (sns->mask & 0x02)
2233                         if (skerr->code != sns->asc)
2234                                 continue;
2235
2236                 if (sns->mask & 0x01)
2237                         if (skerr->qual != sns->ascq)
2238                                 continue;
2239
2240                 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2241                         dev_err(&skdev->pdev->dev,
2242                                 "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
2243                                 skerr->key, skerr->code, skerr->qual);
2244                 }
2245                 return sns->action;
2246         }
2247
2248         /* No other match, so nonzero status means error,
2249          * zero status means good
2250          */
2251         if (cmp_status) {
2252                 dev_dbg(&skdev->pdev->dev, "status check: error\n");
2253                 return SKD_CHECK_STATUS_REPORT_ERROR;
2254         }
2255
2256         dev_dbg(&skdev->pdev->dev, "status check good default\n");
2257         return SKD_CHECK_STATUS_REPORT_GOOD;
2258 }
2259
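/*
 * Decide what to do with a request that completed with a non-good
 * status: report it successful anyway, requeue it (with a bounded
 * retry count), quiesce ahead of an imminent drive busy, or fail it
 * with an I/O error.
 */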
2260 static void skd_resolve_req_exception(struct skd_device *skdev,
2261                                       struct skd_request_context *skreq)
2262 {
2263         u8 cmp_status = skreq->completion.status;
2264
2265         switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2266         case SKD_CHECK_STATUS_REPORT_GOOD:
2267         case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2268                 skd_end_request(skdev, skreq, BLK_STS_OK);
2269                 break;
2270
2271         case SKD_CHECK_STATUS_BUSY_IMMINENT:
2272                 skd_log_skreq(skdev, skreq, "retry(busy)");
2273                 blk_requeue_request(skdev->queue, skreq->req);
2274                 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
2275                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2276                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2277                 skd_quiesce_dev(skdev);
2278                 break;
2279
2280         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2281                 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2282                         skd_log_skreq(skdev, skreq, "retry");
2283                         blk_requeue_request(skdev->queue, skreq->req);
2284                         break;
2285                 }
2286                 /* fall through */
2287
2288         case SKD_CHECK_STATUS_REPORT_ERROR:
2289         default:
2290                 skd_end_request(skdev, skreq, BLK_STS_IOERR);
2291                 break;
2292         }
2293 }
2294
2295 /* assume spinlock is already held */
2296 static void skd_release_skreq(struct skd_device *skdev,
2297                               struct skd_request_context *skreq)
2298 {
2299         u32 msg_slot;
2300         struct skd_fitmsg_context *skmsg;
2301
2302         u32 timo_slot;
2303
2304         /*
2305          * Reclaim the FIT msg buffer if this is
2306          * the first of the requests it carried to
2307          * be completed. The FIT msg buffer used to
2308          * send this request cannot be reused until
2309          * we are sure the s1120 card has copied
2310          * it to its memory. The FIT msg might have
2311          * contained several requests. As soon as
2312          * any of them are completed we know that
2313          * the entire FIT msg was transferred.
2314          * Only the first completed request will
2315          * match the FIT msg buffer id. The FIT
2316          * msg buffer id is immediately updated.
2317          * When subsequent requests complete the FIT
2318          * msg buffer id won't match, so we know
2319          * quite cheaply that it is already done.
2320          */
2321         msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2322         SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2323
2324         skmsg = &skdev->skmsg_table[msg_slot];
2325         if (skmsg->id == skreq->fitmsg_id) {
2326                 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2327                 SKD_ASSERT(skmsg->outstanding > 0);
2328                 skmsg->outstanding--;
2329                 if (skmsg->outstanding == 0) {
2330                         skmsg->state = SKD_MSG_STATE_IDLE;
2331                         skmsg->id += SKD_ID_INCR;
2332                         skmsg->next = skdev->skmsg_free_list;
2333                         skdev->skmsg_free_list = skmsg;
2334                 }
2335         }
2336
2337         /*
2338          * Decrease the number of active requests.
2339          * Also decrements the count in the timeout slot.
2340          */
2341         SKD_ASSERT(skdev->in_flight > 0);
2342         skdev->in_flight -= 1;
2343
2344         timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2345         SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2346         skdev->timeout_slot[timo_slot] -= 1;
2347
2348         /*
2349          * Reset backpointer
2350          */
2351         skreq->req = NULL;
2352
2353         /*
2354          * Reclaim the skd_request_context
2355          */
2356         skreq->state = SKD_REQ_STATE_IDLE;
2357         skreq->id += SKD_ID_INCR;
2358         skreq->next = skdev->skreq_free_list;
2359         skdev->skreq_free_list = skreq;
2360 }
2361
2362 #define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
2363
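/*
 * Post-process an INQUIRY "supported VPD pages" (page 0x00) response:
 * splice the driver's private page code into the list returned by the
 * device, keeping the list in ascending order, and fix up the page
 * length and returned byte count to match.
 */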
2364 static void skd_do_inq_page_00(struct skd_device *skdev,
2365                                volatile struct fit_completion_entry_v1 *skcomp,
2366                                volatile struct fit_comp_error_info *skerr,
2367                                uint8_t *cdb, uint8_t *buf)
2368 {
2369         uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2370
2371         /* Caller requested "supported pages".  The driver needs to insert
2372          * its page.
2373          */
2374         dev_dbg(&skdev->pdev->dev,
2375                 "skd_do_driver_inquiry: modify supported pages.\n");
2376
2377         /* If the device rejected the request because the CDB was
2378          * improperly formed, then just leave.
2379          */
2380         if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2381             skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2382                 return;
2383
2384         /* Get the amount of space the caller allocated */
2385         max_bytes = (cdb[3] << 8) | cdb[4];
2386
2387         /* Get the number of pages actually returned by the device */
2388         drive_pages = (buf[2] << 8) | buf[3];
2389         drive_bytes = drive_pages + 4;
2390         new_size = drive_pages + 1;
2391
2392         /* Supported pages must be in numerical order, so find where
2393          * the driver page needs to be inserted into the list of
2394          * pages returned by the device.
2395          */
2396         for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2397                 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2398                         return; /* Device is using this page code; abort */
2399                 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2400                         break;
2401         }
2402
2403         if (insert_pt < max_bytes) {
2404                 uint16_t u;
2405
2406                 /* Shift everything up one byte to make room. */
2407                 for (u = new_size + 3; u > insert_pt; u--)
2408                         buf[u] = buf[u - 1];
2409                 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2410
2411                 /* Increment num_returned_bytes, keeping SCSI (big-endian) byte order */
2412                 skcomp->num_returned_bytes =
2413                         cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
2414         }
2415
2416         /* update page length field to reflect the driver's page too */
2417         buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2418         buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2419 }
2420
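/*
 * Read the PCIe link status register and translate it into one of the
 * STEC_LINK_* speed codes and a lane count. Both are reported as
 * unknown (0xFF) if the capability is absent or the values are out of
 * range.
 */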
2421 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2422 {
2423         int pcie_reg;
2424         u16 pci_bus_speed;
2425         u8 pci_lanes;
2426
2427         pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2428         if (pcie_reg) {
2429                 u16 linksta;
2430                 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2431
2432                 pci_bus_speed = linksta & 0xF;
2433                 pci_lanes = (linksta & 0x3F0) >> 4;
2434         } else {
2435                 *speed = STEC_LINK_UNKNOWN;
2436                 *width = 0xFF;
2437                 return;
2438         }
2439
2440         switch (pci_bus_speed) {
2441         case 1:
2442                 *speed = STEC_LINK_2_5GTS;
2443                 break;
2444         case 2:
2445                 *speed = STEC_LINK_5GTS;
2446                 break;
2447         case 3:
2448                 *speed = STEC_LINK_8GTS;
2449                 break;
2450         default:
2451                 *speed = STEC_LINK_UNKNOWN;
2452                 break;
2453         }
2454
2455         if (pci_lanes <= 0x20)
2456                 *width = pci_lanes;
2457         else
2458                 *width = 0xFF;
2459 }
2460
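/*
 * Synthesize the driver's own VPD page: PCIe link and address
 * information read from config space plus the driver version string,
 * copied into the caller's buffer in place of whatever the device
 * returned for this page.
 */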
2461 static void skd_do_inq_page_da(struct skd_device *skdev,
2462                                volatile struct fit_completion_entry_v1 *skcomp,
2463                                volatile struct fit_comp_error_info *skerr,
2464                                uint8_t *cdb, uint8_t *buf)
2465 {
2466         struct pci_dev *pdev = skdev->pdev;
2467         unsigned max_bytes;
2468         struct driver_inquiry_data inq;
2469         u16 val;
2470
2471         dev_dbg(&skdev->pdev->dev, "skd_do_driver_inquiry: return driver page\n");
2472
2473         memset(&inq, 0, sizeof(inq));
2474
2475         inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2476
2477         skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2478         inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2479         inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2480         inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2481
2482         pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2483         inq.pcie_vendor_id = cpu_to_be16(val);
2484
2485         pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2486         inq.pcie_device_id = cpu_to_be16(val);
2487
2488         pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2489         inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2490
2491         pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2492         inq.pcie_subsystem_device_id = cpu_to_be16(val);
2493
2494         /* Driver version, fixed length, padded with spaces on the right */
2495         inq.driver_version_length = sizeof(inq.driver_version);
2496         memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2497         memcpy(inq.driver_version, DRV_VER_COMPL,
2498                min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2499
2500         inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2501
2502         /* Clear the error set by the device */
2503         skcomp->status = SAM_STAT_GOOD;
2504         memset((void *)skerr, 0, sizeof(*skerr));
2505
2506         /* copy response into output buffer */
2507         max_bytes = (cdb[3] << 8) | cdb[4];
2508         memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2509
2510         skcomp->num_returned_bytes =
2511                 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
2512 }
2513
2514 static void skd_do_driver_inq(struct skd_device *skdev,
2515                               volatile struct fit_completion_entry_v1 *skcomp,
2516                               volatile struct fit_comp_error_info *skerr,
2517                               uint8_t *cdb, uint8_t *buf)
2518 {
2519         if (!buf)
2520                 return;
2521         else if (cdb[0] != INQUIRY)
2522                 return;         /* Not an INQUIRY */
2523         else if ((cdb[1] & 1) == 0)
2524                 return;         /* EVPD not set */
2525         else if (cdb[2] == 0)
2526                 /* Need to add driver's page to supported pages list */
2527                 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2528         else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2529                 /* Caller requested driver's page */
2530                 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2531 }
2532
2533 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2534 {
2535         if (!sg)
2536                 return NULL;
2537         if (!sg_page(sg))
2538                 return NULL;
2539         return sg_virt(sg);
2540 }
2541
2542 static void skd_process_scsi_inq(struct skd_device *skdev,
2543                                  volatile struct fit_completion_entry_v1
2544                                  *skcomp,
2545                                  volatile struct fit_comp_error_info *skerr,
2546                                  struct skd_special_context *skspcl)
2547 {
2548         uint8_t *buf;
2549         struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2550         struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2551
2552         dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2553                             skspcl->req.sg_data_dir);
2554         buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2555
2556         if (buf)
2557                 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2558 }
2559
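/*
 * Drain the completion ring. An entry is valid only while its cycle
 * field matches the driver's current cycle, which advances each time
 * the ring index wraps. r/w completions are matched to their
 * skd_request_context by the low bits of the tag; everything else is
 * routed through skd_complete_other(). Returns nonzero if the limit
 * was reached before the ring was empty.
 */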
2560 static int skd_isr_completion_posted(struct skd_device *skdev,
2561                                         int limit, int *enqueued)
2562 {
2563         volatile struct fit_completion_entry_v1 *skcmp = NULL;
2564         volatile struct fit_comp_error_info *skerr;
2565         u16 req_id;
2566         u32 req_slot;
2567         struct skd_request_context *skreq;
2568         u16 cmp_cntxt = 0;
2569         u8 cmp_status = 0;
2570         u8 cmp_cycle = 0;
2571         u32 cmp_bytes = 0;
2572         int rc = 0;
2573         int processed = 0;
2574
2575         lockdep_assert_held(&skdev->lock);
2576
2577         for (;; ) {
2578                 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2579
2580                 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2581                 cmp_cycle = skcmp->cycle;
2582                 cmp_cntxt = skcmp->tag;
2583                 cmp_status = skcmp->status;
2584                 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2585
2586                 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2587
2588                 dev_dbg(&skdev->pdev->dev,
2589                         "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
2590                         skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
2591                         cmp_cntxt, cmp_status, skdev->in_flight, cmp_bytes,
2592                         skdev->proto_ver);
2593
2594                 if (cmp_cycle != skdev->skcomp_cycle) {
2595                         dev_dbg(&skdev->pdev->dev, "end of completions\n");
2596                         break;
2597                 }
2598                 /*
2599                  * Update the completion queue head index and possibly
2600                  * the completion cycle count. 8-bit wrap-around.
2601                  */
2602                 skdev->skcomp_ix++;
2603                 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2604                         skdev->skcomp_ix = 0;
2605                         skdev->skcomp_cycle++;
2606                 }
2607
2608                 /*
2609                  * The command context is a unique 32-bit ID. The low order
2610                  * bits help locate the request. The request is usually a
2611                  * r/w request (see skd_start() above) or a special request.
2612                  */
2613                 req_id = cmp_cntxt;
2614                 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2615
2616                 /* Is this other than a r/w request? */
2617                 if (req_slot >= skdev->num_req_context) {
2618                         /*
2619                          * This is not a completion for a r/w request.
2620                          */
2621                         skd_complete_other(skdev, skcmp, skerr);
2622                         continue;
2623                 }
2624
2625                 skreq = &skdev->skreq_table[req_slot];
2626
2627                 /*
2628                  * Make sure the request ID for the slot matches.
2629                  */
2630                 if (skreq->id != req_id) {
2631                         dev_dbg(&skdev->pdev->dev,
2632                                 "mismatch comp_id=0x%x req_id=0x%x\n", req_id,
2633                                 skreq->id);
2634                         {
2635                                 u16 new_id = cmp_cntxt;
2636                                 dev_err(&skdev->pdev->dev,
2637                                         "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2638                                         req_id, skreq->id, new_id);
2639
2640                                 continue;
2641                         }
2642                 }
2643
2644                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2645
2646                 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2647                         dev_dbg(&skdev->pdev->dev, "reclaim req %p id=%04x\n",
2648                                 skreq, skreq->id);
2649                         /* a previously timed out command can
2650                          * now be cleaned up */
2651                         skd_release_skreq(skdev, skreq);
2652                         continue;
2653                 }
2654
2655                 skreq->completion = *skcmp;
2656                 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2657                         skreq->err_info = *skerr;
2658                         skd_log_check_status(skdev, cmp_status, skerr->key,
2659                                              skerr->code, skerr->qual,
2660                                              skerr->fruc);
2661                 }
2662                 /* Release DMA resources for the request. */
2663                 if (skreq->n_sg > 0)
2664                         skd_postop_sg_list(skdev, skreq);
2665
2666                 if (!skreq->req) {
2667                         dev_dbg(&skdev->pdev->dev,
2668                                 "NULL backptr skdreq %p, req=0x%x req_id=0x%x\n",
2669                                 skreq, skreq->id, req_id);
2670                 } else {
2671                         /*
2672                          * Capture the outcome and post it back to the
2673                          * native request.
2674                          */
2675                         if (likely(cmp_status == SAM_STAT_GOOD))
2676                                 skd_end_request(skdev, skreq, BLK_STS_OK);
2677                         else
2678                                 skd_resolve_req_exception(skdev, skreq);
2679                 }
2680
2681                 /*
2682                  * Release the skreq, its FIT msg (if one), timeout slot,
2683                  * and queue depth.
2684                  */
2685                 skd_release_skreq(skdev, skreq);
2686
2687                 /* skd_isr_comp_limit equal zero means no limit */
2688                 if (limit) {
2689                         if (++processed >= limit) {
2690                                 rc = 1;
2691                                 break;
2692                         }
2693                 }
2694         }
2695
2696         if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2697                 && (skdev->in_flight) == 0) {
2698                 skdev->state = SKD_DRVR_STATE_PAUSED;
2699                 wake_up_interruptible(&skdev->waitq);
2700         }
2701
2702         return rc;
2703 }
2704
2705 static void skd_complete_other(struct skd_device *skdev,
2706                                volatile struct fit_completion_entry_v1 *skcomp,
2707                                volatile struct fit_comp_error_info *skerr)
2708 {
2709         u32 req_id = 0;
2710         u32 req_table;
2711         u32 req_slot;
2712         struct skd_special_context *skspcl;
2713
2714         lockdep_assert_held(&skdev->lock);
2715
2716         req_id = skcomp->tag;
2717         req_table = req_id & SKD_ID_TABLE_MASK;
2718         req_slot = req_id & SKD_ID_SLOT_MASK;
2719
2720         dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
2721                 req_id, req_slot);
2722
2723         /*
2724          * Based on the request id, determine how to dispatch this completion.
2725          * This switch/case finds the good cases and forwards the
2726          * completion entry. Errors are reported below the switch.
2727          */
2728         switch (req_table) {
2729         case SKD_ID_RW_REQUEST:
2730                 /*
2731                  * The caller, skd_isr_completion_posted() above,
2732                  * handles r/w requests. The only way we get here
2733                  * is if the req_slot is out of bounds.
2734                  */
2735                 break;
2736
2737         case SKD_ID_SPECIAL_REQUEST:
2738                 /*
2739                  * Make sure the req_slot is in bounds and that the id
2740                  * matches.
2741                  */
2742                 if (req_slot < skdev->n_special) {
2743                         skspcl = &skdev->skspcl_table[req_slot];
2744                         if (skspcl->req.id == req_id &&
2745                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2746                                 skd_complete_special(skdev,
2747                                                      skcomp, skerr, skspcl);
2748                                 return;
2749                         }
2750                 }
2751                 break;
2752
2753         case SKD_ID_INTERNAL:
2754                 if (req_slot == 0) {
2755                         skspcl = &skdev->internal_skspcl;
2756                         if (skspcl->req.id == req_id &&
2757                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2758                                 skd_complete_internal(skdev,
2759                                                       skcomp, skerr, skspcl);
2760                                 return;
2761                         }
2762                 }
2763                 break;
2764
2765         case SKD_ID_FIT_MSG:
2766                 /*
2767                  * These ids should never appear in a completion record.
2768                  */
2769                 break;
2770
2771         default:
2772                 /*
2773                  * These ids should never appear anywhere.
2774                  */
2775                 break;
2776         }
2777
2778         /*
2779          * If we get here it is a bad or stale id.
2780          */
2781 }
2782
2783 static void skd_complete_special(struct skd_device *skdev,
2784                                  volatile struct fit_completion_entry_v1
2785                                  *skcomp,
2786                                  volatile struct fit_comp_error_info *skerr,
2787                                  struct skd_special_context *skspcl)
2788 {
2789         lockdep_assert_held(&skdev->lock);
2790
2791         dev_dbg(&skdev->pdev->dev, " completing special request %p\n", skspcl);
2792         if (skspcl->orphaned) {
2793                 /* Discard orphaned request */
2794                 /* ?: Can this release directly or does it need
2795                  * to use a worker? */
2796                 dev_dbg(&skdev->pdev->dev, "release orphaned %p\n", skspcl);
2797                 skd_release_special(skdev, skspcl);
2798                 return;
2799         }
2800
2801         skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2802
2803         skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2804         skspcl->req.completion = *skcomp;
2805         skspcl->req.err_info = *skerr;
2806
2807         skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2808                              skerr->code, skerr->qual, skerr->fruc);
2809
2810         wake_up_interruptible(&skdev->waitq);
2811 }
2812
2813 /* assume spinlock is already held */
2814 static void skd_release_special(struct skd_device *skdev,
2815                                 struct skd_special_context *skspcl)
2816 {
2817         int i, was_depleted;
2818
2819         for (i = 0; i < skspcl->req.n_sg; i++) {
2820                 struct page *page = sg_page(&skspcl->req.sg[i]);
2821                 __free_page(page);
2822         }
2823
2824         was_depleted = (skdev->skspcl_free_list == NULL);
2825
2826         skspcl->req.state = SKD_REQ_STATE_IDLE;
2827         skspcl->req.id += SKD_ID_INCR;
2828         skspcl->req.next =
2829                 (struct skd_request_context *)skdev->skspcl_free_list;
2830         skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2831
2832         if (was_depleted) {
2833                 dev_dbg(&skdev->pdev->dev, "skspcl was depleted\n");
2834                 /* Free list was depleted. There might be waiters. */
2835                 wake_up_interruptible(&skdev->waitq);
2836         }
2837 }
2838
2839 static void skd_reset_skcomp(struct skd_device *skdev)
2840 {
2841         memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
2842
2843         skdev->skcomp_ix = 0;
2844         skdev->skcomp_cycle = 1;
2845 }
2846
2847 /*
2848  *****************************************************************************
2849  * INTERRUPTS
2850  *****************************************************************************
2851  */
2852 static void skd_completion_worker(struct work_struct *work)
2853 {
2854         struct skd_device *skdev =
2855                 container_of(work, struct skd_device, completion_worker);
2856         unsigned long flags;
2857         int flush_enqueued = 0;
2858
2859         spin_lock_irqsave(&skdev->lock, flags);
2860
2861         /*
2862          * Pass in limit=0, which means no limit:
2863          * process everything in the compq.
2864          */
2865         skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2866         skd_request_fn(skdev->queue);
2867
2868         spin_unlock_irqrestore(&skdev->lock, flags);
2869 }
2870
2871 static void skd_isr_msg_from_dev(struct skd_device *skdev);
2872
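/*
 * Legacy/MSI interrupt handler. Reads FIT_INT_STATUS_HOST in a loop,
 * acks whatever bits are set, and dispatches completion, firmware-state
 * and message-from-device processing until no interrupt bits remain.
 * If completion processing is deferred (deferred != 0), the completion
 * worker is scheduled to finish it once the loop exits.
 */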
2873 static irqreturn_t
2874 skd_isr(int irq, void *ptr)
2875 {
2876         struct skd_device *skdev;
2877         u32 intstat;
2878         u32 ack;
2879         int rc = 0;
2880         int deferred = 0;
2881         int flush_enqueued = 0;
2882
2883         skdev = (struct skd_device *)ptr;
2884         spin_lock(&skdev->lock);
2885
2886         for (;;) {
2887                 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2888
2889                 ack = FIT_INT_DEF_MASK;
2890                 ack &= intstat;
2891
2892                 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
2893                         ack);
2894
2895                 /* As long as there is an interrupt pending on the device, keep
2896                  * running the loop.  When there is none, get out; but if we have
2897                  * never done any processing, defer to the completion handler.
2898                  */
2899                 if (ack == 0) {
2900                         /* No interrupts on device, but run the completion
2901                          * processor anyway?
2902                          */
2903                         if (rc == 0)
2904                                 if (likely(skdev->state
2905                                         == SKD_DRVR_STATE_ONLINE))
2906                                         deferred = 1;
2907                         break;
2908                 }
2909
2910                 rc = IRQ_HANDLED;
2911
2912                 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2913
2914                 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
2915                            (skdev->state != SKD_DRVR_STATE_STOPPING))) {
2916                         if (intstat & FIT_ISH_COMPLETION_POSTED) {
2917                                 /*
2918                                  * If we have already deferred completion
2919                                  * processing, don't bother running it again
2920                                  */
2921                                 if (deferred == 0)
2922                                         deferred =
2923                                                 skd_isr_completion_posted(skdev,
2924                                                 skd_isr_comp_limit, &flush_enqueued);
2925                         }
2926
2927                         if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2928                                 skd_isr_fwstate(skdev);
2929                                 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2930                                     skdev->state ==
2931                                     SKD_DRVR_STATE_DISAPPEARED) {
2932                                         spin_unlock(&skdev->lock);
2933                                         return rc;
2934                                 }
2935                         }
2936
2937                         if (intstat & FIT_ISH_MSG_FROM_DEV)
2938                                 skd_isr_msg_from_dev(skdev);
2939                 }
2940         }
2941
2942         if (unlikely(flush_enqueued))
2943                 skd_request_fn(skdev->queue);
2944
2945         if (deferred)
2946                 schedule_work(&skdev->completion_worker);
2947         else if (!flush_enqueued)
2948                 skd_request_fn(skdev->queue);
2949
2950         spin_unlock(&skdev->lock);
2951
2952         return rc;
2953 }
2954
2955 static void skd_drive_fault(struct skd_device *skdev)
2956 {
2957         skdev->state = SKD_DRVR_STATE_FAULT;
2958         dev_err(&skdev->pdev->dev, "Drive FAULT\n");
2959 }
2960
2961 static void skd_drive_disappeared(struct skd_device *skdev)
2962 {
2963         skdev->state = SKD_DRVR_STATE_DISAPPEARED;
2964         dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
2965 }
2966
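/*
 * Handle a firmware/drive state change: read the new drive state from
 * FIT_STATUS and move the driver state machine accordingly, e.g.
 * continue the init handshake, go online, quiesce on busy, or recover
 * outstanding requests on fault/disappearance.
 */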
2967 static void skd_isr_fwstate(struct skd_device *skdev)
2968 {
2969         u32 sense;
2970         u32 state;
2971         u32 mtd;
2972         int prev_driver_state = skdev->state;
2973
2974         sense = SKD_READL(skdev, FIT_STATUS);
2975         state = sense & FIT_SR_DRIVE_STATE_MASK;
2976
2977         dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
2978                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
2979                 skd_drive_state_to_str(state), state);
2980
2981         skdev->drive_state = state;
2982
2983         switch (skdev->drive_state) {
2984         case FIT_SR_DRIVE_INIT:
2985                 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
2986                         skd_disable_interrupts(skdev);
2987                         break;
2988                 }
2989                 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
2990                         skd_recover_requests(skdev, 0);
2991                 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
2992                         skdev->timer_countdown = SKD_STARTING_TIMO;
2993                         skdev->state = SKD_DRVR_STATE_STARTING;
2994                         skd_soft_reset(skdev);
2995                         break;
2996                 }
2997                 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
2998                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2999                 skdev->last_mtd = mtd;
3000                 break;
3001
3002         case FIT_SR_DRIVE_ONLINE:
3003                 skdev->cur_max_queue_depth = skd_max_queue_depth;
3004                 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3005                         skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3006
3007                 skdev->queue_low_water_mark =
3008                         skdev->cur_max_queue_depth * 2 / 3 + 1;
3009                 if (skdev->queue_low_water_mark < 1)
3010                         skdev->queue_low_water_mark = 1;
3011                 dev_info(&skdev->pdev->dev,
3012                          "Queue depth limit=%d dev=%d lowat=%d\n",
3013                          skdev->cur_max_queue_depth,
3014                          skdev->dev_max_queue_depth,
3015                          skdev->queue_low_water_mark);
3016
3017                 skd_refresh_device_data(skdev);
3018                 break;
3019
3020         case FIT_SR_DRIVE_BUSY:
3021                 skdev->state = SKD_DRVR_STATE_BUSY;
3022                 skdev->timer_countdown = SKD_BUSY_TIMO;
3023                 skd_quiesce_dev(skdev);
3024                 break;
3025         case FIT_SR_DRIVE_BUSY_SANITIZE:
3026                 /* Set the timer for 3 seconds; we'll abort any unfinished
3027                  * commands after it expires.
3028                  */
3029                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3030                 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3031                 blk_start_queue(skdev->queue);
3032                 break;
3033         case FIT_SR_DRIVE_BUSY_ERASE:
3034                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3035                 skdev->timer_countdown = SKD_BUSY_TIMO;
3036                 break;
3037         case FIT_SR_DRIVE_OFFLINE:
3038                 skdev->state = SKD_DRVR_STATE_IDLE;
3039                 break;
3040         case FIT_SR_DRIVE_SOFT_RESET:
3041                 switch (skdev->state) {
3042                 case SKD_DRVR_STATE_STARTING:
3043                 case SKD_DRVR_STATE_RESTARTING:
3044                         /* Expected by a caller of skd_soft_reset() */
3045                         break;
3046                 default:
3047                         skdev->state = SKD_DRVR_STATE_RESTARTING;
3048                         break;
3049                 }
3050                 break;
3051         case FIT_SR_DRIVE_FW_BOOTING:
3052                 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
3053                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3054                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3055                 break;
3056
3057         case FIT_SR_DRIVE_DEGRADED:
3058         case FIT_SR_PCIE_LINK_DOWN:
3059         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3060                 break;
3061
3062         case FIT_SR_DRIVE_FAULT:
3063                 skd_drive_fault(skdev);
3064                 skd_recover_requests(skdev, 0);
3065                 blk_start_queue(skdev->queue);
3066                 break;
3067
3068         /* PCIe bus returned all Fs? */
3069         case 0xFF:
3070                 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
3071                          sense);
3072                 skd_drive_disappeared(skdev);
3073                 skd_recover_requests(skdev, 0);
3074                 blk_start_queue(skdev->queue);
3075                 break;
3076         default:
3077                 /*
3078                  * Unknown FW state. Wait for a state we recognize.
3079                  */
3080                 break;
3081         }
3082         dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3083                 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3084                 skd_skdev_state_to_str(skdev->state), skdev->state);
3085 }
3086
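/*
 * Abort or requeue every in-flight request and reset the request,
 * FIT-message and special-request tables back to a clean free-list
 * state. Callers hold skdev->lock; this runs when the drive faults,
 * resets or disappears.
 */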
3087 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3088 {
3089         int i;
3090
3091         for (i = 0; i < skdev->num_req_context; i++) {
3092                 struct skd_request_context *skreq = &skdev->skreq_table[i];
3093
3094                 if (skreq->state == SKD_REQ_STATE_BUSY) {
3095                         skd_log_skreq(skdev, skreq, "recover");
3096
3097                         SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3098                         SKD_ASSERT(skreq->req != NULL);
3099
3100                         /* Release DMA resources for the request. */
3101                         if (skreq->n_sg > 0)
3102                                 skd_postop_sg_list(skdev, skreq);
3103
3104                         if (requeue &&
3105                             (unsigned long) ++skreq->req->special <
3106                             SKD_MAX_RETRIES)
3107                                 blk_requeue_request(skdev->queue, skreq->req);
3108                         else
3109                                 skd_end_request(skdev, skreq, BLK_STS_IOERR);
3110
3111                         skreq->req = NULL;
3112
3113                         skreq->state = SKD_REQ_STATE_IDLE;
3114                         skreq->id += SKD_ID_INCR;
3115                 }
3116                 if (i > 0)
3117                         skreq[-1].next = skreq;
3118                 skreq->next = NULL;
3119         }
3120         skdev->skreq_free_list = skdev->skreq_table;
3121
3122         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3123                 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3124
3125                 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3126                         skd_log_skmsg(skdev, skmsg, "salvaged");
3127                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3128                         skmsg->state = SKD_MSG_STATE_IDLE;
3129                         skmsg->id += SKD_ID_INCR;
3130                 }
3131                 if (i > 0)
3132                         skmsg[-1].next = skmsg;
3133                 skmsg->next = NULL;
3134         }
3135         skdev->skmsg_free_list = skdev->skmsg_table;
3136
3137         for (i = 0; i < skdev->n_special; i++) {
3138                 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3139
3140                 /* If orphaned, reclaim it because it has already been reported
3141                  * to the process as an error (it was just waiting for
3142                  * a completion that didn't come, and now it never will).
3143                  * If busy, change to a state that will cause it to error
3144                  * out in the wait routine and let it do the normal
3145                  * reporting and reclaiming.
3146                  */
3147                 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3148                         if (skspcl->orphaned) {
3149                                 dev_dbg(&skdev->pdev->dev, "orphaned %p\n",
3150                                         skspcl);
3151                                 skd_release_special(skdev, skspcl);
3152                         } else {
3153                                 dev_dbg(&skdev->pdev->dev, "not orphaned %p\n",
3154                                         skspcl);
3155                                 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3156                         }
3157                 }
3158         }
3159         skdev->skspcl_free_list = skdev->skspcl_table;
3160
3161         for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3162                 skdev->timeout_slot[i] = 0;
3163
3164         skdev->in_flight = 0;
3165 }
3166
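/*
 * Process a message-from-device interrupt. The driver and firmware walk
 * through a fixed init handshake, one FIT_MSG_TO_DEVICE write per step:
 * FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR ->
 * CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO/HI -> ARM_QUEUE. Each reply
 * is matched against last_mtd so stale acks are ignored.
 */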
3167 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3168 {
3169         u32 mfd;
3170         u32 mtd;
3171         u32 data;
3172
3173         mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3174
3175         dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
3176                 skdev->last_mtd);
3177
3178         /* ignore any mtd that is an ack for something we didn't send */
3179         if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3180                 return;
3181
3182         switch (FIT_MXD_TYPE(mfd)) {
3183         case FIT_MTD_FITFW_INIT:
3184                 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3185
3186                 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3187                         dev_err(&skdev->pdev->dev, "protocol mismatch\n");
3188                         dev_err(&skdev->pdev->dev, "  got=%d support=%d\n",
3189                                 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
3190                         dev_err(&skdev->pdev->dev, "  please upgrade driver\n");
3191                         skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3192                         skd_soft_reset(skdev);
3193                         break;
3194                 }
3195                 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3196                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3197                 skdev->last_mtd = mtd;
3198                 break;
3199
3200         case FIT_MTD_GET_CMDQ_DEPTH:
3201                 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3202                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3203                                    SKD_N_COMPLETION_ENTRY);
3204                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3205                 skdev->last_mtd = mtd;
3206                 break;
3207
3208         case FIT_MTD_SET_COMPQ_DEPTH:
3209                 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3210                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3211                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3212                 skdev->last_mtd = mtd;
3213                 break;
3214
3215         case FIT_MTD_SET_COMPQ_ADDR:
3216                 skd_reset_skcomp(skdev);
3217                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3218                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3219                 skdev->last_mtd = mtd;
3220                 break;
3221
3222         case FIT_MTD_CMD_LOG_HOST_ID:
3223                 skdev->connect_time_stamp = get_seconds();
3224                 data = skdev->connect_time_stamp & 0xFFFF;
3225                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3226                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3227                 skdev->last_mtd = mtd;
3228                 break;
3229
3230         case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3231                 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3232                 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3233                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3234                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3235                 skdev->last_mtd = mtd;
3236                 break;
3237
3238         case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3239                 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3240                 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3241                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3242                 skdev->last_mtd = mtd;
3243
3244                 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
3245                         skdev->connect_time_stamp, skdev->drive_jiffies);
3246                 break;
3247
3248         case FIT_MTD_ARM_QUEUE:
3249                 skdev->last_mtd = 0;
3250                 /*
3251                  * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3252                  */
3253                 break;
3254
3255         default:
3256                 break;
3257         }
3258 }
3259
3260 static void skd_disable_interrupts(struct skd_device *skdev)
3261 {
3262         u32 sense;
3263
3264         sense = SKD_READL(skdev, FIT_CONTROL);
3265         sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3266         SKD_WRITEL(skdev, sense, FIT_CONTROL);
3267         dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
3268
3269         /* Note that all 1s are written. A 1-bit means
3270          * disable, a 0-bit means enable.
3271          */
3272         SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3273 }
3274
3275 static void skd_enable_interrupts(struct skd_device *skdev)
3276 {
3277         u32 val;
3278
3279         /* unmask interrupts first */
3280         val = FIT_ISH_FW_STATE_CHANGE +
3281               FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3282
3283         /* Note that the complement of mask is written. A 1-bit means
3284          * disable, a 0-bit means enable. */
3285         SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3286         dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
3287
3288         val = SKD_READL(skdev, FIT_CONTROL);
3289         val |= FIT_CR_ENABLE_INTERRUPTS;
3290         dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
3291         SKD_WRITEL(skdev, val, FIT_CONTROL);
3292 }
3293
3294 /*
3295  *****************************************************************************
3296  * START, STOP, RESTART, QUIESCE, UNQUIESCE
3297  *****************************************************************************
3298  */
3299
3300 static void skd_soft_reset(struct skd_device *skdev)
3301 {
3302         u32 val;
3303
3304         val = SKD_READL(skdev, FIT_CONTROL);
3305         val |= (FIT_CR_SOFT_RESET);
3306         dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
3307         SKD_WRITEL(skdev, val, FIT_CONTROL);
3308 }
3309
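/*
 * Bring the device up: ack any stale interrupts, sample the current
 * drive state, enable interrupts, and then either kick off a soft reset
 * (the normal INIT/ONLINE case) or drop into the busy, fault or
 * disappeared handling paths depending on what the drive reports.
 */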
3310 static void skd_start_device(struct skd_device *skdev)
3311 {
3312         unsigned long flags;
3313         u32 sense;
3314         u32 state;
3315
3316         spin_lock_irqsave(&skdev->lock, flags);
3317
3318         /* ack all ghost interrupts */
3319         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3320
3321         sense = SKD_READL(skdev, FIT_STATUS);
3322
3323         dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
3324
3325         state = sense & FIT_SR_DRIVE_STATE_MASK;
3326         skdev->drive_state = state;
3327         skdev->last_mtd = 0;
3328
3329         skdev->state = SKD_DRVR_STATE_STARTING;
3330         skdev->timer_countdown = SKD_STARTING_TIMO;
3331
3332         skd_enable_interrupts(skdev);
3333
3334         switch (skdev->drive_state) {
3335         case FIT_SR_DRIVE_OFFLINE:
3336                 dev_err(&skdev->pdev->dev, "Drive offline...\n");
3337                 break;
3338
3339         case FIT_SR_DRIVE_FW_BOOTING:
3340                 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
3341                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3342                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3343                 break;
3344
3345         case FIT_SR_DRIVE_BUSY_SANITIZE:
3346                 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
3347                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3348                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3349                 break;
3350
3351         case FIT_SR_DRIVE_BUSY_ERASE:
3352                 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
3353                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3354                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3355                 break;
3356
3357         case FIT_SR_DRIVE_INIT:
3358         case FIT_SR_DRIVE_ONLINE:
3359                 skd_soft_reset(skdev);
3360                 break;
3361
3362         case FIT_SR_DRIVE_BUSY:
3363                 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
3364                 skdev->state = SKD_DRVR_STATE_BUSY;
3365                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3366                 break;
3367
3368         case FIT_SR_DRIVE_SOFT_RESET:
3369                 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
3370                 break;
3371
3372         case FIT_SR_DRIVE_FAULT:
3373                 /* Fault state is bad...soft reset won't do it...
3374                  * Hard reset, maybe, but does it work on device?
3375                  * For now, just fault so the system doesn't hang.
3376                  */
3377                 skd_drive_fault(skdev);
3378                 /* start the queue so we can respond with errors to requests */
3379                 dev_dbg(&skdev->pdev->dev, "starting queue\n");
3380                 blk_start_queue(skdev->queue);
3381                 skdev->gendisk_on = -1;
3382                 wake_up_interruptible(&skdev->waitq);
3383                 break;
3384
3385         case 0xFF:
3386                 /* Most likely the device isn't there or isn't responding
3387                  * to the BAR1 addresses. */
3388                 skd_drive_disappeared(skdev);
3389                 /* start the queue so we can respond with errors to requests */
3390                 dev_dbg(&skdev->pdev->dev,
3391                         "starting queue to error-out reqs\n");
3392                 blk_start_queue(skdev->queue);
3393                 skdev->gendisk_on = -1;
3394                 wake_up_interruptible(&skdev->waitq);
3395                 break;
3396
3397         default:
3398                 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
3399                         skdev->drive_state);
3400                 break;
3401         }
3402
3403         state = SKD_READL(skdev, FIT_CONTROL);
3404         dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
3405
3406         state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3407         dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
3408
3409         state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3410         dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
3411
3412         state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3413         dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
3414
3415         state = SKD_READL(skdev, FIT_HW_VERSION);
3416         dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
3417
3418         spin_unlock_irqrestore(&skdev->lock, flags);
3419 }
3420
3421 static void skd_stop_device(struct skd_device *skdev)
3422 {
3423         unsigned long flags;
3424         struct skd_special_context *skspcl = &skdev->internal_skspcl;
3425         u32 dev_state;
3426         int i;
3427
3428         spin_lock_irqsave(&skdev->lock, flags);
3429
3430         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3431                 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
3432                 goto stop_out;
3433         }
3434
3435         if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3436                 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
3437                 goto stop_out;
3438         }
3439
3440         skdev->state = SKD_DRVR_STATE_SYNCING;
3441         skdev->sync_done = 0;
3442
3443         skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3444
3445         spin_unlock_irqrestore(&skdev->lock, flags);
3446
3447         wait_event_interruptible_timeout(skdev->waitq,
3448                                          (skdev->sync_done), (10 * HZ));
3449
3450         spin_lock_irqsave(&skdev->lock, flags);
3451
3452         switch (skdev->sync_done) {
3453         case 0:
3454                 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
3455                 break;
3456         case 1:
3457                 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
3458                 break;
3459         default:
3460                 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
3461         }
3462
3463 stop_out:
3464         skdev->state = SKD_DRVR_STATE_STOPPING;
3465         spin_unlock_irqrestore(&skdev->lock, flags);
3466
3467         skd_kill_timer(skdev);
3468
3469         spin_lock_irqsave(&skdev->lock, flags);
3470         skd_disable_interrupts(skdev);
3471
3472         /* ensure all ints on device are cleared */
3473         /* soft reset the device to unload with a clean slate */
3474         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3475         SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3476
3477         spin_unlock_irqrestore(&skdev->lock, flags);
3478
3479         /* poll every 100ms, 1 second timeout */
3480         for (i = 0; i < 10; i++) {
3481                 dev_state =
3482                         SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3483                 if (dev_state == FIT_SR_DRIVE_INIT)
3484                         break;
3485                 set_current_state(TASK_INTERRUPTIBLE);
3486                 schedule_timeout(msecs_to_jiffies(100));
3487         }
3488
3489         if (dev_state != FIT_SR_DRIVE_INIT)
3490                 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
3491                         dev_state);
3492 }
3493
3494 /* assume spinlock is held */
3495 static void skd_restart_device(struct skd_device *skdev)
3496 {
3497         u32 state;
3498
3499         /* ack all ghost interrupts */
3500         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3501
3502         state = SKD_READL(skdev, FIT_STATUS);
3503
3504         dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
3505
3506         state &= FIT_SR_DRIVE_STATE_MASK;
3507         skdev->drive_state = state;
3508         skdev->last_mtd = 0;
3509
3510         skdev->state = SKD_DRVR_STATE_RESTARTING;
3511         skdev->timer_countdown = SKD_RESTARTING_TIMO;
3512
3513         skd_soft_reset(skdev);
3514 }
3515
3516 /* assume spinlock is held */
3517 static int skd_quiesce_dev(struct skd_device *skdev)
3518 {
3519         int rc = 0;
3520
3521         switch (skdev->state) {
3522         case SKD_DRVR_STATE_BUSY:
3523         case SKD_DRVR_STATE_BUSY_IMMINENT:
3524                 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
3525                 blk_stop_queue(skdev->queue);
3526                 break;
3527         case SKD_DRVR_STATE_ONLINE:
3528         case SKD_DRVR_STATE_STOPPING:
3529         case SKD_DRVR_STATE_SYNCING:
3530         case SKD_DRVR_STATE_PAUSING:
3531         case SKD_DRVR_STATE_PAUSED:
3532         case SKD_DRVR_STATE_STARTING:
3533         case SKD_DRVR_STATE_RESTARTING:
3534         case SKD_DRVR_STATE_RESUMING:
3535         default:
3536                 rc = -EINVAL;
3537                 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
3538                         skdev->state);
3539         }
3540         return rc;
3541 }
3542
3543 /* assume spinlock is held */
3544 static int skd_unquiesce_dev(struct skd_device *skdev)
3545 {
3546         int prev_driver_state = skdev->state;
3547
3548         skd_log_skdev(skdev, "unquiesce");
3549         if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3550                 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
3551                 return 0;
3552         }
3553         if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3554                 /*
3555                  * If there has been a state change to other than
3556                  * ONLINE, we will rely on controller state change
3557                  * to come back online and restart the queue.
3558                  * The BUSY state means that driver is ready to
3559                  * continue normal processing but waiting for controller
3560                  * to become available.
3561                  */
3562                 skdev->state = SKD_DRVR_STATE_BUSY;
3563                 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
3564                 return 0;
3565         }
3566
3567         /*
3568          * Drive has just come online; the driver is either in startup,
3569          * paused performing a task, or busy waiting for hardware.
3570          */
3571         switch (skdev->state) {
3572         case SKD_DRVR_STATE_PAUSED:
3573         case SKD_DRVR_STATE_BUSY:
3574         case SKD_DRVR_STATE_BUSY_IMMINENT:
3575         case SKD_DRVR_STATE_BUSY_ERASE:
3576         case SKD_DRVR_STATE_STARTING:
3577         case SKD_DRVR_STATE_RESTARTING:
3578         case SKD_DRVR_STATE_FAULT:
3579         case SKD_DRVR_STATE_IDLE:
3580         case SKD_DRVR_STATE_LOAD:
3581                 skdev->state = SKD_DRVR_STATE_ONLINE;
3582                 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3583                         skd_skdev_state_to_str(prev_driver_state),
3584                         prev_driver_state, skd_skdev_state_to_str(skdev->state),
3585                         skdev->state);
3586                 dev_dbg(&skdev->pdev->dev,
3587                         "**** device ONLINE...starting block queue\n");
3588                 dev_dbg(&skdev->pdev->dev, "starting queue\n");
3589                 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
3590                 blk_start_queue(skdev->queue);
3591                 skdev->gendisk_on = 1;
3592                 wake_up_interruptible(&skdev->waitq);
3593                 break;
3594
3595         case SKD_DRVR_STATE_DISAPPEARED:
3596         default:
3597                 dev_dbg(&skdev->pdev->dev,
3598                         "**** driver state %d, not implemented\n",
3599                         skdev->state);
3600                 return -EBUSY;
3601         }
3602         return 0;
3603 }
3604
3605 /*
3606  *****************************************************************************
3607  * PCIe MSI/MSI-X INTERRUPT HANDLERS
3608  *****************************************************************************
3609  */
3610
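/*
 * MSI-X mode uses one small handler per vector. Each handler acks only
 * its own bit in FIT_INT_STATUS_HOST and then does the corresponding
 * work (state change, completion queue, message from device, queue
 * full); the reserved vectors just log and ack.
 */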
3611 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3612 {
3613         struct skd_device *skdev = skd_host_data;
3614         unsigned long flags;
3615
3616         spin_lock_irqsave(&skdev->lock, flags);
3617         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3618                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3619         dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
3620                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3621         SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3622         spin_unlock_irqrestore(&skdev->lock, flags);
3623         return IRQ_HANDLED;
3624 }
3625
3626 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3627 {
3628         struct skd_device *skdev = skd_host_data;
3629         unsigned long flags;
3630
3631         spin_lock_irqsave(&skdev->lock, flags);
3632         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3633                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3634         SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3635         skd_isr_fwstate(skdev);
3636         spin_unlock_irqrestore(&skdev->lock, flags);
3637         return IRQ_HANDLED;
3638 }
3639
3640 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3641 {
3642         struct skd_device *skdev = skd_host_data;
3643         unsigned long flags;
3644         int flush_enqueued = 0;
3645         int deferred;
3646
3647         spin_lock_irqsave(&skdev->lock, flags);
3648         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3649                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3650         SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3651         deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3652                                                 &flush_enqueued);
3653         if (flush_enqueued)
3654                 skd_request_fn(skdev->queue);
3655
3656         if (deferred)
3657                 schedule_work(&skdev->completion_worker);
3658         else if (!flush_enqueued)
3659                 skd_request_fn(skdev->queue);
3660
3661         spin_unlock_irqrestore(&skdev->lock, flags);
3662
3663         return IRQ_HANDLED;
3664 }
3665
3666 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3667 {
3668         struct skd_device *skdev = skd_host_data;
3669         unsigned long flags;
3670
3671         spin_lock_irqsave(&skdev->lock, flags);
3672         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3673                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3674         SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3675         skd_isr_msg_from_dev(skdev);
3676         spin_unlock_irqrestore(&skdev->lock, flags);
3677         return IRQ_HANDLED;
3678 }
3679
3680 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3681 {
3682         struct skd_device *skdev = skd_host_data;
3683         unsigned long flags;
3684
3685         spin_lock_irqsave(&skdev->lock, flags);
3686         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3687                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3688         SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3689         spin_unlock_irqrestore(&skdev->lock, flags);
3690         return IRQ_HANDLED;
3691 }
3692
3693 /*
3694  *****************************************************************************
3695  * PCIe MSI/MSI-X SETUP
3696  *****************************************************************************
3697  */
3698
3699 struct skd_msix_entry {
3700         char isr_name[30];
3701 };
3702
3703 struct skd_init_msix_entry {
3704         const char *name;
3705         irq_handler_t handler;
3706 };
3707
3708 #define SKD_MAX_MSIX_COUNT              13
3709 #define SKD_MIN_MSIX_COUNT              7
3710 #define SKD_BASE_MSIX_IRQ               4
3711
3712 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3713         { "(DMA 0)",        skd_reserved_isr },
3714         { "(DMA 1)",        skd_reserved_isr },
3715         { "(DMA 2)",        skd_reserved_isr },
3716         { "(DMA 3)",        skd_reserved_isr },
3717         { "(State Change)", skd_statec_isr   },
3718         { "(COMPL_Q)",      skd_comp_q       },
3719         { "(MSG)",          skd_msg_isr      },
3720         { "(Reserved)",     skd_reserved_isr },
3721         { "(Reserved)",     skd_reserved_isr },
3722         { "(Queue Full 0)", skd_qfull_isr    },
3723         { "(Queue Full 1)", skd_qfull_isr    },
3724         { "(Queue Full 2)", skd_qfull_isr    },
3725         { "(Queue Full 3)", skd_qfull_isr    },
3726 };
3727
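/*
 * Allocate exactly SKD_MAX_MSIX_COUNT MSI-X vectors and register one
 * named handler per vector from msix_entries[]. On any failure the
 * already-registered vectors are freed and an error is returned so the
 * caller can fall back to MSI/legacy interrupts.
 */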
3728 static int skd_acquire_msix(struct skd_device *skdev)
3729 {
3730         int i, rc;
3731         struct pci_dev *pdev = skdev->pdev;
3732
3733         rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
3734                         PCI_IRQ_MSIX);
3735         if (rc < 0) {
3736                 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
3737                 goto out;
3738         }
3739
3740         skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
3741                         sizeof(struct skd_msix_entry), GFP_KERNEL);
3742         if (!skdev->msix_entries) {
3743                 rc = -ENOMEM;
3744                 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
3745                 goto out;
3746         }
3747
3748         /* Enable MSI-X vectors for the base queue */
3749         for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3750                 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
3751
3752                 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3753                          "%s%d-msix %s", DRV_NAME, skdev->devno,
3754                          msix_entries[i].name);
3755
3756                 rc = devm_request_irq(&skdev->pdev->dev,
3757                                 pci_irq_vector(skdev->pdev, i),
3758                                 msix_entries[i].handler, 0,
3759                                 qentry->isr_name, skdev);
3760                 if (rc) {
3761                         dev_err(&skdev->pdev->dev,
3762                                 "Unable to register(%d) MSI-X handler %d: %s\n",
3763                                 rc, i, qentry->isr_name);
3764                         goto msix_out;
3765                 }
3766         }
3767
3768         dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
3769                 SKD_MAX_MSIX_COUNT);
3770         return 0;
3771
3772 msix_out:
3773         while (--i >= 0)
3774                 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
3775 out:
3776         kfree(skdev->msix_entries);
3777         skdev->msix_entries = NULL;
3778         return rc;
3779 }
3780
3781 static int skd_acquire_irq(struct skd_device *skdev)
3782 {
3783         struct pci_dev *pdev = skdev->pdev;
3784         unsigned int irq_flag = PCI_IRQ_LEGACY;
3785         int rc;
3786
3787         if (skd_isr_type == SKD_IRQ_MSIX) {
3788                 rc = skd_acquire_msix(skdev);
3789                 if (!rc)
3790                         return 0;
3791
3792                 dev_err(&skdev->pdev->dev,
3793                         "failed to enable MSI-X, re-trying with MSI %d\n", rc);
3794         }
3795
3796         snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
3797                         skdev->devno);
3798
3799         if (skd_isr_type != SKD_IRQ_LEGACY)
3800                 irq_flag |= PCI_IRQ_MSI;
3801         rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
3802         if (rc < 0) {
3803                 dev_err(&skdev->pdev->dev,
3804                         "failed to allocate the MSI interrupt %d\n", rc);
3805                 return rc;
3806         }
3807
3808         rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
3809                         pdev->msi_enabled ? 0 : IRQF_SHARED,
3810                         skdev->isr_name, skdev);
3811         if (rc) {
3812                 pci_free_irq_vectors(pdev);
3813                 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
3814                         rc);
3815                 return rc;
3816         }
3817
3818         return 0;
3819 }
3820
3821 static void skd_release_irq(struct skd_device *skdev)
3822 {
3823         struct pci_dev *pdev = skdev->pdev;
3824
3825         if (skdev->msix_entries) {
3826                 int i;
3827
3828                 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3829                         devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
3830                                         skdev);
3831                 }
3832
3833                 kfree(skdev->msix_entries);
3834                 skdev->msix_entries = NULL;
3835         } else {
3836                 devm_free_irq(&pdev->dev, pdev->irq, skdev);
3837         }
3838
3839         pci_free_irq_vectors(pdev);
3840 }
3841
3842 /*
3843  *****************************************************************************
3844  * CONSTRUCT
3845  *****************************************************************************
3846  */
3847
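/*
 * Allocate the completion queue: a single DMA-coherent block of
 * SKD_SKCOMP_SIZE bytes holding SKD_N_COMPLETION_ENTRY completion
 * entries followed by the matching error-info entries.
 */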
3848 static int skd_cons_skcomp(struct skd_device *skdev)
3849 {
3850         int rc = 0;
3851         struct fit_completion_entry_v1 *skcomp;
3852
3853         dev_dbg(&skdev->pdev->dev,
3854                 "comp pci_alloc, total bytes %zd entries %d\n",
3855                 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
3856
3857         skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
3858                                        &skdev->cq_dma_address);
3859
3860         if (skcomp == NULL) {
3861                 rc = -ENOMEM;
3862                 goto err_out;
3863         }
3864
3865         skdev->skcomp_table = skcomp;
3866         skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
3867                                                            sizeof(*skcomp) *
3868                                                            SKD_N_COMPLETION_ENTRY);
3869
3870 err_out:
3871         return rc;
3872 }
3873
3874 static int skd_cons_skmsg(struct skd_device *skdev)
3875 {
3876         int rc = 0;
3877         u32 i;
3878
3879         dev_dbg(&skdev->pdev->dev,
3880                 "skmsg_table kzalloc, struct %lu, count %u total %lu\n",
3881                 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
3882                 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
3883
3884         skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
3885                                      *skdev->num_fitmsg_context, GFP_KERNEL);
3886         if (skdev->skmsg_table == NULL) {
3887                 rc = -ENOMEM;
3888                 goto err_out;
3889         }
3890
3891         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3892                 struct skd_fitmsg_context *skmsg;
3893
3894                 skmsg = &skdev->skmsg_table[i];
3895
3896                 skmsg->id = i + SKD_ID_FIT_MSG;
3897
3898                 skmsg->state = SKD_MSG_STATE_IDLE;
3899                 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
3900                                                       SKD_N_FITMSG_BYTES + 64,
3901                                                       &skmsg->mb_dma_address);
3902
3903                 if (skmsg->msg_buf == NULL) {
3904                         rc = -ENOMEM;
3905                         goto err_out;
3906                 }
3907
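                /*
                 * Align the message buffer and its DMA address to the
                 * FIT_QCMD base-address boundary; the extra 64 bytes in
                 * the allocation above appear to be headroom for this
                 * rounding. The bits below the boundary are remembered
                 * in skmsg->offset so the original pointer can be
                 * reconstructed at free time.
                 */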
3908                 skmsg->offset = (u32)((u64)skmsg->msg_buf &
3909                                       (~FIT_QCMD_BASE_ADDRESS_MASK));
3910                 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
3911                 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
3912                                        FIT_QCMD_BASE_ADDRESS_MASK);
3913                 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
3914                 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
3915                 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
3916
3917                 skmsg->next = &skmsg[1];
3918         }
3919
3920         /* Free list is in order starting with the 0th entry. */
3921         skdev->skmsg_table[i - 1].next = NULL;
3922         skdev->skmsg_free_list = skdev->skmsg_table;
3923
3924 err_out:
3925         return rc;
3926 }
3927
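/*
 * Allocate a DMA-coherent array of n_sg FIT SG descriptors and chain
 * them: each descriptor's next_desc_ptr holds the bus address of the
 * next entry, and the last entry is terminated with 0.
 */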
3928 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
3929                                                   u32 n_sg,
3930                                                   dma_addr_t *ret_dma_addr)
3931 {
3932         struct fit_sg_descriptor *sg_list;
3933         u32 nbytes;
3934
3935         nbytes = sizeof(*sg_list) * n_sg;
3936
3937         sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
3938
3939         if (sg_list != NULL) {
3940                 uint64_t dma_address = *ret_dma_addr;
3941                 u32 i;
3942
3943                 memset(sg_list, 0, nbytes);
3944
3945                 for (i = 0; i < n_sg - 1; i++) {
3946                         uint64_t ndp_off;
3947                         ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
3948
3949                         sg_list[i].next_desc_ptr = dma_address + ndp_off;
3950                 }
3951                 sg_list[i].next_desc_ptr = 0LL;
3952         }
3953
3954         return sg_list;
3955 }
3956
3957 static int skd_cons_skreq(struct skd_device *skdev)
3958 {
3959         int rc = 0;
3960         u32 i;
3961
3962         dev_dbg(&skdev->pdev->dev,
3963                 "skreq_table kzalloc, struct %lu, count %u total %lu\n",
3964                 sizeof(struct skd_request_context), skdev->num_req_context,
3965                 sizeof(struct skd_request_context) * skdev->num_req_context);
3966
3967         skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
3968                                      * skdev->num_req_context, GFP_KERNEL);
3969         if (skdev->skreq_table == NULL) {
3970                 rc = -ENOMEM;
3971                 goto err_out;
3972         }
3973
3974         dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
3975                 skdev->sgs_per_request, sizeof(struct scatterlist),
3976                 skdev->sgs_per_request * sizeof(struct scatterlist));
3977
3978         for (i = 0; i < skdev->num_req_context; i++) {
3979                 struct skd_request_context *skreq;
3980
3981                 skreq = &skdev->skreq_table[i];
3982
3983                 skreq->id = i + SKD_ID_RW_REQUEST;
3984                 skreq->state = SKD_REQ_STATE_IDLE;
3985
3986                 skreq->sg = kzalloc(sizeof(struct scatterlist) *
3987                                     skdev->sgs_per_request, GFP_KERNEL);
3988                 if (skreq->sg == NULL) {
3989                         rc = -ENOMEM;
3990                         goto err_out;
3991                 }
3992                 sg_init_table(skreq->sg, skdev->sgs_per_request);
3993
3994                 skreq->sksg_list = skd_cons_sg_list(skdev,
3995                                                     skdev->sgs_per_request,
3996                                                     &skreq->sksg_dma_address);
3997
3998                 if (skreq->sksg_list == NULL) {
3999                         rc = -ENOMEM;
4000                         goto err_out;
4001                 }
4002
4003                 skreq->next = &skreq[1];
4004         }
4005
4006         /* Free list is in order starting with the 0th entry. */
4007         skdev->skreq_table[i - 1].next = NULL;
4008         skdev->skreq_free_list = skdev->skreq_table;
4009
4010 err_out:
4011         return rc;
4012 }
4013
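/*
 * Allocate the table of "special" (pass-through) request contexts.
 * Each entry gets its own FIT message buffer, scatterlist and chained
 * SG descriptor list, and the entries are linked into a free list.
 */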
4014 static int skd_cons_skspcl(struct skd_device *skdev)
4015 {
4016         int rc = 0;
4017         u32 i, nbytes;
4018
4019         dev_dbg(&skdev->pdev->dev,
4020                 "skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4021                 sizeof(struct skd_special_context), skdev->n_special,
4022                 sizeof(struct skd_special_context) * skdev->n_special);
4023
4024         skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4025                                       * skdev->n_special, GFP_KERNEL);
4026         if (skdev->skspcl_table == NULL) {
4027                 rc = -ENOMEM;
4028                 goto err_out;
4029         }
4030
4031         for (i = 0; i < skdev->n_special; i++) {
4032                 struct skd_special_context *skspcl;
4033
4034                 skspcl = &skdev->skspcl_table[i];
4035
4036                 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4037                 skspcl->req.state = SKD_REQ_STATE_IDLE;
4038
4039                 skspcl->req.next = &skspcl[1].req;
4040
4041                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4042
4043                 skspcl->msg_buf =
4044                         pci_zalloc_consistent(skdev->pdev, nbytes,
4045                                               &skspcl->mb_dma_address);
4046                 if (skspcl->msg_buf == NULL) {
4047                         rc = -ENOMEM;
4048                         goto err_out;
4049                 }
4050
4051                 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4052                                          SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4053                 if (skspcl->req.sg == NULL) {
4054                         rc = -ENOMEM;
4055                         goto err_out;
4056                 }
4057
4058                 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4059                                                          SKD_N_SG_PER_SPECIAL,
4060                                                          &skspcl->req.
4061                                                          sksg_dma_address);
4062                 if (skspcl->req.sksg_list == NULL) {
4063                         rc = -ENOMEM;
4064                         goto err_out;
4065                 }
4066         }
4067
4068         /* Free list is in order starting with the 0th entry. */
4069         skdev->skspcl_table[i - 1].req.next = NULL;
4070         skdev->skspcl_free_list = skdev->skspcl_table;
4071
4072         return rc;
4073
4074 err_out:
4075         return rc;
4076 }
4077
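/*
 * Set up the single internal special context (internal_skspcl) used for
 * driver-generated commands such as the SYNCHRONIZE_CACHE issued from
 * skd_stop_device: a data buffer, a FIT message buffer and a one-entry
 * SG list, pre-formatted by skd_format_internal_skspcl().
 */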
4078 static int skd_cons_sksb(struct skd_device *skdev)
4079 {
4080         int rc = 0;
4081         struct skd_special_context *skspcl;
4082         u32 nbytes;
4083
4084         skspcl = &skdev->internal_skspcl;
4085
4086         skspcl->req.id = 0 + SKD_ID_INTERNAL;
4087         skspcl->req.state = SKD_REQ_STATE_IDLE;
4088
4089         nbytes = SKD_N_INTERNAL_BYTES;
4090
4091         skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4092                                                  &skspcl->db_dma_address);
4093         if (skspcl->data_buf == NULL) {
4094                 rc = -ENOMEM;
4095                 goto err_out;
4096         }
4097
4098         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4099         skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4100                                                 &skspcl->mb_dma_address);
4101         if (skspcl->msg_buf == NULL) {
4102                 rc = -ENOMEM;
4103                 goto err_out;
4104         }
4105
4106         skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4107                                                  &skspcl->req.sksg_dma_address);
4108         if (skspcl->req.sksg_list == NULL) {
4109                 rc = -ENOMEM;
4110                 goto err_out;
4111         }
4112
4113         if (!skd_format_internal_skspcl(skdev)) {
4114                 rc = -EINVAL;
4115                 goto err_out;
4116         }
4117
4118 err_out:
4119         return rc;
4120 }
4121
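/*
 * Allocate the gendisk and the legacy request queue, apply the queue
 * limits (max segments, max sectors, 8KB optimal I/O, write cache/FUA)
 * and leave the queue stopped; it is started later once the drive
 * comes online.
 */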
4122 static int skd_cons_disk(struct skd_device *skdev)
4123 {
4124         int rc = 0;
4125         struct gendisk *disk;
4126         struct request_queue *q;
4127         unsigned long flags;
4128
4129         disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4130         if (!disk) {
4131                 rc = -ENOMEM;
4132                 goto err_out;
4133         }
4134
4135         skdev->disk = disk;
4136         sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4137
4138         disk->major = skdev->major;
4139         disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4140         disk->fops = &skd_blockdev_ops;
4141         disk->private_data = skdev;
4142
4143         q = blk_init_queue(skd_request_fn, &skdev->lock);
4144         if (!q) {
4145                 rc = -ENOMEM;
4146                 goto err_out;
4147         }
4148         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
4149
4150         skdev->queue = q;
4151         disk->queue = q;
4152         q->queuedata = skdev;
4153
4154         blk_queue_write_cache(q, true, true);
4155         blk_queue_max_segments(q, skdev->sgs_per_request);
4156         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4157
4158         /* set optimal I/O size to 8KB */
4159         blk_queue_io_opt(q, 8192);
4160
4161         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4162         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4163
4164         spin_lock_irqsave(&skdev->lock, flags);
4165         dev_dbg(&skdev->pdev->dev, "stopping queue\n");
4166         blk_stop_queue(skdev->queue);
4167         spin_unlock_irqrestore(&skdev->lock, flags);
4168
4169 err_out:
4170         return rc;
4171 }
4172
4173 #define SKD_N_DEV_TABLE         16u
4174 static u32 skd_next_devno;
4175
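/*
 * Allocate and initialize a skd_device and all of its per-device
 * resources (completion queue, FIT message table, request table,
 * special contexts, internal context, gendisk/queue). Any failure
 * tears everything back down via skd_destruct() and returns NULL.
 */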
4176 static struct skd_device *skd_construct(struct pci_dev *pdev)
4177 {
4178         struct skd_device *skdev;
4179         int blk_major = skd_major;
4180         int rc;
4181
4182         skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4183
4184         if (!skdev) {
4185                 dev_err(&pdev->dev, "memory alloc failure\n");
4186                 return NULL;
4187         }
4188
4189         skdev->state = SKD_DRVR_STATE_LOAD;
4190         skdev->pdev = pdev;
4191         skdev->devno = skd_next_devno++;
4192         skdev->major = blk_major;
4193         skdev->dev_max_queue_depth = 0;
4194
4195         skdev->num_req_context = skd_max_queue_depth;
4196         skdev->num_fitmsg_context = skd_max_queue_depth;
4197         skdev->n_special = skd_max_pass_thru;
4198         skdev->cur_max_queue_depth = 1;
4199         skdev->queue_low_water_mark = 1;
4200         skdev->proto_ver = 99;
4201         skdev->sgs_per_request = skd_sgs_per_request;
4202         skdev->dbg_level = skd_dbg_level;
4203
4204         spin_lock_init(&skdev->lock);
4205
4206         INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4207
4208         dev_dbg(&skdev->pdev->dev, "skcomp\n");
4209         rc = skd_cons_skcomp(skdev);
4210         if (rc < 0)
4211                 goto err_out;
4212
4213         dev_dbg(&skdev->pdev->dev, "skmsg\n");
4214         rc = skd_cons_skmsg(skdev);
4215         if (rc < 0)
4216                 goto err_out;
4217
4218         dev_dbg(&skdev->pdev->dev, "skreq\n");
4219         rc = skd_cons_skreq(skdev);
4220         if (rc < 0)
4221                 goto err_out;
4222
4223         dev_dbg(&skdev->pdev->dev, "skspcl\n");
4224         rc = skd_cons_skspcl(skdev);
4225         if (rc < 0)
4226                 goto err_out;
4227
4228         dev_dbg(&skdev->pdev->dev, "sksb\n");
4229         rc = skd_cons_sksb(skdev);
4230         if (rc < 0)
4231                 goto err_out;
4232
4233         dev_dbg(&skdev->pdev->dev, "disk\n");
4234         rc = skd_cons_disk(skdev);
4235         if (rc < 0)
4236                 goto err_out;
4237
4238         dev_dbg(&skdev->pdev->dev, "VICTORY\n");
4239         return skdev;
4240
4241 err_out:
4242         dev_dbg(&skdev->pdev->dev, "construct failed\n");
4243         skd_destruct(skdev);
4244         return NULL;
4245 }
4246
4247 /*
4248  *****************************************************************************
4249  * DESTRUCT (FREE)
4250  *****************************************************************************
4251  */
4252
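/*
 * The skd_free_*() routines below mirror the skd_cons_*() routines and
 * are safe to call on a partially constructed device: each one checks
 * for NULL before freeing.
 */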
4253 static void skd_free_skcomp(struct skd_device *skdev)
4254 {
4255         if (skdev->skcomp_table)
4256                 pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
4257                                     skdev->skcomp_table, skdev->cq_dma_address);
4258
4259         skdev->skcomp_table = NULL;
4260         skdev->cq_dma_address = 0;
4261 }
4262
4263 static void skd_free_skmsg(struct skd_device *skdev)
4264 {
4265         u32 i;
4266
4267         if (skdev->skmsg_table == NULL)
4268                 return;
4269
4270         for (i = 0; i < skdev->num_fitmsg_context; i++) {
4271                 struct skd_fitmsg_context *skmsg;
4272
4273                 skmsg = &skdev->skmsg_table[i];
4274
4275                 if (skmsg->msg_buf != NULL) {
4276                         skmsg->msg_buf += skmsg->offset;
4277                         skmsg->mb_dma_address += skmsg->offset;
4278                         pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4279                                             skmsg->msg_buf,
4280                                             skmsg->mb_dma_address);
4281                 }
4282                 skmsg->msg_buf = NULL;
4283                 skmsg->mb_dma_address = 0;
4284         }
4285
4286         kfree(skdev->skmsg_table);
4287         skdev->skmsg_table = NULL;
4288 }
4289
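/*
 * skd_free_sg_list() - free one DMA-coherent array of n_sg
 * fit_sg_descriptor elements.  Shared by the request, pass-through and
 * internal-buffer teardown paths below; a NULL sg_list is ignored.
 */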
4290 static void skd_free_sg_list(struct skd_device *skdev,
4291                              struct fit_sg_descriptor *sg_list,
4292                              u32 n_sg, dma_addr_t dma_addr)
4293 {
4294         if (sg_list != NULL) {
4295                 u32 nbytes;
4296
4297                 nbytes = sizeof(*sg_list) * n_sg;
4298
4299                 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4300         }
4301 }
4302
4303 static void skd_free_skreq(struct skd_device *skdev)
4304 {
4305         u32 i;
4306
4307         if (skdev->skreq_table == NULL)
4308                 return;
4309
4310         for (i = 0; i < skdev->num_req_context; i++) {
4311                 struct skd_request_context *skreq;
4312
4313                 skreq = &skdev->skreq_table[i];
4314
4315                 skd_free_sg_list(skdev, skreq->sksg_list,
4316                                  skdev->sgs_per_request,
4317                                  skreq->sksg_dma_address);
4318
4319                 skreq->sksg_list = NULL;
4320                 skreq->sksg_dma_address = 0;
4321
4322                 kfree(skreq->sg);
4323         }
4324
4325         kfree(skdev->skreq_table);
4326         skdev->skreq_table = NULL;
4327 }
4328
4329 static void skd_free_skspcl(struct skd_device *skdev)
4330 {
4331         u32 i;
4332         u32 nbytes;
4333
4334         if (skdev->skspcl_table == NULL)
4335                 return;
4336
4337         for (i = 0; i < skdev->n_special; i++) {
4338                 struct skd_special_context *skspcl;
4339
4340                 skspcl = &skdev->skspcl_table[i];
4341
4342                 if (skspcl->msg_buf != NULL) {
4343                         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4344                         pci_free_consistent(skdev->pdev, nbytes,
4345                                             skspcl->msg_buf,
4346                                             skspcl->mb_dma_address);
4347                 }
4348
4349                 skspcl->msg_buf = NULL;
4350                 skspcl->mb_dma_address = 0;
4351
4352                 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4353                                  SKD_N_SG_PER_SPECIAL,
4354                                  skspcl->req.sksg_dma_address);
4355
4356                 skspcl->req.sksg_list = NULL;
4357                 skspcl->req.sksg_dma_address = 0;
4358
4359                 kfree(skspcl->req.sg);
4360         }
4361
4362         kfree(skdev->skspcl_table);
4363         skdev->skspcl_table = NULL;
4364 }
4365
4366 static void skd_free_sksb(struct skd_device *skdev)
4367 {
4368         struct skd_special_context *skspcl;
4369         u32 nbytes;
4370
4371         skspcl = &skdev->internal_skspcl;
4372
4373         if (skspcl->data_buf != NULL) {
4374                 nbytes = SKD_N_INTERNAL_BYTES;
4375
4376                 pci_free_consistent(skdev->pdev, nbytes,
4377                                     skspcl->data_buf, skspcl->db_dma_address);
4378         }
4379
4380         skspcl->data_buf = NULL;
4381         skspcl->db_dma_address = 0;
4382
4383         if (skspcl->msg_buf != NULL) {
4384                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4385                 pci_free_consistent(skdev->pdev, nbytes,
4386                                     skspcl->msg_buf, skspcl->mb_dma_address);
4387         }
4388
4389         skspcl->msg_buf = NULL;
4390         skspcl->mb_dma_address = 0;
4391
4392         skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4393                          skspcl->req.sksg_dma_address);
4394
4395         skspcl->req.sksg_list = NULL;
4396         skspcl->req.sksg_dma_address = 0;
4397 }
4398
4399 static void skd_free_disk(struct skd_device *skdev)
4400 {
4401         struct gendisk *disk = skdev->disk;
4402
4403         if (disk && (disk->flags & GENHD_FL_UP))
4404                 del_gendisk(disk);
4405
4406         if (skdev->queue) {
4407                 blk_cleanup_queue(skdev->queue);
4408                 skdev->queue = NULL;
4409                 disk->queue = NULL;
4410         }
4411
4412         put_disk(disk);
4413         skdev->disk = NULL;
4414 }
4415
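/*
 * skd_destruct() - tear down everything skd_construct() built, in the
 * reverse order of construction.  Every skd_free_*() helper checks for a
 * NULL or absent table first, so this is also safe to call from the
 * construct error path on a partially built device.
 */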
4416 static void skd_destruct(struct skd_device *skdev)
4417 {
4418         if (skdev == NULL)
4419                 return;
4420
4421         dev_dbg(&skdev->pdev->dev, "disk\n");
4422         skd_free_disk(skdev);
4423
4424         dev_dbg(&skdev->pdev->dev, "sksb\n");
4425         skd_free_sksb(skdev);
4426
4427         dev_dbg(&skdev->pdev->dev, "skspcl\n");
4428         skd_free_skspcl(skdev);
4429
4430         dev_dbg(&skdev->pdev->dev, "skreq\n");
4431         skd_free_skreq(skdev);
4432
4433         dev_dbg(&skdev->pdev->dev, "skmsg\n");
4434         skd_free_skmsg(skdev);
4435
4436         dev_dbg(&skdev->pdev->dev, "skcomp\n");
4437         skd_free_skcomp(skdev);
4438
4439         dev_dbg(&skdev->pdev->dev, "skdev\n");
4440         kfree(skdev);
4441 }
4442
4443 /*
4444  *****************************************************************************
4445  * BLOCK DEVICE (BDEV) GLUE
4446  *****************************************************************************
4447  */
4448
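/*
 * skd_bdev_getgeo() - report a synthetic CHS geometry for HDIO_GETGEO.
 *
 * The device is an SSD, so the geometry is fabricated: 64 heads and 255
 * sectors per track, with the cylinder count derived from the reported
 * capacity.  As an illustrative example, a device of 1,953,525,168
 * 512-byte sectors (~1 TB) would report roughly 119,701 cylinders.
 */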
4449 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4450 {
4451         struct skd_device *skdev;
4452         u64 capacity;
4453
4454         skdev = bdev->bd_disk->private_data;
4455
4456         dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
4457                 bdev->bd_disk->disk_name, current->comm);
4458
4459         if (skdev->read_cap_is_valid) {
4460                 capacity = get_capacity(skdev->disk);
4461                 geo->heads = 64;
4462                 geo->sectors = 255;
4463                 geo->cylinders = (capacity) / (255 * 64);
4464
4465                 return 0;
4466         }
4467         return -EIO;
4468 }
4469
4470 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
4471 {
4472         dev_dbg(&skdev->pdev->dev, "add_disk\n");
4473         device_add_disk(parent, skdev->disk);
4474         return 0;
4475 }
4476
4477 static const struct block_device_operations skd_blockdev_ops = {
4478         .owner          = THIS_MODULE,
4479         .ioctl          = skd_bdev_ioctl,
4480         .getgeo         = skd_bdev_getgeo,
4481 };
4482
4483 /*
4484  *****************************************************************************
4485  * PCIe DRIVER GLUE
4486  *****************************************************************************
4487  */
4488
4489 static const struct pci_device_id skd_pci_tbl[] = {
4490         { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4491           PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4492         { 0 }                     /* terminate list */
4493 };
4494
4495 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4496
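/*
 * skd_pci_info() - format a short description of the negotiated PCIe link.
 *
 * Reads the Link Status register (offset 0x12 into the PCIe capability):
 * bits [3:0] give the current link speed and bits [9:4] the negotiated
 * width.  Only 2.5 GT/s and 5.0 GT/s are decoded; anything else,
 * including an 8 GT/s link, is reported as "<unknown>".
 */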
4497 static char *skd_pci_info(struct skd_device *skdev, char *str)
4498 {
4499         int pcie_reg;
4500
4501         strcpy(str, "PCIe (");
4502         pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4503
4504         if (pcie_reg) {
4505
4506                 char lwstr[6];
4507                 uint16_t pcie_lstat, lspeed, lwidth;
4508
4509                 pcie_reg += 0x12;       /* PCI_EXP_LNKSTA */
4510                 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4511                 lspeed = pcie_lstat & (0xF);
4512                 lwidth = (pcie_lstat & 0x3F0) >> 4;
4513
4514                 if (lspeed == 1)
4515                         strcat(str, "2.5GT/s ");
4516                 else if (lspeed == 2)
4517                         strcat(str, "5.0GT/s ");
4518                 else
4519                         strcat(str, "<unknown> ");
4520                 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4521                 strcat(str, lwstr);
4522         }
4523         return str;
4524 }
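/*
 * For reference only: a minimal sketch of the same link-status decode
 * using the generic pcie_capability helpers and the PCI_EXP_LNKSTA_*
 * definitions from <uapi/linux/pci_regs.h> instead of a hand-computed
 * config-space offset.  The function name skd_pci_info_caps is arbitrary;
 * this sketch is illustrative and is not used by the driver.
 */
static __maybe_unused char *skd_pci_info_caps(struct skd_device *skdev,
					      char *str)
{
	u16 lnksta;

	strcpy(str, "PCIe (");
	if (!pcie_capability_read_word(skdev->pdev, PCI_EXP_LNKSTA, &lnksta)) {
		u16 lspeed = lnksta & PCI_EXP_LNKSTA_CLS;
		u16 lwidth = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			     PCI_EXP_LNKSTA_NLW_SHIFT;
		char lwstr[6];

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else if (lspeed == 3)
			strcat(str, "8.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	}
	return str;
}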
4525
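/*
 * skd_pci_probe() - bring up one s1120 adapter.
 *
 * Sequence: enable the PCI device and claim its regions, set a 64-bit DMA
 * mask (falling back to 32-bit), register the block major on first use,
 * build the skd_device with skd_construct(), map the BARs, hook up the
 * interrupt and timer, start the device, then wait up to
 * SKD_START_WAIT_SECONDS for the drive to come online before adding the
 * gendisk.  A timeout without another error is reported as -ENXIO.
 */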
4526 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4527 {
4528         int i;
4529         int rc = 0;
4530         char pci_str[32];
4531         struct skd_device *skdev;
4532
4533         dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n",
4534                  DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4535         dev_info(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
4536                  pdev->device);
4537
4538         rc = pci_enable_device(pdev);
4539         if (rc)
4540                 return rc;
4541         rc = pci_request_regions(pdev, DRV_NAME);
4542         if (rc)
4543                 goto err_out;
4544         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4545         if (!rc) {
4546                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4547                         dev_err(&pdev->dev,
4548                                 "consistent DMA mask error\n");
4549                 }
4550         } else {
4551                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4552                 if (rc) {
4553                         dev_err(&pdev->dev, "DMA mask error %d\n", rc);
4554                         goto err_out_regions;
4555                 }
4556         }
4557
4558         if (!skd_major) {
4559                 rc = register_blkdev(0, DRV_NAME);
4560                 if (rc < 0)
4561                         goto err_out_regions;
4562                 BUG_ON(!rc);
4563                 skd_major = rc;
4564         }
4565
4566         skdev = skd_construct(pdev);
4567         if (skdev == NULL) {
4568                 rc = -ENOMEM;
4569                 goto err_out_regions;
4570         }
4571
4572         skd_pci_info(skdev, pci_str);
4573         dev_info(&pdev->dev, "%s 64bit\n", pci_str);
4574
4575         pci_set_master(pdev);
4576         rc = pci_enable_pcie_error_reporting(pdev);
4577         if (rc) {
4578                 dev_err(&pdev->dev,
4579                         "bad enable of PCIe error reporting rc=%d\n", rc);
4580                 skdev->pcie_error_reporting_is_enabled = 0;
4581         } else
4582                 skdev->pcie_error_reporting_is_enabled = 1;
4583
4584         pci_set_drvdata(pdev, skdev);
4585
4586         for (i = 0; i < SKD_MAX_BARS; i++) {
4587                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4588                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4589                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4590                                             skdev->mem_size[i]);
4591                 if (!skdev->mem_map[i]) {
4592                         dev_err(&pdev->dev,
4593                                 "Unable to map adapter memory!\n");
4594                         rc = -ENODEV;
4595                         goto err_out_iounmap;
4596                 }
4597                 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
4598                         skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
4599                         skdev->mem_size[i]);
4600         }
4601
4602         rc = skd_acquire_irq(skdev);
4603         if (rc) {
4604                 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
4605                 goto err_out_iounmap;
4606         }
4607
4608         rc = skd_start_timer(skdev);
4609         if (rc)
4610                 goto err_out_timer;
4611
4612         init_waitqueue_head(&skdev->waitq);
4613
4614         skd_start_device(skdev);
4615
4616         rc = wait_event_interruptible_timeout(skdev->waitq,
4617                                               (skdev->gendisk_on),
4618                                               (SKD_START_WAIT_SECONDS * HZ));
4619         if (skdev->gendisk_on > 0) {
4620                 /* device came on-line after reset */
4621                 skd_bdev_attach(&pdev->dev, skdev);
4622                 rc = 0;
4623         } else {
4624                 /* We timed out; something is wrong with the device,
4625                  * so don't add the disk structure. */
4626                 dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
4627                         rc);
4628                 /* no error was reported, so treat the timeout as -ENXIO */
4629                 if (!rc)
4630                         rc = -ENXIO;
4631                 goto err_out_timer;
4632         }
4633
4634         return rc;
4635
4636 err_out_timer:
4637         skd_stop_device(skdev);
4638         skd_release_irq(skdev);
4639
4640 err_out_iounmap:
4641         for (i = 0; i < SKD_MAX_BARS; i++)
4642                 if (skdev->mem_map[i])
4643                         iounmap(skdev->mem_map[i]);
4644
4645         if (skdev->pcie_error_reporting_is_enabled)
4646                 pci_disable_pcie_error_reporting(pdev);
4647
4648         skd_destruct(skdev);
4649
4650 err_out_regions:
4651         pci_release_regions(pdev);
4652
4653 err_out:
4654         pci_disable_device(pdev);
4655         pci_set_drvdata(pdev, NULL);
4656         return rc;
4657 }
4658
4659 static void skd_pci_remove(struct pci_dev *pdev)
4660 {
4661         int i;
4662         struct skd_device *skdev;
4663
4664         skdev = pci_get_drvdata(pdev);
4665         if (!skdev) {
4666                 dev_err(&pdev->dev, "no device data for PCI\n");
4667                 return;
4668         }
4669         skd_stop_device(skdev);
4670         skd_release_irq(skdev);
4671
4672         for (i = 0; i < SKD_MAX_BARS; i++)
4673                 if (skdev->mem_map[i])
4674                         iounmap(skdev->mem_map[i]);
4675
4676         if (skdev->pcie_error_reporting_is_enabled)
4677                 pci_disable_pcie_error_reporting(pdev);
4678
4679         skd_destruct(skdev);
4680
4681         pci_release_regions(pdev);
4682         pci_disable_device(pdev);
4683         pci_set_drvdata(pdev, NULL);
4686 }
4687
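/*
 * Legacy PM hooks: suspend quiesces the device and releases the IRQ, BAR
 * mappings and regions, while resume repeats most of the probe sequence
 * (DMA mask, BAR mapping, IRQ, timer, device start) on the already
 * constructed skd_device.
 */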
4688 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4689 {
4690         int i;
4691         struct skd_device *skdev;
4692
4693         skdev = pci_get_drvdata(pdev);
4694         if (!skdev) {
4695                 dev_err(&pdev->dev, "no device data for PCI\n");
4696                 return -EIO;
4697         }
4698
4699         skd_stop_device(skdev);
4700
4701         skd_release_irq(skdev);
4702
4703         for (i = 0; i < SKD_MAX_BARS; i++)
4704                 if (skdev->mem_map[i])
4705                         iounmap(skdev->mem_map[i]);
4706
4707         if (skdev->pcie_error_reporting_is_enabled)
4708                 pci_disable_pcie_error_reporting(pdev);
4709
4710         pci_release_regions(pdev);
4711         pci_save_state(pdev);
4712         pci_disable_device(pdev);
4713         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4714         return 0;
4715 }
4716
4717 static int skd_pci_resume(struct pci_dev *pdev)
4718 {
4719         int i;
4720         int rc = 0;
4721         struct skd_device *skdev;
4722
4723         skdev = pci_get_drvdata(pdev);
4724         if (!skdev) {
4725                 dev_err(&pdev->dev, "no device data for PCI\n");
4726                 return -EIO;
4727         }
4728
4729         pci_set_power_state(pdev, PCI_D0);
4730         pci_enable_wake(pdev, PCI_D0, 0);
4731         pci_restore_state(pdev);
4732
4733         rc = pci_enable_device(pdev);
4734         if (rc)
4735                 return rc;
4736         rc = pci_request_regions(pdev, DRV_NAME);
4737         if (rc)
4738                 goto err_out;
4739         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4740         if (!rc) {
4741                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4742
4743                         dev_err(&pdev->dev,
4744                                 "consistent DMA mask error\n");
4745                 }
4746         } else {
4747                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4748                 if (rc) {
4749
4750                         dev_err(&pdev->dev, "DMA mask error %d\n", rc);
4751                         goto err_out_regions;
4752                 }
4753         }
4754
4755         pci_set_master(pdev);
4756         rc = pci_enable_pcie_error_reporting(pdev);
4757         if (rc) {
4758                 dev_err(&pdev->dev,
4759                         "bad enable of PCIe error reporting rc=%d\n", rc);
4760                 skdev->pcie_error_reporting_is_enabled = 0;
4761         } else
4762                 skdev->pcie_error_reporting_is_enabled = 1;
4763
4764         for (i = 0; i < SKD_MAX_BARS; i++) {
4765
4766                 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4767                 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4768                 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4769                                             skdev->mem_size[i]);
4770                 if (!skdev->mem_map[i]) {
4771                         dev_err(&pdev->dev, "Unable to map adapter memory!\n");
4772                         rc = -ENODEV;
4773                         goto err_out_iounmap;
4774                 }
4775                 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
4776                         skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
4777                         skdev->mem_size[i]);
4778         }
4779         rc = skd_acquire_irq(skdev);
4780         if (rc) {
4781                 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
4782                 goto err_out_iounmap;
4783         }
4784
4785         rc = skd_start_timer(skdev);
4786         if (rc)
4787                 goto err_out_timer;
4788
4789         init_waitqueue_head(&skdev->waitq);
4790
4791         skd_start_device(skdev);
4792
4793         return rc;
4794
4795 err_out_timer:
4796         skd_stop_device(skdev);
4797         skd_release_irq(skdev);
4798
4799 err_out_iounmap:
4800         for (i = 0; i < SKD_MAX_BARS; i++)
4801                 if (skdev->mem_map[i])
4802                         iounmap(skdev->mem_map[i]);
4803
4804         if (skdev->pcie_error_reporting_is_enabled)
4805                 pci_disable_pcie_error_reporting(pdev);
4806
4807 err_out_regions:
4808         pci_release_regions(pdev);
4809
4810 err_out:
4811         pci_disable_device(pdev);
4812         return rc;
4813 }
4814
4815 static void skd_pci_shutdown(struct pci_dev *pdev)
4816 {
4817         struct skd_device *skdev;
4818
4819         dev_err(&pdev->dev, "%s called\n", __func__);
4820
4821         skdev = pci_get_drvdata(pdev);
4822         if (!skdev) {
4823                 dev_err(&pdev->dev, "no device data for PCI\n");
4824                 return;
4825         }
4826
4827         dev_err(&pdev->dev, "calling stop\n");
4828         skd_stop_device(skdev);
4829 }
4830
4831 static struct pci_driver skd_driver = {
4832         .name           = DRV_NAME,
4833         .id_table       = skd_pci_tbl,
4834         .probe          = skd_pci_probe,
4835         .remove         = skd_pci_remove,
4836         .suspend        = skd_pci_suspend,
4837         .resume         = skd_pci_resume,
4838         .shutdown       = skd_pci_shutdown,
4839 };
4840
4841 /*
4842  *****************************************************************************
4843  * LOGGING SUPPORT
4844  *****************************************************************************
4845  */
4846
4847 const char *skd_drive_state_to_str(int state)
4848 {
4849         switch (state) {
4850         case FIT_SR_DRIVE_OFFLINE:
4851                 return "OFFLINE";
4852         case FIT_SR_DRIVE_INIT:
4853                 return "INIT";
4854         case FIT_SR_DRIVE_ONLINE:
4855                 return "ONLINE";
4856         case FIT_SR_DRIVE_BUSY:
4857                 return "BUSY";
4858         case FIT_SR_DRIVE_FAULT:
4859                 return "FAULT";
4860         case FIT_SR_DRIVE_DEGRADED:
4861                 return "DEGRADED";
4862         case FIT_SR_PCIE_LINK_DOWN:
4863                 return "LINK_DOWN";
4864         case FIT_SR_DRIVE_SOFT_RESET:
4865                 return "SOFT_RESET";
4866         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
4867                 return "NEED_FW";
4868         case FIT_SR_DRIVE_INIT_FAULT:
4869                 return "INIT_FAULT";
4870         case FIT_SR_DRIVE_BUSY_SANITIZE:
4871                 return "BUSY_SANITIZE";
4872         case FIT_SR_DRIVE_BUSY_ERASE:
4873                 return "BUSY_ERASE";
4874         case FIT_SR_DRIVE_FW_BOOTING:
4875                 return "FW_BOOTING";
4876         default:
4877                 return "???";
4878         }
4879 }
4880
4881 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
4882 {
4883         switch (state) {
4884         case SKD_DRVR_STATE_LOAD:
4885                 return "LOAD";
4886         case SKD_DRVR_STATE_IDLE:
4887                 return "IDLE";
4888         case SKD_DRVR_STATE_BUSY:
4889                 return "BUSY";
4890         case SKD_DRVR_STATE_STARTING:
4891                 return "STARTING";
4892         case SKD_DRVR_STATE_ONLINE:
4893                 return "ONLINE";
4894         case SKD_DRVR_STATE_PAUSING:
4895                 return "PAUSING";
4896         case SKD_DRVR_STATE_PAUSED:
4897                 return "PAUSED";
4898         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
4899                 return "DRAINING_TIMEOUT";
4900         case SKD_DRVR_STATE_RESTARTING:
4901                 return "RESTARTING";
4902         case SKD_DRVR_STATE_RESUMING:
4903                 return "RESUMING";
4904         case SKD_DRVR_STATE_STOPPING:
4905                 return "STOPPING";
4906         case SKD_DRVR_STATE_SYNCING:
4907                 return "SYNCING";
4908         case SKD_DRVR_STATE_FAULT:
4909                 return "FAULT";
4910         case SKD_DRVR_STATE_DISAPPEARED:
4911                 return "DISAPPEARED";
4912         case SKD_DRVR_STATE_BUSY_ERASE:
4913                 return "BUSY_ERASE";
4914         case SKD_DRVR_STATE_BUSY_SANITIZE:
4915                 return "BUSY_SANITIZE";
4916         case SKD_DRVR_STATE_BUSY_IMMINENT:
4917                 return "BUSY_IMMINENT";
4918         case SKD_DRVR_STATE_WAIT_BOOT:
4919                 return "WAIT_BOOT";
4920
4921         default:
4922                 return "???";
4923         }
4924 }
4925
4926 static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
4927 {
4928         switch (state) {
4929         case SKD_MSG_STATE_IDLE:
4930                 return "IDLE";
4931         case SKD_MSG_STATE_BUSY:
4932                 return "BUSY";
4933         default:
4934                 return "???";
4935         }
4936 }
4937
4938 static const char *skd_skreq_state_to_str(enum skd_req_state state)
4939 {
4940         switch (state) {
4941         case SKD_REQ_STATE_IDLE:
4942                 return "IDLE";
4943         case SKD_REQ_STATE_SETUP:
4944                 return "SETUP";
4945         case SKD_REQ_STATE_BUSY:
4946                 return "BUSY";
4947         case SKD_REQ_STATE_COMPLETED:
4948                 return "COMPLETED";
4949         case SKD_REQ_STATE_TIMEOUT:
4950                 return "TIMEOUT";
4951         case SKD_REQ_STATE_ABORTED:
4952                 return "ABORTED";
4953         default:
4954                 return "???";
4955         }
4956 }
4957
4958 static void skd_log_skdev(struct skd_device *skdev, const char *event)
4959 {
4960         dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
4961         dev_dbg(&skdev->pdev->dev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
4962                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
4963                 skd_skdev_state_to_str(skdev->state), skdev->state);
4964         dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
4965                 skdev->in_flight, skdev->cur_max_queue_depth,
4966                 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
4967         dev_dbg(&skdev->pdev->dev, "  timestamp=0x%x cycle=%d cycle_ix=%d\n",
4968                 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
4969 }
4970
4971 static void skd_log_skmsg(struct skd_device *skdev,
4972                           struct skd_fitmsg_context *skmsg, const char *event)
4973 {
4974         dev_dbg(&skdev->pdev->dev, "skmsg=%p event='%s'\n", skmsg, event);
4975         dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x length=%d\n",
4976                 skd_skmsg_state_to_str(skmsg->state), skmsg->state, skmsg->id,
4977                 skmsg->length);
4978 }
4979
4980 static void skd_log_skreq(struct skd_device *skdev,
4981                           struct skd_request_context *skreq, const char *event)
4982 {
4983         dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
4984         dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
4985                 skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
4986                 skreq->fitmsg_id);
4987         dev_dbg(&skdev->pdev->dev, "  timo=0x%x sg_dir=%d n_sg=%d\n",
4988                 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
4989
4990         if (skreq->req != NULL) {
4991                 struct request *req = skreq->req;
4992                 u32 lba = (u32)blk_rq_pos(req);
4993                 u32 count = blk_rq_sectors(req);
4994
4995                 dev_dbg(&skdev->pdev->dev,
4996                         "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
4997                         lba, lba, count, count, (int)rq_data_dir(req));
4998         } else
4999                 dev_dbg(&skdev->pdev->dev, "req=NULL\n");
5000 }
5001
5002 /*
5003  *****************************************************************************
5004  * MODULE GLUE
5005  *****************************************************************************
5006  */
5007
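/*
 * skd_init() - module entry point.
 *
 * The BUILD_BUG_ON() pins the FIT message layout: one fit_msg_hdr plus
 * SKD_MAX_REQ_PER_MSG skd_scsi_request entries must fill exactly
 * SKD_N_FITMSG_BYTES.  Out-of-range tunables are not rejected; they are
 * logged and reset to their defaults before the PCI driver is registered.
 * For example (illustrative, assuming the variables checked below are
 * exposed as module parameters):
 *
 *	modprobe skd skd_max_queue_depth=1000 skd_isr_type=7
 *
 * would load with both values reset to their defaults.
 */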
5008 static int __init skd_init(void)
5009 {
5010         BUILD_BUG_ON(sizeof(struct fit_msg_hdr) + SKD_MAX_REQ_PER_MSG *
5011                      sizeof(struct skd_scsi_request) != SKD_N_FITMSG_BYTES);
5012
5013         pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5014
5015         switch (skd_isr_type) {
5016         case SKD_IRQ_LEGACY:
5017         case SKD_IRQ_MSI:
5018         case SKD_IRQ_MSIX:
5019                 break;
5020         default:
5021                 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
5022                        skd_isr_type, SKD_IRQ_DEFAULT);
5023                 skd_isr_type = SKD_IRQ_DEFAULT;
5024         }
5025
5026         if (skd_max_queue_depth < 1 ||
5027             skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5028                 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
5029                        skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5030                 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5031         }
5032
5033         if (skd_max_req_per_msg < 1 ||
5034             skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
5035                 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
5036                        skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5037                 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5038         }
5039
5040         if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5041                 pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
5042                        skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5043                 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5044         }
5045
5046         if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5047                 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
5048                        skd_dbg_level, 0);
5049                 skd_dbg_level = 0;
5050         }
5051
5052         if (skd_isr_comp_limit < 0) {
5053                 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
5054                        skd_isr_comp_limit, 0);
5055                 skd_isr_comp_limit = 0;
5056         }
5057
5058         if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5059                 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
5060                        skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5061                 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5062         }
5063
5064         return pci_register_driver(&skd_driver);
5065 }
5066
5067 static void __exit skd_exit(void)
5068 {
5069         pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5070
5071         pci_unregister_driver(&skd_driver);
5072
5073         if (skd_major)
5074                 unregister_blkdev(skd_major, DRV_NAME);
5075 }
5076
5077 module_init(skd_init);
5078 module_exit(skd_exit);