2 * Driver for sTec s1120 PCIe SSDs. sTec was acquired by HGST in 2013;
3 * HGST itself had been acquired by Western Digital in 2012.
5 * Copyright 2012 sTec, Inc.
6 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
8 * This file is part of the Linux kernel, and is made available under
9 * the terms of the GNU General Public License version 2.
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/pci.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/blkdev.h>
19 #include <linux/sched.h>
20 #include <linux/interrupt.h>
21 #include <linux/compiler.h>
22 #include <linux/workqueue.h>
23 #include <linux/delay.h>
24 #include <linux/time.h>
25 #include <linux/hdreg.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/completion.h>
28 #include <linux/scatterlist.h>
29 #include <linux/version.h>
30 #include <linux/err.h>
31 #include <linux/aer.h>
32 #include <linux/wait.h>
33 #include <linux/uio.h>
34 #include <scsi/scsi.h>
37 #include <linux/uaccess.h>
38 #include <asm/unaligned.h>
40 #include "skd_s1120.h"
42 static int skd_dbg_level;
43 static int skd_isr_comp_limit = 4;
49 STEC_LINK_UNKNOWN = 0xFF
53 SKD_FLUSH_INITIALIZER,
54 SKD_FLUSH_ZERO_SIZE_FIRST,
55 SKD_FLUSH_DATA_SECOND,
58 #define SKD_ASSERT(expr) \
59 	do { \
60 		if (unlikely(!(expr))) { \
61 			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
62 			       # expr, __FILE__, __func__, __LINE__); \
63 		} \
64 	} while (0)
66 #define DRV_NAME "skd"
67 #define DRV_VERSION "2.2.1"
68 #define DRV_BUILD_ID "0260"
69 #define PFX DRV_NAME ": "
70 #define DRV_BIN_VERSION 0x100
71 #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
73 MODULE_LICENSE("GPL");
75 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
76 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
78 #define PCI_VENDOR_ID_STEC 0x1B39
79 #define PCI_DEVICE_ID_S1120 0x0001
81 #define SKD_FUA_NV (1 << 1)
82 #define SKD_MINORS_PER_DEVICE 16
84 #define SKD_MAX_QUEUE_DEPTH 200u
86 #define SKD_PAUSE_TIMEOUT (5 * 1000)
88 #define SKD_N_FITMSG_BYTES (512u)
90 #define SKD_N_SPECIAL_CONTEXT 32u
91 #define SKD_N_SPECIAL_FITMSG_BYTES (128u)
93 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
94  * 128KB limit. That allows a 4096*4K = 16M transfer size.
95  */
96 #define SKD_N_SG_PER_REQ_DEFAULT 256u
97 #define SKD_N_SG_PER_SPECIAL 256u
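/*
 * Worked example for the sizing above (illustrative): 4096 descriptors
 * at 32 bytes each occupy exactly 128KB, the stated limit, and one 4K
 * page per descriptor allows 4096 * 4K = 16M per transfer. The default
 * of 256 keeps each request's SG list at a modest 8KB.
 */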
99 #define SKD_N_COMPLETION_ENTRY 256u
100 #define SKD_N_READ_CAP_BYTES (8u)
102 #define SKD_N_INTERNAL_BYTES (512u)
104 /* 5 bits of uniquifier, 0xF800 */
105 #define SKD_ID_INCR (0x400)
106 #define SKD_ID_TABLE_MASK (3u << 8u)
107 #define SKD_ID_RW_REQUEST (0u << 8u)
108 #define SKD_ID_INTERNAL (1u << 8u)
109 #define SKD_ID_SPECIAL_REQUEST (2u << 8u)
110 #define SKD_ID_FIT_MSG (3u << 8u)
111 #define SKD_ID_SLOT_MASK 0x00FFu
112 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
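/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * a context id decomposes under the masks above into the uniquifier
 * bits above the slot+table field (advanced by SKD_ID_INCR on every
 * reuse so stale completions can be told apart from live ones), a
 * table selector and a slot index. E.g. id 0x0a12 -> uniquifier bits
 * 0x0800, table SKD_ID_SPECIAL_REQUEST, slot 0x12.
 */
static inline void skd_example_decode_id(u32 id, u32 *uniq, u32 *table,
					 u32 *slot)
{
	*uniq = id & ~(u32)SKD_ID_SLOT_AND_TABLE_MASK;
	*table = id & SKD_ID_TABLE_MASK;
	*slot = id & SKD_ID_SLOT_MASK;
}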
114 #define SKD_N_TIMEOUT_SLOT 4u
115 #define SKD_TIMEOUT_SLOT_MASK 3u
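/*
 * How the slots are used (see skd_timer_tick() and skd_release_skreq()
 * below): each issued request is counted in
 * timeout_slot[timeout_stamp & SKD_TIMEOUT_SLOT_MASK], and the 1 Hz
 * timer advances timeout_stamp; when the same slot comes around again
 * four ticks later, a nonzero count means requests from the previous
 * pass are overdue.
 */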
117 #define SKD_N_MAX_SECTORS 2048u
119 #define SKD_MAX_RETRIES 2u
121 #define SKD_TIMER_SECONDS(seconds) (seconds)
122 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
124 #define INQ_STD_NBYTES 36
126 enum skd_drvr_state {
130 SKD_DRVR_STATE_STARTING,
131 SKD_DRVR_STATE_ONLINE,
132 SKD_DRVR_STATE_PAUSING,
133 SKD_DRVR_STATE_PAUSED,
134 SKD_DRVR_STATE_DRAINING_TIMEOUT,
135 SKD_DRVR_STATE_RESTARTING,
136 SKD_DRVR_STATE_RESUMING,
137 SKD_DRVR_STATE_STOPPING,
138 SKD_DRVR_STATE_FAULT,
139 SKD_DRVR_STATE_DISAPPEARED,
140 SKD_DRVR_STATE_PROTOCOL_MISMATCH,
141 SKD_DRVR_STATE_BUSY_ERASE,
142 SKD_DRVR_STATE_BUSY_SANITIZE,
143 SKD_DRVR_STATE_BUSY_IMMINENT,
144 SKD_DRVR_STATE_WAIT_BOOT,
145 SKD_DRVR_STATE_SYNCING,
148 #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
149 #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
150 #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
151 #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
152 #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
153 #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
154 #define SKD_START_WAIT_SECONDS 90u
160 SKD_REQ_STATE_COMPLETED,
161 SKD_REQ_STATE_TIMEOUT,
162 SKD_REQ_STATE_ABORTED,
165 enum skd_fit_msg_state {
170 enum skd_check_status_action {
171 SKD_CHECK_STATUS_REPORT_GOOD,
172 SKD_CHECK_STATUS_REPORT_SMART_ALERT,
173 SKD_CHECK_STATUS_REQUEUE_REQUEST,
174 SKD_CHECK_STATUS_REPORT_ERROR,
175 SKD_CHECK_STATUS_BUSY_IMMINENT,
178 struct skd_fitmsg_context {
179 enum skd_fit_msg_state state;
181 struct skd_fitmsg_context *next;
190 dma_addr_t mb_dma_address;
193 struct skd_request_context {
194 enum skd_req_state state;
196 struct skd_request_context *next;
206 struct scatterlist *sg;
210 struct fit_sg_descriptor *sksg_list;
211 dma_addr_t sksg_dma_address;
213 struct fit_completion_entry_v1 completion;
215 struct fit_comp_error_info err_info;
218 #define SKD_DATA_DIR_HOST_TO_CARD 1
219 #define SKD_DATA_DIR_CARD_TO_HOST 2
221 struct skd_special_context {
222 struct skd_request_context req;
227 dma_addr_t db_dma_address;
230 dma_addr_t mb_dma_address;
243 struct sg_iovec *iov;
244 struct sg_iovec no_iov_iov;
246 struct skd_special_context *skspcl;
249 typedef enum skd_irq_type {
255 #define SKD_MAX_BARS 2
258 volatile void __iomem *mem_map[SKD_MAX_BARS];
259 resource_size_t mem_phys[SKD_MAX_BARS];
260 u32 mem_size[SKD_MAX_BARS];
262 struct skd_msix_entry *msix_entries;
264 struct pci_dev *pdev;
265 int pcie_error_reporting_is_enabled;
268 struct gendisk *disk;
269 struct request_queue *queue;
270 struct device *class_dev;
274 atomic_t device_count;
280 enum skd_drvr_state state;
284 u32 cur_max_queue_depth;
285 u32 queue_low_water_mark;
286 u32 dev_max_queue_depth;
288 u32 num_fitmsg_context;
291 u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
293 struct skd_fitmsg_context *skmsg_free_list;
294 struct skd_fitmsg_context *skmsg_table;
296 struct skd_request_context *skreq_free_list;
297 struct skd_request_context *skreq_table;
299 struct skd_special_context *skspcl_free_list;
300 struct skd_special_context *skspcl_table;
302 struct skd_special_context internal_skspcl;
303 u32 read_cap_blocksize;
304 u32 read_cap_last_lba;
305 int read_cap_is_valid;
306 int inquiry_is_valid;
307 u8 inq_serial_num[13]; /* 12 chars plus null term */
308 u8 id_str[80]; /* holds a composite name (pci + sernum) */
312 struct fit_completion_entry_v1 *skcomp_table;
313 struct fit_comp_error_info *skerr_table;
314 dma_addr_t cq_dma_address;
316 wait_queue_head_t waitq;
318 struct timer_list timer;
329 u32 connect_time_stamp;
331 #define SKD_MAX_CONNECT_RETRIES 16
336 struct work_struct completion_worker;
339 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
340 #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
341 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
343 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
347 if (likely(skdev->dbg_level < 2))
348 return readl(skdev->mem_map[1] + offset);
351 val = readl(skdev->mem_map[1] + offset);
353 pr_debug("%s:%s:%d offset %x = %x\n",
354 skdev->name, __func__, __LINE__, offset, val);
360 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
363 if (likely(skdev->dbg_level < 2)) {
364 writel(val, skdev->mem_map[1] + offset);
368 writel(val, skdev->mem_map[1] + offset);
370 pr_debug("%s:%s:%d offset %x = %x\n",
371 skdev->name, __func__, __LINE__, offset, val);
375 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
378 if (likely(skdev->dbg_level < 2)) {
379 writeq(val, skdev->mem_map[1] + offset);
383 writeq(val, skdev->mem_map[1] + offset);
385 pr_debug("%s:%s:%d offset %x = %016llx\n",
386 skdev->name, __func__, __LINE__, offset, val);
391 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
392 static int skd_isr_type = SKD_IRQ_DEFAULT;
394 module_param(skd_isr_type, int, 0444);
395 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
396 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
398 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
399 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
401 module_param(skd_max_req_per_msg, int, 0444);
402 MODULE_PARM_DESC(skd_max_req_per_msg,
403 "Maximum SCSI requests packed in a single message."
404 " (1-14, default==1)");
406 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
407 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
408 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
410 module_param(skd_max_queue_depth, int, 0444);
411 MODULE_PARM_DESC(skd_max_queue_depth,
412 "Maximum SCSI requests issued to s1120."
413 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
415 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
416 module_param(skd_sgs_per_request, int, 0444);
417 MODULE_PARM_DESC(skd_sgs_per_request,
418 "Maximum SG elements per block request."
419 " (1-4096, default==256)");
421 static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
422 module_param(skd_max_pass_thru, int, 0444);
423 MODULE_PARM_DESC(skd_max_pass_thru,
424 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
426 module_param(skd_dbg_level, int, 0444);
427 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
429 module_param(skd_isr_comp_limit, int, 0444);
430 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
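/*
 * Example (hypothetical invocation): all of the parameters above are
 * read-only at runtime (mode 0444), so they are set at load time, e.g.:
 *
 *	modprobe skd skd_isr_type=2 skd_max_queue_depth=128 \
 *		skd_max_req_per_msg=4
 */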
432 /* Major device number dynamically assigned. */
433 static u32 skd_major;
435 static void skd_destruct(struct skd_device *skdev);
436 static const struct block_device_operations skd_blockdev_ops;
437 static void skd_send_fitmsg(struct skd_device *skdev,
438 struct skd_fitmsg_context *skmsg);
439 static void skd_send_special_fitmsg(struct skd_device *skdev,
440 struct skd_special_context *skspcl);
441 static void skd_request_fn(struct request_queue *rq);
442 static void skd_end_request(struct skd_device *skdev,
443 struct skd_request_context *skreq, blk_status_t status);
444 static bool skd_preop_sg_list(struct skd_device *skdev,
445 struct skd_request_context *skreq);
446 static void skd_postop_sg_list(struct skd_device *skdev,
447 struct skd_request_context *skreq);
449 static void skd_restart_device(struct skd_device *skdev);
450 static int skd_quiesce_dev(struct skd_device *skdev);
451 static int skd_unquiesce_dev(struct skd_device *skdev);
452 static void skd_release_special(struct skd_device *skdev,
453 struct skd_special_context *skspcl);
454 static void skd_disable_interrupts(struct skd_device *skdev);
455 static void skd_isr_fwstate(struct skd_device *skdev);
456 static void skd_recover_requests(struct skd_device *skdev, int requeue);
457 static void skd_soft_reset(struct skd_device *skdev);
459 static const char *skd_name(struct skd_device *skdev);
460 const char *skd_drive_state_to_str(int state);
461 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
462 static void skd_log_skdev(struct skd_device *skdev, const char *event);
463 static void skd_log_skmsg(struct skd_device *skdev,
464 struct skd_fitmsg_context *skmsg, const char *event);
465 static void skd_log_skreq(struct skd_device *skdev,
466 struct skd_request_context *skreq, const char *event);
469 *****************************************************************************
470 * READ/WRITE REQUESTS
471 *****************************************************************************
473 static void skd_fail_all_pending(struct skd_device *skdev)
475 struct request_queue *q = skdev->queue;
479 req = blk_peek_request(q);
482 blk_start_request(req);
483 __blk_end_request_all(req, BLK_STS_IOERR);
488 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
489 int data_dir, unsigned lba,
492 if (data_dir == READ)
493 scsi_req->cdb[0] = 0x28;
495 scsi_req->cdb[0] = 0x2a;
497 scsi_req->cdb[1] = 0;
498 scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
499 scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
500 scsi_req->cdb[4] = (lba & 0xff00) >> 8;
501 scsi_req->cdb[5] = (lba & 0xff);
502 scsi_req->cdb[6] = 0;
503 scsi_req->cdb[7] = (count & 0xff00) >> 8;
504 scsi_req->cdb[8] = count & 0xff;
505 scsi_req->cdb[9] = 0;
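/*
 * The bytes above form a standard 10-byte READ(10)/WRITE(10) CDB:
 * opcode, flags, 32-bit big-endian LBA, group number, 16-bit
 * big-endian transfer length, control. A minimal equivalent sketch
 * (illustrative) using the helpers from <asm/unaligned.h> and the
 * READ_10/WRITE_10 opcodes from <scsi/scsi.h>, both included above:
 *
 *	scsi_req->cdb[0] = data_dir == READ ? READ_10 : WRITE_10;
 *	put_unaligned_be32(lba, &scsi_req->cdb[2]);
 *	put_unaligned_be16(count, &scsi_req->cdb[7]);
 */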
509 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
510 struct skd_request_context *skreq)
512 skreq->flush_cmd = 1;
514 scsi_req->cdb[0] = 0x35;
515 scsi_req->cdb[1] = 0;
516 scsi_req->cdb[2] = 0;
517 scsi_req->cdb[3] = 0;
518 scsi_req->cdb[4] = 0;
519 scsi_req->cdb[5] = 0;
520 scsi_req->cdb[6] = 0;
521 scsi_req->cdb[7] = 0;
522 scsi_req->cdb[8] = 0;
523 scsi_req->cdb[9] = 0;
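/* Opcode 0x35 is SYNCHRONIZE_CACHE(10); with the LBA and block-count
 * fields left zero, the device flushes its entire write cache. */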
526 static void skd_request_fn_not_online(struct request_queue *q);
528 static void skd_request_fn(struct request_queue *q)
530 struct skd_device *skdev = q->queuedata;
531 struct skd_fitmsg_context *skmsg = NULL;
532 struct fit_msg_hdr *fmh = NULL;
533 struct skd_request_context *skreq;
534 struct request *req = NULL;
535 struct skd_scsi_request *scsi_req;
536 unsigned long io_flags;
546 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
547 skd_request_fn_not_online(q);
551 if (blk_queue_stopped(skdev->queue)) {
552 if (skdev->skmsg_free_list == NULL ||
553 skdev->skreq_free_list == NULL ||
554 skdev->in_flight >= skdev->queue_low_water_mark)
555 /* There is still some kind of shortage */
558 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
563 * - There are no more native requests
564 * - There are already the maximum number of requests in progress
565 * - There are no more skd_request_context entries
566 * - There are no more FIT msg buffers
572 req = blk_peek_request(q);
574 /* Are there any native requests to start? */
578 lba = (u32)blk_rq_pos(req);
579 count = blk_rq_sectors(req);
580 data_dir = rq_data_dir(req);
581 io_flags = req->cmd_flags;
583 if (req_op(req) == REQ_OP_FLUSH)
586 if (io_flags & REQ_FUA)
589 pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
590 "count=%u(0x%x) dir=%d\n",
591 skdev->name, __func__, __LINE__,
592 req, lba, lba, count, count, data_dir);
594 /* At this point we know there is a request */
596 /* Are too many requests already in progress? */
597 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
598 pr_debug("%s:%s:%d qdepth %d, limit %d\n",
599 skdev->name, __func__, __LINE__,
600 skdev->in_flight, skdev->cur_max_queue_depth);
604 /* Is a skd_request_context available? */
605 skreq = skdev->skreq_free_list;
607 pr_debug("%s:%s:%d Out of req=%p\n",
608 skdev->name, __func__, __LINE__, q);
611 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
612 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
614 /* Now we check to see if we can get a fit msg */
616 if (skdev->skmsg_free_list == NULL) {
617 pr_debug("%s:%s:%d Out of msg\n",
618 skdev->name, __func__, __LINE__);
623 skreq->flush_cmd = 0;
625 skreq->sg_byte_count = 0;
628 * OK to now dequeue request from q.
630 * At this point we are committed to either start or reject
631 * the native request. Note that skd_request_context is
632 * available but is still at the head of the free list.
634 blk_start_request(req);
636 skreq->fitmsg_id = 0;
638 /* Either a FIT msg is in progress or we have to start one. */
640 /* Are there any FIT msg buffers available? */
641 skmsg = skdev->skmsg_free_list;
643 pr_debug("%s:%s:%d Out of msg skdev=%p\n",
644 skdev->name, __func__, __LINE__,
648 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
649 SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
651 skdev->skmsg_free_list = skmsg->next;
653 skmsg->state = SKD_MSG_STATE_BUSY;
654 skmsg->id += SKD_ID_INCR;
656 /* Initialize the FIT msg header */
657 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
658 memset(fmh, 0, sizeof(*fmh));
659 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
660 skmsg->length = sizeof(*fmh);
663 skreq->fitmsg_id = skmsg->id;
666 * Note that a FIT msg may have just been started
667 * but contains no SoFIT requests yet.
671 * Transcode the request, checking as we go. The outcome of
672 * the transcoding is represented by the error variable.
674 cmd_ptr = &skmsg->msg_buf[skmsg->length];
675 memset(cmd_ptr, 0, 32);
677 be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
678 cmdctxt = skreq->id + SKD_ID_INCR;
681 scsi_req->hdr.tag = cmdctxt;
682 scsi_req->hdr.sg_list_dma_address = be_dmaa;
684 if (data_dir == READ)
685 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
687 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
689 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
690 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
691 SKD_ASSERT(skreq->flush_cmd == 1);
693 skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
697 scsi_req->cdb[1] |= SKD_FUA_NV;
702 if (!skd_preop_sg_list(skdev, skreq)) {
704 * Complete the native request with error.
705 * Note that the request context is still at the
706 * head of the free list, and that the SoFIT request
707 * was encoded into the FIT msg buffer but the FIT
708 * msg length has not been updated. In short, the
709 * only resource that has been allocated but might
710 * go unused is the FIT msg, which may remain empty.
712 pr_debug("%s:%s:%d error Out\n",
713 skdev->name, __func__, __LINE__);
714 skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
719 scsi_req->hdr.sg_list_len_bytes =
720 cpu_to_be32(skreq->sg_byte_count);
722 /* Complete resource allocations. */
723 skdev->skreq_free_list = skreq->next;
724 skreq->state = SKD_REQ_STATE_BUSY;
725 skreq->id += SKD_ID_INCR;
727 skmsg->length += sizeof(struct skd_scsi_request);
728 fmh->num_protocol_cmds_coalesced++;
731 * Update the active request counts.
732 * Capture the timeout timestamp.
734 skreq->timeout_stamp = skdev->timeout_stamp;
735 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
736 skdev->timeout_slot[timo_slot]++;
738 pr_debug("%s:%s:%d req=0x%x busy=%d\n",
739 skdev->name, __func__, __LINE__,
740 skreq->id, skdev->in_flight);
743 * If the FIT msg buffer is full send it.
745 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
746 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
747 skd_send_fitmsg(skdev, skmsg);
754 * Is a FIT msg in progress? If it is empty put the buffer back
755 * on the free list. If it is non-empty send what we got.
756 * This minimizes latency when there are fewer requests than
757 * what fits in a FIT msg.
760 /* Bigger than just a FIT msg header? */
761 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
762 pr_debug("%s:%s:%d sending msg=%p, len %d\n",
763 skdev->name, __func__, __LINE__,
764 skmsg, skmsg->length);
765 skd_send_fitmsg(skdev, skmsg);
768 * The FIT msg is empty. It means we got started
769 * on the msg, but the requests were rejected.
771 skmsg->state = SKD_MSG_STATE_IDLE;
772 skmsg->id += SKD_ID_INCR;
773 skmsg->next = skdev->skmsg_free_list;
774 skdev->skmsg_free_list = skmsg;
781 * If req is non-NULL it means there is something to do but
782 * we are out of a resource.
785 blk_stop_queue(skdev->queue);
788 static void skd_end_request(struct skd_device *skdev,
789 struct skd_request_context *skreq, blk_status_t error)
791 if (unlikely(error)) {
792 struct request *req = skreq->req;
793 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
794 u32 lba = (u32)blk_rq_pos(req);
795 u32 count = blk_rq_sectors(req);
797 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
798 skd_name(skdev), cmd, lba, count, skreq->id);
800 pr_debug("%s:%s:%d id=0x%x error=%d\n",
801 skdev->name, __func__, __LINE__, skreq->id, error);
803 __blk_end_request_all(skreq->req, error);
806 static bool skd_preop_sg_list(struct skd_device *skdev,
807 struct skd_request_context *skreq)
809 struct request *req = skreq->req;
810 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
811 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
812 struct scatterlist *sg = &skreq->sg[0];
816 skreq->sg_byte_count = 0;
818 /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
819 skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
821 n_sg = blk_rq_map_sg(skdev->queue, req, sg);
826 * Map scatterlist to PCI bus addresses.
827 * Note PCI might change the number of entries.
829 n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
833 SKD_ASSERT(n_sg <= skdev->sgs_per_request);
837 for (i = 0; i < n_sg; i++) {
838 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
839 u32 cnt = sg_dma_len(&sg[i]);
840 uint64_t dma_addr = sg_dma_address(&sg[i]);
842 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
843 sgd->byte_count = cnt;
844 skreq->sg_byte_count += cnt;
845 sgd->host_side_addr = dma_addr;
846 sgd->dev_side_addr = 0;
849 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
850 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
852 if (unlikely(skdev->dbg_level > 1)) {
853 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
854 skdev->name, __func__, __LINE__,
855 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
856 for (i = 0; i < n_sg; i++) {
857 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
858 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
859 "addr=0x%llx next=0x%llx\n",
860 skdev->name, __func__, __LINE__,
861 i, sgd->byte_count, sgd->control,
862 sgd->host_side_addr, sgd->next_desc_ptr);
869 static void skd_postop_sg_list(struct skd_device *skdev,
870 struct skd_request_context *skreq)
872 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
873 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
876 * restore the next ptr for the next IO request so we
877 * don't have to set it every time.
879 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
880 skreq->sksg_dma_address +
881 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
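/*
 * This re-links the chain that skd_preop_sg_list() terminated: for
 * example with n_sg == 3, descriptor 2's next pointer is restored to
 * sksg_dma_address + 3 * sizeof(struct fit_sg_descriptor), i.e. the
 * DMA address of descriptor 3 in the preallocated list.
 */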
882 pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
885 static void skd_request_fn_not_online(struct request_queue *q)
887 struct skd_device *skdev = q->queuedata;
889 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
891 skd_log_skdev(skdev, "req_not_online");
892 switch (skdev->state) {
893 case SKD_DRVR_STATE_PAUSING:
894 case SKD_DRVR_STATE_PAUSED:
895 case SKD_DRVR_STATE_STARTING:
896 case SKD_DRVR_STATE_RESTARTING:
897 case SKD_DRVR_STATE_WAIT_BOOT:
898 /* In case of starting, we haven't started the queue,
899 * so we can't get here... but requests are
900 * possibly hanging out waiting for us because we
901 * reported /dev/skd0 already. They'll wait
902 * forever if connect doesn't complete.
903 * What to do? Perhaps delay /dev/skd0?
905 case SKD_DRVR_STATE_BUSY:
906 case SKD_DRVR_STATE_BUSY_IMMINENT:
907 case SKD_DRVR_STATE_BUSY_ERASE:
908 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
911 case SKD_DRVR_STATE_BUSY_SANITIZE:
912 case SKD_DRVR_STATE_STOPPING:
913 case SKD_DRVR_STATE_SYNCING:
914 case SKD_DRVR_STATE_FAULT:
915 case SKD_DRVR_STATE_DISAPPEARED:
920 /* If we get here, terminate all pending block requests
921 * with EIO and any scsi pass thru with appropriate sense
924 skd_fail_all_pending(skdev);
928 *****************************************************************************
930 *****************************************************************************
933 static void skd_timer_tick_not_online(struct skd_device *skdev);
935 static void skd_timer_tick(ulong arg)
937 struct skd_device *skdev = (struct skd_device *)arg;
940 unsigned long reqflags;
943 if (skdev->state == SKD_DRVR_STATE_FAULT)
944 /* The driver has declared fault, and we want it to
945 * stay that way until the driver is reloaded.
949 spin_lock_irqsave(&skdev->lock, reqflags);
951 state = SKD_READL(skdev, FIT_STATUS);
952 state &= FIT_SR_DRIVE_STATE_MASK;
953 if (state != skdev->drive_state)
954 skd_isr_fwstate(skdev);
956 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
957 skd_timer_tick_not_online(skdev);
960 skdev->timeout_stamp++;
961 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
964 * All requests that happened during the previous use of
965 * this slot should be done by now. With four slots and a
966 * one-second timer tick, the previous use was four seconds ago.
968 if (skdev->timeout_slot[timo_slot] == 0)
971 /* Something is overdue */
972 pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
973 skdev->name, __func__, __LINE__,
974 skdev->timeout_slot[timo_slot], skdev->in_flight);
975 pr_err("(%s): Overdue IOs (%d), busy %d\n",
976 skd_name(skdev), skdev->timeout_slot[timo_slot],
979 skdev->timer_countdown = SKD_DRAINING_TIMO;
980 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
981 skdev->timo_slot = timo_slot;
982 blk_stop_queue(skdev->queue);
985 mod_timer(&skdev->timer, (jiffies + HZ));
987 spin_unlock_irqrestore(&skdev->lock, reqflags);
990 static void skd_timer_tick_not_online(struct skd_device *skdev)
992 switch (skdev->state) {
993 case SKD_DRVR_STATE_IDLE:
994 case SKD_DRVR_STATE_LOAD:
996 case SKD_DRVR_STATE_BUSY_SANITIZE:
997 pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
998 skdev->name, __func__, __LINE__,
999 skdev->drive_state, skdev->state);
1000 /* If we've been in sanitize for 3 seconds, we figure we're not
1001 * going to get any more completions, so recover requests now
1003 if (skdev->timer_countdown > 0) {
1004 skdev->timer_countdown--;
1007 skd_recover_requests(skdev, 0);
1010 case SKD_DRVR_STATE_BUSY:
1011 case SKD_DRVR_STATE_BUSY_IMMINENT:
1012 case SKD_DRVR_STATE_BUSY_ERASE:
1013 pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1014 skdev->name, __func__, __LINE__,
1015 skdev->state, skdev->timer_countdown);
1016 if (skdev->timer_countdown > 0) {
1017 skdev->timer_countdown--;
1020 pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
1021 skdev->name, __func__, __LINE__,
1022 skdev->state, skdev->timer_countdown);
1023 skd_restart_device(skdev);
1026 case SKD_DRVR_STATE_WAIT_BOOT:
1027 case SKD_DRVR_STATE_STARTING:
1028 if (skdev->timer_countdown > 0) {
1029 skdev->timer_countdown--;
1032 /* For now, we fault the drive. Could attempt resets to
1033 * recover at some point. */
1034 skdev->state = SKD_DRVR_STATE_FAULT;
1036 pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1037 skd_name(skdev), skdev->drive_state);
1039 /* start the queue so we can respond with error to requests */
1040 /* wake up anyone waiting for startup complete */
1041 blk_start_queue(skdev->queue);
1042 skdev->gendisk_on = -1;
1043 wake_up_interruptible(&skdev->waitq);
1046 case SKD_DRVR_STATE_ONLINE:
1047 /* shouldn't get here. */
1050 case SKD_DRVR_STATE_PAUSING:
1051 case SKD_DRVR_STATE_PAUSED:
1054 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1055 pr_debug("%s:%s:%d "
1056 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1057 skdev->name, __func__, __LINE__,
1059 skdev->timer_countdown,
1061 skdev->timeout_slot[skdev->timo_slot]);
1062 /* if the slot has cleared we can let the I/O continue */
1063 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1064 pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1065 skdev->name, __func__, __LINE__);
1066 skdev->state = SKD_DRVR_STATE_ONLINE;
1067 blk_start_queue(skdev->queue);
1070 if (skdev->timer_countdown > 0) {
1071 skdev->timer_countdown--;
1074 skd_restart_device(skdev);
1077 case SKD_DRVR_STATE_RESTARTING:
1078 if (skdev->timer_countdown > 0) {
1079 skdev->timer_countdown--;
1082 /* For now, we fault the drive. Could attempt resets to
1083 * recover at some point. */
1084 skdev->state = SKD_DRVR_STATE_FAULT;
1085 pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1086 skd_name(skdev), skdev->drive_state);
1089 * Recovering does two things:
1090 * 1. completes IO with error
1091 * 2. reclaims dma resources
1092 * When is it safe to recover requests?
1093 * - if the drive state is faulted
1094 * - if the state is still soft reset after our timeout
1095 * - if the drive registers are dead (state = FF)
1096 * If it is "unsafe", we still need to recover, so we will
1097 * disable pci bus mastering and disable our interrupts.
1100 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1101 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1102 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1103 /* It never came out of soft reset. Try to
1104 * recover the requests and then let them
1105 * fail. This is to mitigate hung processes. */
1106 skd_recover_requests(skdev, 0);
1108 pr_err("(%s): Disable BusMaster (%x)\n",
1109 skd_name(skdev), skdev->drive_state);
1110 pci_disable_device(skdev->pdev);
1111 skd_disable_interrupts(skdev);
1112 skd_recover_requests(skdev, 0);
1115 /* start the queue so we can respond with error to requests */
1116 /* wake up anyone waiting for startup complete */
1117 blk_start_queue(skdev->queue);
1118 skdev->gendisk_on = -1;
1119 wake_up_interruptible(&skdev->waitq);
1122 case SKD_DRVR_STATE_RESUMING:
1123 case SKD_DRVR_STATE_STOPPING:
1124 case SKD_DRVR_STATE_SYNCING:
1125 case SKD_DRVR_STATE_FAULT:
1126 case SKD_DRVR_STATE_DISAPPEARED:
1132 static int skd_start_timer(struct skd_device *skdev)
1136 init_timer(&skdev->timer);
1137 setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1139 rc = mod_timer(&skdev->timer, (jiffies + HZ));
1141 pr_err("%s: failed to start timer %d\n",
1146 static void skd_kill_timer(struct skd_device *skdev)
1148 del_timer_sync(&skdev->timer);
1152 *****************************************************************************
1154 *****************************************************************************
1156 static int skd_ioctl_sg_io(struct skd_device *skdev,
1157 fmode_t mode, void __user *argp);
1158 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1159 struct skd_sg_io *sksgio);
1160 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1161 struct skd_sg_io *sksgio);
1162 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1163 struct skd_sg_io *sksgio);
1164 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1165 struct skd_sg_io *sksgio, int dxfer_dir);
1166 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1167 struct skd_sg_io *sksgio);
1168 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1169 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1170 struct skd_sg_io *sksgio);
1171 static int skd_sg_io_put_status(struct skd_device *skdev,
1172 struct skd_sg_io *sksgio);
1174 static void skd_complete_special(struct skd_device *skdev,
1175 volatile struct fit_completion_entry_v1
1177 volatile struct fit_comp_error_info *skerr,
1178 struct skd_special_context *skspcl);
1180 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1181 uint cmd_in, ulong arg)
1183 static const int sg_version_num = 30527;
1184 int rc = 0, timeout;
1185 struct gendisk *disk = bdev->bd_disk;
1186 struct skd_device *skdev = disk->private_data;
1187 int __user *p = (int __user *)arg;
1189 pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
1190 skdev->name, __func__, __LINE__,
1191 disk->disk_name, current->comm, mode, cmd_in, arg);
1193 if (!capable(CAP_SYS_ADMIN))
1197 case SG_SET_TIMEOUT:
1198 rc = get_user(timeout, p);
1200 disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
1202 case SG_GET_TIMEOUT:
1203 rc = jiffies_to_clock_t(disk->queue->sg_timeout);
1205 case SG_GET_VERSION_NUM:
1206 rc = put_user(sg_version_num, p);
1209 rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
1217 pr_debug("%s:%s:%d %s: completion rc %d\n",
1218 skdev->name, __func__, __LINE__, disk->disk_name, rc);
1222 static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1226 struct skd_sg_io sksgio;
1228 memset(&sksgio, 0, sizeof(sksgio));
1231 sksgio.iov = &sksgio.no_iov_iov;
1233 switch (skdev->state) {
1234 case SKD_DRVR_STATE_ONLINE:
1235 case SKD_DRVR_STATE_BUSY_IMMINENT:
1239 pr_debug("%s:%s:%d drive not online\n",
1240 skdev->name, __func__, __LINE__);
1245 rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1249 rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1253 rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1257 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1261 rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1265 rc = skd_sg_io_await(skdev, &sksgio);
1269 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1273 rc = skd_sg_io_put_status(skdev, &sksgio);
1280 skd_sg_io_release_skspcl(skdev, &sksgio);
1282 if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1287 static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1288 struct skd_sg_io *sksgio)
1290 struct sg_io_hdr *sgp = &sksgio->sg;
1291 int i, __maybe_unused acc;
1293 if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1294 pr_debug("%s:%s:%d access sg failed %p\n",
1295 skdev->name, __func__, __LINE__, sksgio->argp);
1299 if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1300 pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1301 skdev->name, __func__, __LINE__, sksgio->argp);
1305 if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1306 pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1307 skdev->name, __func__, __LINE__, sgp->interface_id);
1311 if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1312 pr_debug("%s:%s:%d cmd_len invalid %d\n",
1313 skdev->name, __func__, __LINE__, sgp->cmd_len);
1317 if (sgp->iovec_count > 256) {
1318 pr_debug("%s:%s:%d iovec_count invalid %d\n",
1319 skdev->name, __func__, __LINE__, sgp->iovec_count);
1323 if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1324 pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1325 skdev->name, __func__, __LINE__, sgp->dxfer_len);
1329 switch (sgp->dxfer_direction) {
1334 case SG_DXFER_TO_DEV:
1338 case SG_DXFER_FROM_DEV:
1339 case SG_DXFER_TO_FROM_DEV:
1344 pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1345 skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1349 if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1350 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1351 skdev->name, __func__, __LINE__, sgp->cmdp);
1355 if (sgp->mx_sb_len != 0) {
1356 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1357 pr_debug("%s:%s:%d access sbp failed %p\n",
1358 skdev->name, __func__, __LINE__, sgp->sbp);
1363 if (sgp->iovec_count == 0) {
1364 sksgio->iov[0].iov_base = sgp->dxferp;
1365 sksgio->iov[0].iov_len = sgp->dxfer_len;
1367 sksgio->dxfer_len = sgp->dxfer_len;
1369 struct sg_iovec *iov;
1370 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1371 size_t iov_data_len;
1373 iov = kmalloc(nbytes, GFP_KERNEL);
1375 pr_debug("%s:%s:%d alloc iovec failed %d\n",
1376 skdev->name, __func__, __LINE__,
1381 sksgio->iovcnt = sgp->iovec_count;
1383 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1384 pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1385 skdev->name, __func__, __LINE__, sgp->dxferp);
1390 * Sum up the vecs, making sure they don't overflow
1393 for (i = 0; i < sgp->iovec_count; i++) {
1394 if (iov_data_len + iov[i].iov_len < iov_data_len)
1396 iov_data_len += iov[i].iov_len;
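/* (in unsigned arithmetic a + b < a exactly when the addition wraps,
 * so the check above rejects iovec totals that overflow iov_data_len) */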
1399 /* SG_IO howto says that the shorter of the two wins */
1400 if (sgp->dxfer_len < iov_data_len) {
1401 sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1404 sksgio->dxfer_len = sgp->dxfer_len;
1406 sksgio->dxfer_len = iov_data_len;
1409 if (sgp->dxfer_direction != SG_DXFER_NONE) {
1410 struct sg_iovec *iov = sksgio->iov;
1411 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1412 if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1413 pr_debug("%s:%s:%d access data failed %p/%d\n",
1414 skdev->name, __func__, __LINE__,
1415 iov->iov_base, (int)iov->iov_len);
1424 static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1425 struct skd_sg_io *sksgio)
1427 struct skd_special_context *skspcl = NULL;
1433 spin_lock_irqsave(&skdev->lock, flags);
1434 skspcl = skdev->skspcl_free_list;
1435 if (skspcl != NULL) {
1436 skdev->skspcl_free_list =
1437 (struct skd_special_context *)skspcl->req.next;
1438 skspcl->req.id += SKD_ID_INCR;
1439 skspcl->req.state = SKD_REQ_STATE_SETUP;
1440 skspcl->orphaned = 0;
1441 skspcl->req.n_sg = 0;
1443 spin_unlock_irqrestore(&skdev->lock, flags);
1445 if (skspcl != NULL) {
1450 pr_debug("%s:%s:%d blocking\n",
1451 skdev->name, __func__, __LINE__);
1453 rc = wait_event_interruptible_timeout(
1455 (skdev->skspcl_free_list != NULL),
1456 msecs_to_jiffies(sksgio->sg.timeout));
1458 pr_debug("%s:%s:%d unblocking, rc=%d\n",
1459 skdev->name, __func__, __LINE__, rc);
1469 * If we get here, rc > 0, meaning
1470 * wait_event_interruptible_timeout() returned with time to spare;
1471 * the sought event -- a non-empty free list -- happened.
1472 * Retry the allocation.
1475 sksgio->skspcl = skspcl;
1480 static int skd_skreq_prep_buffering(struct skd_device *skdev,
1481 struct skd_request_context *skreq,
1484 u32 resid = dxfer_len;
1487 * The DMA engine must have aligned addresses and byte counts.
1489 resid += (-resid) & 3; /* round up to a multiple of 4 (adds 0..3) */
1490 skreq->sg_byte_count = resid;
1495 u32 nbytes = PAGE_SIZE;
1496 u32 ix = skreq->n_sg;
1497 struct scatterlist *sg = &skreq->sg[ix];
1498 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1504 page = alloc_page(GFP_KERNEL);
1508 sg_set_page(sg, page, nbytes, 0);
1510 /* TODO: This should be going through a pci_???()
1511 * routine to do proper mapping. */
1512 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1513 sksg->byte_count = nbytes;
1515 sksg->host_side_addr = sg_phys(sg);
1517 sksg->dev_side_addr = 0;
1518 sksg->next_desc_ptr = skreq->sksg_dma_address +
1519 (ix + 1) * sizeof(*sksg);
1525 if (skreq->n_sg > 0) {
1526 u32 ix = skreq->n_sg - 1;
1527 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1529 sksg->control = FIT_SGD_CONTROL_LAST;
1530 sksg->next_desc_ptr = 0;
1533 if (unlikely(skdev->dbg_level > 1)) {
1536 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1537 skdev->name, __func__, __LINE__,
1538 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1539 for (i = 0; i < skreq->n_sg; i++) {
1540 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1542 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
1543 "addr=0x%llx next=0x%llx\n",
1544 skdev->name, __func__, __LINE__,
1545 i, sgd->byte_count, sgd->control,
1546 sgd->host_side_addr, sgd->next_desc_ptr);
1553 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1554 struct skd_sg_io *sksgio)
1556 struct skd_special_context *skspcl = sksgio->skspcl;
1557 struct skd_request_context *skreq = &skspcl->req;
1558 u32 dxfer_len = sksgio->dxfer_len;
1561 rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1563 * Eventually, errors or not, skd_release_special() is called
1564 * to recover allocations including partial allocations.
1569 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1570 struct skd_sg_io *sksgio, int dxfer_dir)
1572 struct skd_special_context *skspcl = sksgio->skspcl;
1574 struct sg_iovec curiov;
1578 u32 resid = sksgio->dxfer_len;
1582 curiov.iov_base = NULL;
1584 if (dxfer_dir != sksgio->sg.dxfer_direction) {
1585 if (dxfer_dir != SG_DXFER_TO_DEV ||
1586 sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1591 u32 nbytes = PAGE_SIZE;
1593 if (curiov.iov_len == 0) {
1594 curiov = sksgio->iov[iov_ix++];
1600 page = sg_page(&skspcl->req.sg[sksg_ix++]);
1601 bufp = page_address(page);
1602 buf_len = PAGE_SIZE;
1605 nbytes = min_t(u32, nbytes, resid);
1606 nbytes = min_t(u32, nbytes, curiov.iov_len);
1607 nbytes = min_t(u32, nbytes, buf_len);
1609 if (dxfer_dir == SG_DXFER_TO_DEV)
1610 rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1612 rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1618 curiov.iov_len -= nbytes;
1619 curiov.iov_base += nbytes;
1626 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1627 struct skd_sg_io *sksgio)
1629 struct skd_special_context *skspcl = sksgio->skspcl;
1630 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1631 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1633 memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1635 /* Initialize the FIT msg header */
1636 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1637 fmh->num_protocol_cmds_coalesced = 1;
1639 /* Initialize the SCSI request */
1640 if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1641 scsi_req->hdr.sg_list_dma_address =
1642 cpu_to_be64(skspcl->req.sksg_dma_address);
1643 scsi_req->hdr.tag = skspcl->req.id;
1644 scsi_req->hdr.sg_list_len_bytes =
1645 cpu_to_be32(skspcl->req.sg_byte_count);
1646 memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1648 skspcl->req.state = SKD_REQ_STATE_BUSY;
1649 skd_send_special_fitmsg(skdev, skspcl);
1654 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1656 unsigned long flags;
1659 rc = wait_event_interruptible_timeout(skdev->waitq,
1660 (sksgio->skspcl->req.state !=
1661 SKD_REQ_STATE_BUSY),
1662 msecs_to_jiffies(sksgio->sg.
1665 spin_lock_irqsave(&skdev->lock, flags);
1667 if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1668 pr_debug("%s:%s:%d skspcl %p aborted\n",
1669 skdev->name, __func__, __LINE__, sksgio->skspcl);
1671 /* Build check cond, sense and let command finish. */
1672 /* For a timeout, we must fabricate completion and sense
1673 * data to complete the command */
1674 sksgio->skspcl->req.completion.status =
1675 SAM_STAT_CHECK_CONDITION;
1677 memset(&sksgio->skspcl->req.err_info, 0,
1678 sizeof(sksgio->skspcl->req.err_info));
1679 sksgio->skspcl->req.err_info.type = 0x70;
1680 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1681 sksgio->skspcl->req.err_info.code = 0x44;
1682 sksgio->skspcl->req.err_info.qual = 0;
1684 } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1685 /* No longer on the adapter. We finish. */
1688 /* Something's gone wrong. Still busy. Timeout or
1689 * user interrupted (control-C). Mark as an orphan
1690 * so it will be disposed of when completed. */
1691 sksgio->skspcl->orphaned = 1;
1692 sksgio->skspcl = NULL;
1694 pr_debug("%s:%s:%d timed out %p (%u ms)\n",
1695 skdev->name, __func__, __LINE__,
1696 sksgio, sksgio->sg.timeout);
1699 pr_debug("%s:%s:%d cntlc %p\n",
1700 skdev->name, __func__, __LINE__, sksgio);
1705 spin_unlock_irqrestore(&skdev->lock, flags);
1710 static int skd_sg_io_put_status(struct skd_device *skdev,
1711 struct skd_sg_io *sksgio)
1713 struct sg_io_hdr *sgp = &sksgio->sg;
1714 struct skd_special_context *skspcl = sksgio->skspcl;
1717 u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1719 sgp->status = skspcl->req.completion.status;
1720 resid = sksgio->dxfer_len - nb;
1722 sgp->masked_status = sgp->status & STATUS_MASK;
1723 sgp->msg_status = 0;
1724 sgp->host_status = 0;
1725 sgp->driver_status = 0;
1727 if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1728 sgp->info |= SG_INFO_CHECK;
1730 pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
1731 skdev->name, __func__, __LINE__,
1732 sgp->status, sgp->masked_status, sgp->resid);
1734 if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1735 if (sgp->mx_sb_len > 0) {
1736 struct fit_comp_error_info *ei = &skspcl->req.err_info;
1737 u32 nbytes = sizeof(*ei);
1739 nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1741 sgp->sb_len_wr = nbytes;
1743 if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1744 pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
1745 skdev->name, __func__, __LINE__,
1752 if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1753 pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
1754 skdev->name, __func__, __LINE__, sksgio->argp);
1761 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1762 struct skd_sg_io *sksgio)
1764 struct skd_special_context *skspcl = sksgio->skspcl;
1766 if (skspcl != NULL) {
1769 sksgio->skspcl = NULL;
1771 spin_lock_irqsave(&skdev->lock, flags);
1772 skd_release_special(skdev, skspcl);
1773 spin_unlock_irqrestore(&skdev->lock, flags);
1780 *****************************************************************************
1781 * INTERNAL REQUESTS -- generated by driver itself
1782 *****************************************************************************
1785 static int skd_format_internal_skspcl(struct skd_device *skdev)
1787 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1788 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1789 struct fit_msg_hdr *fmh;
1790 uint64_t dma_address;
1791 struct skd_scsi_request *scsi;
1793 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1794 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1795 fmh->num_protocol_cmds_coalesced = 1;
1797 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1798 memset(scsi, 0, sizeof(*scsi));
1799 dma_address = skspcl->req.sksg_dma_address;
1800 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1801 sgd->control = FIT_SGD_CONTROL_LAST;
1802 sgd->byte_count = 0;
1803 sgd->host_side_addr = skspcl->db_dma_address;
1804 sgd->dev_side_addr = 0;
1805 sgd->next_desc_ptr = 0LL;
1810 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1812 static void skd_send_internal_skspcl(struct skd_device *skdev,
1813 struct skd_special_context *skspcl,
1816 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1817 struct skd_scsi_request *scsi;
1818 unsigned char *buf = skspcl->data_buf;
1821 if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1823 * A refresh is already in progress.
1824 * Just wait for it to finish.
1828 SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1829 skspcl->req.state = SKD_REQ_STATE_BUSY;
1830 skspcl->req.id += SKD_ID_INCR;
1832 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1833 scsi->hdr.tag = skspcl->req.id;
1835 memset(scsi->cdb, 0, sizeof(scsi->cdb));
1838 case TEST_UNIT_READY:
1839 scsi->cdb[0] = TEST_UNIT_READY;
1840 sgd->byte_count = 0;
1841 scsi->hdr.sg_list_len_bytes = 0;
1845 scsi->cdb[0] = READ_CAPACITY;
1846 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1847 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1851 scsi->cdb[0] = INQUIRY;
1852 scsi->cdb[1] = 0x01; /* evpd */
1853 scsi->cdb[2] = 0x80; /* serial number page */
1854 scsi->cdb[4] = 0x10;
1855 sgd->byte_count = 16;
1856 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1859 case SYNCHRONIZE_CACHE:
1860 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1861 sgd->byte_count = 0;
1862 scsi->hdr.sg_list_len_bytes = 0;
1866 scsi->cdb[0] = WRITE_BUFFER;
1867 scsi->cdb[1] = 0x02;
1868 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1869 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1870 sgd->byte_count = WR_BUF_SIZE;
1871 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1872 /* fill incrementing byte pattern */
1873 for (i = 0; i < sgd->byte_count; i++)
1878 scsi->cdb[0] = READ_BUFFER;
1879 scsi->cdb[1] = 0x02;
1880 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1881 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1882 sgd->byte_count = WR_BUF_SIZE;
1883 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1884 memset(skspcl->data_buf, 0, sgd->byte_count);
1888 SKD_ASSERT("Don't know what to send");
1892 skd_send_special_fitmsg(skdev, skspcl);
1895 static void skd_refresh_device_data(struct skd_device *skdev)
1897 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1899 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1902 static int skd_chk_read_buf(struct skd_device *skdev,
1903 struct skd_special_context *skspcl)
1905 unsigned char *buf = skspcl->data_buf;
1908 /* check for incrementing byte pattern */
1909 for (i = 0; i < WR_BUF_SIZE; i++)
1910 if (buf[i] != (i & 0xFF))
1916 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1917 u8 code, u8 qual, u8 fruc)
1919 /* If the check condition is of special interest, log a message */
1920 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1921 && (code == 0x04) && (qual == 0x06)) {
1922 pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
1923 "ascq/fruc %02x/%02x/%02x/%02x\n",
1924 skd_name(skdev), key, code, qual, fruc);
1928 static void skd_complete_internal(struct skd_device *skdev,
1929 volatile struct fit_completion_entry_v1
1931 volatile struct fit_comp_error_info *skerr,
1932 struct skd_special_context *skspcl)
1934 u8 *buf = skspcl->data_buf;
1937 struct skd_scsi_request *scsi =
1938 (struct skd_scsi_request *)&skspcl->msg_buf[64];
1940 SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1942 pr_debug("%s:%s:%d complete internal %x\n",
1943 skdev->name, __func__, __LINE__, scsi->cdb[0]);
1945 skspcl->req.completion = *skcomp;
1946 skspcl->req.state = SKD_REQ_STATE_IDLE;
1947 skspcl->req.id += SKD_ID_INCR;
1949 status = skspcl->req.completion.status;
1951 skd_log_check_status(skdev, status, skerr->key, skerr->code,
1952 skerr->qual, skerr->fruc);
1954 switch (scsi->cdb[0]) {
1955 case TEST_UNIT_READY:
1956 if (status == SAM_STAT_GOOD)
1957 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1958 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1959 (skerr->key == MEDIUM_ERROR))
1960 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1962 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1963 pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
1964 skdev->name, __func__, __LINE__,
1968 pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
1969 skdev->name, __func__, __LINE__);
1970 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1975 if (status == SAM_STAT_GOOD)
1976 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1978 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1979 pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
1980 skdev->name, __func__, __LINE__,
1984 pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
1985 skdev->name, __func__, __LINE__);
1986 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1991 if (status == SAM_STAT_GOOD) {
1992 if (skd_chk_read_buf(skdev, skspcl) == 0)
1993 skd_send_internal_skspcl(skdev, skspcl,
1996 pr_err("(%s):*** W/R Buffer mismatch %d ***\n",
1997 skd_name(skdev), skdev->connect_retries);
1998 if (skdev->connect_retries <
1999 SKD_MAX_CONNECT_RETRIES) {
2000 skdev->connect_retries++;
2001 skd_soft_reset(skdev);
2003 pr_err("(%s): W/R Buffer Connect Error\n",
2010 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2011 pr_debug("%s:%s:%d "
2012 "read buffer failed, don't send anymore state 0x%x\n",
2013 skdev->name, __func__, __LINE__,
2017 pr_debug("%s:%s:%d "
2018 "**** read buffer failed, retry skerr\n",
2019 skdev->name, __func__, __LINE__);
2020 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2025 skdev->read_cap_is_valid = 0;
2026 if (status == SAM_STAT_GOOD) {
2027 skdev->read_cap_last_lba =
2028 (buf[0] << 24) | (buf[1] << 16) |
2029 (buf[2] << 8) | buf[3];
2030 skdev->read_cap_blocksize =
2031 (buf[4] << 24) | (buf[5] << 16) |
2032 (buf[6] << 8) | buf[7];
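/*
 * The shifts above decode the two big-endian 32-bit fields of a
 * READ CAPACITY(10) response (last LBA, then block size). An
 * equivalent sketch using the helpers from <asm/unaligned.h>,
 * already included above:
 *
 *	skdev->read_cap_last_lba = get_unaligned_be32(&buf[0]);
 *	skdev->read_cap_blocksize = get_unaligned_be32(&buf[4]);
 */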
2034 pr_debug("%s:%s:%d last lba %d, bs %d\n",
2035 skdev->name, __func__, __LINE__,
2036 skdev->read_cap_last_lba,
2037 skdev->read_cap_blocksize);
2039 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2041 skdev->read_cap_is_valid = 1;
2043 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2044 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2045 (skerr->key == MEDIUM_ERROR)) {
2046 skdev->read_cap_last_lba = ~0;
2047 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2048 pr_debug("%s:%s:%d "
2049 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2050 skdev->name, __func__, __LINE__);
2051 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2053 pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2054 skdev->name, __func__, __LINE__);
2055 skd_send_internal_skspcl(skdev, skspcl,
2061 skdev->inquiry_is_valid = 0;
2062 if (status == SAM_STAT_GOOD) {
2063 skdev->inquiry_is_valid = 1;
2065 for (i = 0; i < 12; i++)
2066 skdev->inq_serial_num[i] = buf[i + 4];
2067 skdev->inq_serial_num[12] = 0;
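/* (EVPD page 0x80 layout: a 4-byte header precedes the serial number,
 * so the 0x10 allocation length requested above yields the 12 bytes
 * copied here) */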
2070 if (skd_unquiesce_dev(skdev) < 0)
2071 pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
2072 skdev->name, __func__, __LINE__);
2073 /* connection is complete */
2074 skdev->connect_retries = 0;
2077 case SYNCHRONIZE_CACHE:
2078 if (status == SAM_STAT_GOOD)
2079 skdev->sync_done = 1;
2081 skdev->sync_done = -1;
2082 wake_up_interruptible(&skdev->waitq);
2086 SKD_ASSERT("we didn't send this");
2091 *****************************************************************************
2093 *****************************************************************************
2096 static void skd_send_fitmsg(struct skd_device *skdev,
2097 struct skd_fitmsg_context *skmsg)
2100 struct fit_msg_hdr *fmh;
2102 pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2103 skdev->name, __func__, __LINE__,
2104 skmsg->mb_dma_address, skdev->in_flight);
2105 pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2106 skdev->name, __func__, __LINE__,
2107 skmsg->msg_buf, skmsg->offset);
2109 qcmd = skmsg->mb_dma_address;
2110 qcmd |= FIT_QCMD_QID_NORMAL;
2112 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2113 skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2115 if (unlikely(skdev->dbg_level > 1)) {
2116 u8 *bp = (u8 *)skmsg->msg_buf;
2118 for (i = 0; i < skmsg->length; i += 8) {
2119 pr_debug("%s:%s:%d msg[%2d] %8ph\n",
2120 skdev->name, __func__, __LINE__, i, &bp[i]);
2126 if (skmsg->length > 256)
2127 qcmd |= FIT_QCMD_MSGSIZE_512;
2128 else if (skmsg->length > 128)
2129 qcmd |= FIT_QCMD_MSGSIZE_256;
2130 else if (skmsg->length > 64)
2131 qcmd |= FIT_QCMD_MSGSIZE_128;
2134 * This makes no sense because the FIT msg header is
2135 * 64 bytes. If the msg is only 64 bytes long it has
2136 * no payload.
2137 */
2138 qcmd |= FIT_QCMD_MSGSIZE_64;
2140 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2143 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2146 static void skd_send_special_fitmsg(struct skd_device *skdev,
2147 struct skd_special_context *skspcl)
2151 if (unlikely(skdev->dbg_level > 1)) {
2152 u8 *bp = (u8 *)skspcl->msg_buf;
2155 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2156 pr_debug("%s:%s:%d spcl[%2d] %8ph\n",
2157 skdev->name, __func__, __LINE__, i, &bp[i]);
2162 pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2163 skdev->name, __func__, __LINE__,
2164 skspcl, skspcl->req.id, skspcl->req.sksg_list,
2165 skspcl->req.sksg_dma_address);
2166 for (i = 0; i < skspcl->req.n_sg; i++) {
2167 struct fit_sg_descriptor *sgd =
2168 &skspcl->req.sksg_list[i];
2170 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
2171 "addr=0x%llx next=0x%llx\n",
2172 skdev->name, __func__, __LINE__,
2173 i, sgd->byte_count, sgd->control,
2174 sgd->host_side_addr, sgd->next_desc_ptr);
2179 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2180 * and one 64-byte SSDI command.
2182 qcmd = skspcl->mb_dma_address;
2183 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2185 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2188 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2192 *****************************************************************************
2194 *****************************************************************************
2197 static void skd_complete_other(struct skd_device *skdev,
2198 volatile struct fit_completion_entry_v1 *skcomp,
2199 volatile struct fit_comp_error_info *skerr);
2208 enum skd_check_status_action action;
2211 static struct sns_info skd_chkstat_table[] = {
2213 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
2214 SKD_CHECK_STATUS_REPORT_GOOD },
2217 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
2218 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2219 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
2220 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2221 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
2222 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2224 /* Retry (with limits) */
2225 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
2226 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2227 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
2228 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2229 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
2230 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2231 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
2232 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2234 /* Busy (or about to be) */
2235 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
2236 SKD_CHECK_STATUS_BUSY_IMMINENT },
2240 * Look up status and sense data to decide how to handle the error
2241 * from the device.
2242 * mask says which fields must match e.g., mask=0x18 means check
2243 * type and stat, ignore key, asc, ascq.
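/*
 * Worked example: the first skd_chkstat_table entry above uses mask
 * 0x1c (type, stat and key must match), so a completion with type
 * 0x70, status 0x02 and sense key RECOVERED_ERROR is reported good
 * regardless of the asc/ascq the drive returned.
 */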
2246 static enum skd_check_status_action
2247 skd_check_status(struct skd_device *skdev,
2248 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
2252 pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2253 skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2256 pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2257 skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2258 skerr->key, skerr->code, skerr->qual, skerr->fruc);
2260 /* Does the info match an entry in the good category? */
2261 n = ARRAY_SIZE(skd_chkstat_table);
2262 for (i = 0; i < n; i++) {
2263 struct sns_info *sns = &skd_chkstat_table[i];
2265 if (sns->mask & 0x10)
2266 if (skerr->type != sns->type)
2269 if (sns->mask & 0x08)
2270 if (cmp_status != sns->stat)
2273 if (sns->mask & 0x04)
2274 if (skerr->key != sns->key)
2277 if (sns->mask & 0x02)
2278 if (skerr->code != sns->asc)
2281 if (sns->mask & 0x01)
2282 if (skerr->qual != sns->ascq)
2285 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2286 pr_err("(%s): SMART Alert: sense key/asc/ascq "
2288 skd_name(skdev), skerr->key,
2289 skerr->code, skerr->qual);
2294 /* No other match, so nonzero status means error,
2295 * zero status means good
2298 pr_debug("%s:%s:%d status check: error\n",
2299 skdev->name, __func__, __LINE__);
2300 return SKD_CHECK_STATUS_REPORT_ERROR;
2303 pr_debug("%s:%s:%d status check good default\n",
2304 skdev->name, __func__, __LINE__);
2305 return SKD_CHECK_STATUS_REPORT_GOOD;
2308 static void skd_resolve_req_exception(struct skd_device *skdev,
2309 struct skd_request_context *skreq)
2311 u8 cmp_status = skreq->completion.status;
2313 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2314 case SKD_CHECK_STATUS_REPORT_GOOD:
2315 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2316 skd_end_request(skdev, skreq, BLK_STS_OK);
2319 case SKD_CHECK_STATUS_BUSY_IMMINENT:
2320 skd_log_skreq(skdev, skreq, "retry(busy)");
2321 blk_requeue_request(skdev->queue, skreq->req);
2322 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2323 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2324 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2325 skd_quiesce_dev(skdev);
2328 case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2329 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2330 skd_log_skreq(skdev, skreq, "retry");
2331 blk_requeue_request(skdev->queue, skreq->req);
2336 case SKD_CHECK_STATUS_REPORT_ERROR:
2338 skd_end_request(skdev, skreq, BLK_STS_IOERR);
2343 /* assume spinlock is already held */
2344 static void skd_release_skreq(struct skd_device *skdev,
2345 struct skd_request_context *skreq)
2348 struct skd_fitmsg_context *skmsg;
2353 * Reclaim the FIT msg buffer if this is
2354 * the first of the requests it carried to
2355 * be completed. The FIT msg buffer used to
2356 * send this request cannot be reused until
2357 * we are sure the s1120 card has copied
2358 * it to its memory. The FIT msg might have
2359 * contained several requests. As soon as
2360 * any of them are completed we know that
2361 * the entire FIT msg was transferred.
2362 * Only the first completed request will
2363 * match the FIT msg buffer id. The FIT
2364 * msg buffer id is immediately updated.
2365 * When subsequent requests complete the FIT
2366 * msg buffer id won't match, so we know
2367 * quite cheaply that it is already done.
2369 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2370 SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2372 skmsg = &skdev->skmsg_table[msg_slot];
2373 if (skmsg->id == skreq->fitmsg_id) {
2374 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2375 SKD_ASSERT(skmsg->outstanding > 0);
2376 skmsg->outstanding--;
2377 if (skmsg->outstanding == 0) {
2378 skmsg->state = SKD_MSG_STATE_IDLE;
2379 skmsg->id += SKD_ID_INCR;
2380 skmsg->next = skdev->skmsg_free_list;
2381 skdev->skmsg_free_list = skmsg;
2386 * Decrease the number of active requests.
2387 * Also decrements the count in the timeout slot.
2389 SKD_ASSERT(skdev->in_flight > 0);
2390 skdev->in_flight -= 1;
2392 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2393 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2394 skdev->timeout_slot[timo_slot] -= 1;
2402 * Reclaim the skd_request_context
2404 skreq->state = SKD_REQ_STATE_IDLE;
2405 skreq->id += SKD_ID_INCR;
2406 skreq->next = skdev->skreq_free_list;
2407 skdev->skreq_free_list = skreq;
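/*
 * Illustrative sketch of how the 16-bit context id used above decomposes
 * under the SKD_ID_* masks: the slot bits index into a table, the table
 * bits select which table, and the remaining high bits are a uniquifier
 * that skd_release_skreq() bumps by SKD_ID_INCR on every recycle, so a
 * stale completion can never match a reused slot. skd_decode_id() is a
 * hypothetical helper, not part of the driver.
 */
static inline void skd_decode_id(u32 id, u32 *table, u32 *slot, u32 *uniq)
{
	*table = id & SKD_ID_TABLE_MASK;	  /* rw/internal/special/fitmsg */
	*slot = id & SKD_ID_SLOT_MASK;		  /* index within that table */
	*uniq = id & ~SKD_ID_SLOT_AND_TABLE_MASK; /* recycle counter bits */
}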
2410 #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
2412 static void skd_do_inq_page_00(struct skd_device *skdev,
2413 volatile struct fit_completion_entry_v1 *skcomp,
2414 volatile struct fit_comp_error_info *skerr,
2415 uint8_t *cdb, uint8_t *buf)
2417 uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2419 /* Caller requested "supported pages". The driver needs to insert its own page into the list returned by the device. */
2422 pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2423 skdev->name, __func__, __LINE__);
2425 /* If the device rejected the request because the CDB was
2426 * improperly formed, then just leave.
2428 if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2429 skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2432 /* Get the amount of space the caller allocated */
2433 max_bytes = (cdb[3] << 8) | cdb[4];
2435 /* Get the number of pages actually returned by the device */
2436 drive_pages = (buf[2] << 8) | buf[3];
2437 drive_bytes = drive_pages + 4;
2438 new_size = drive_pages + 1;
2440 /* Supported pages must be in numerical order, so find where
2441 * the driver page needs to be inserted into the list of
2442 * pages returned by the device.
2444 for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2445 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2446 return; /* Device is already using this page code; abort. */
2447 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2451 if (insert_pt < max_bytes) {
2454 /* Shift everything up one byte to make room. */
2455 for (u = new_size + 3; u > insert_pt; u--)
2456 buf[u] = buf[u - 1];
2457 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2459 /* Increment num_returned_bytes by one, preserving SCSI (big-endian) byte order. */
2460 skcomp->num_returned_bytes =
2461 cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
2466 /* update page length field to reflect the driver's page too */
2467 buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2468 buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
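/*
 * Illustrative note: the open-coded big-endian loads above, e.g.
 * "(cdb[3] << 8) | cdb[4]", could equivalently use the helpers from
 * <asm/unaligned.h>, which this file already includes:
 *
 *	max_bytes   = get_unaligned_be16(&cdb[3]);
 *	drive_pages = get_unaligned_be16(&buf[2]);
 *
 * The behavior is identical; the helpers just make the byte order
 * explicit.
 */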
2471 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2477 pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2480 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2482 pci_bus_speed = linksta & 0xF;
2483 pci_lanes = (linksta & 0x3F0) >> 4;
2485 *speed = STEC_LINK_UNKNOWN;
2490 switch (pci_bus_speed) {
2492 *speed = STEC_LINK_2_5GTS;
2495 *speed = STEC_LINK_5GTS;
2498 *speed = STEC_LINK_8GTS;
2501 *speed = STEC_LINK_UNKNOWN;
2505 if (pci_lanes <= 0x20)
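/*
 * Illustrative sketch: on kernels that provide the PCIe capability
 * accessors, the same link query can be written without finding the
 * capability offset by hand (assuming pcie_capability_read_word() and
 * the PCI_EXP_LNKSTA_* field macros are available):
 *
 *	u16 linksta;
 *
 *	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &linksta);
 *	*speed = linksta & PCI_EXP_LNKSTA_CLS;
 *	*width = (linksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
 */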
2511 static void skd_do_inq_page_da(struct skd_device *skdev,
2512 volatile struct fit_completion_entry_v1 *skcomp,
2513 volatile struct fit_comp_error_info *skerr,
2514 uint8_t *cdb, uint8_t *buf)
2516 struct pci_dev *pdev = skdev->pdev;
2518 struct driver_inquiry_data inq;
2521 pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2522 skdev->name, __func__, __LINE__);
2524 memset(&inq, 0, sizeof(inq));
2526 inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2528 skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2529 inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2530 inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2531 inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2533 pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2534 inq.pcie_vendor_id = cpu_to_be16(val);
2536 pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2537 inq.pcie_device_id = cpu_to_be16(val);
2539 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2540 inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2542 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2543 inq.pcie_subsystem_device_id = cpu_to_be16(val);
2545 /* Driver version, fixed length, padded with spaces on the right */
2546 inq.driver_version_length = sizeof(inq.driver_version);
2547 memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2548 memcpy(inq.driver_version, DRV_VER_COMPL,
2549 min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2551 inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2553 /* Clear the error set by the device */
2554 skcomp->status = SAM_STAT_GOOD;
2555 memset((void *)skerr, 0, sizeof(*skerr));
2557 /* copy response into output buffer */
2558 max_bytes = (cdb[3] << 8) | cdb[4];
2559 memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2561 skcomp->num_returned_bytes =
2562 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
2565 static void skd_do_driver_inq(struct skd_device *skdev,
2566 volatile struct fit_completion_entry_v1 *skcomp,
2567 volatile struct fit_comp_error_info *skerr,
2568 uint8_t *cdb, uint8_t *buf)
2572 else if (cdb[0] != INQUIRY)
2573 return; /* Not an INQUIRY */
2574 else if ((cdb[1] & 1) == 0)
2575 return; /* EVPD not set */
2576 else if (cdb[2] == 0)
2577 /* Need to add driver's page to supported pages list */
2578 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2579 else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2580 /* Caller requested driver's page */
2581 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2584 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2593 static void skd_process_scsi_inq(struct skd_device *skdev,
2594 volatile struct fit_completion_entry_v1
2596 volatile struct fit_comp_error_info *skerr,
2597 struct skd_special_context *skspcl)
2600 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2601 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2603 dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2604 skspcl->req.sg_data_dir);
2605 buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2608 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2611 static int skd_isr_completion_posted(struct skd_device *skdev,
2612 int limit, int *enqueued)
2614 volatile struct fit_completion_entry_v1 *skcmp = NULL;
2615 volatile struct fit_comp_error_info *skerr;
2618 struct skd_request_context *skreq;
2627 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2629 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2630 cmp_cycle = skcmp->cycle;
2631 cmp_cntxt = skcmp->tag;
2632 cmp_status = skcmp->status;
2633 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2635 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2637 pr_debug("%s:%s:%d "
2638 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
2639 "busy=%d rbytes=0x%x proto=%d\n",
2640 skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
2641 skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
2642 skdev->in_flight, cmp_bytes, skdev->proto_ver);
2644 if (cmp_cycle != skdev->skcomp_cycle) {
2645 pr_debug("%s:%s:%d end of completions\n",
2646 skdev->name, __func__, __LINE__);
2650 * Update the completion queue head index and possibly
2651 * the completion cycle count. 8-bit wrap-around.
2654 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2655 skdev->skcomp_ix = 0;
2656 skdev->skcomp_cycle++;
2660 * The command context is a unique 32-bit ID. The low order
2661 * bits help locate the request. The request is usually a
2662 * r/w request (see skd_start() above) or a special request.
2665 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2667 /* Is this other than a r/w request? */
2668 if (req_slot >= skdev->num_req_context) {
2670 * This is not a completion for a r/w request.
2672 skd_complete_other(skdev, skcmp, skerr);
2676 skreq = &skdev->skreq_table[req_slot];
2679 * Make sure the request ID for the slot matches.
2681 if (skreq->id != req_id) {
2682 pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
2683 skdev->name, __func__, __LINE__,
2686 u16 new_id = cmp_cntxt;
2687 pr_err("(%s): Completion mismatch "
2688 "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2689 skd_name(skdev), req_id,
2696 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2698 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2699 pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
2700 skdev->name, __func__, __LINE__,
2702 /* a previously timed out command can
2703 * now be cleaned up */
2704 skd_release_skreq(skdev, skreq);
2708 skreq->completion = *skcmp;
2709 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2710 skreq->err_info = *skerr;
2711 skd_log_check_status(skdev, cmp_status, skerr->key,
2712 skerr->code, skerr->qual,
2715 /* Release DMA resources for the request. */
2716 if (skreq->n_sg > 0)
2717 skd_postop_sg_list(skdev, skreq);
2720 pr_debug("%s:%s:%d NULL backptr skdreq %p, "
2721 "req=0x%x req_id=0x%x\n",
2722 skdev->name, __func__, __LINE__,
2723 skreq, skreq->id, req_id);
2726 * Capture the outcome and post it back to the native request.
2729 if (likely(cmp_status == SAM_STAT_GOOD))
2730 skd_end_request(skdev, skreq, BLK_STS_OK);
2732 skd_resolve_req_exception(skdev, skreq);
2736 * Release the skreq, its FIT msg (if one), timeout slot, and queue depth.
2739 skd_release_skreq(skdev, skreq);
2741 /* A skd_isr_comp_limit of zero means no limit. */
2743 if (++processed >= limit) {
2750 if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2751 && (skdev->in_flight) == 0) {
2752 skdev->state = SKD_DRVR_STATE_PAUSED;
2753 wake_up_interruptible(&skdev->waitq);
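/*
 * Illustrative note on the cycle field checked above: it is the usual
 * handshake for a DMA'd completion ring with no head/tail registers.
 * The device writes entries in order, tagging each with its current
 * cycle value, and the host flips the value it expects every time the
 * index wraps. A minimal consumer of the same idea (names illustrative):
 *
 *	while (ring[ix].cycle == expected_cycle) {
 *		consume(&ring[ix]);
 *		if (++ix == ring_size) {
 *			ix = 0;
 *			expected_cycle++;	// 8-bit wrap-around
 *		}
 *	}
 *
 * An entry whose cycle does not match is either stale (previous pass)
 * or not yet written, so the scan stops there.
 */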
2759 static void skd_complete_other(struct skd_device *skdev,
2760 volatile struct fit_completion_entry_v1 *skcomp,
2761 volatile struct fit_comp_error_info *skerr)
2766 struct skd_special_context *skspcl;
2768 req_id = skcomp->tag;
2769 req_table = req_id & SKD_ID_TABLE_MASK;
2770 req_slot = req_id & SKD_ID_SLOT_MASK;
2772 pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
2773 skdev->name, __func__, __LINE__,
2774 req_table, req_id, req_slot);
2777 * Based on the request id, determine how to dispatch this completion.
2778 * This switch/case finds the good cases and forwards the
2779 * completion entry. Errors are reported below the switch.
2781 switch (req_table) {
2782 case SKD_ID_RW_REQUEST:
2784 * The caller, skd_isr_completion_posted() above,
2785 * handles r/w requests. The only way we get here
2786 * is if the req_slot is out of bounds.
2790 case SKD_ID_SPECIAL_REQUEST:
2792 * Make sure the req_slot is in bounds and that the id matches.
2795 if (req_slot < skdev->n_special) {
2796 skspcl = &skdev->skspcl_table[req_slot];
2797 if (skspcl->req.id == req_id &&
2798 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2799 skd_complete_special(skdev,
2800 skcomp, skerr, skspcl);
2806 case SKD_ID_INTERNAL:
2807 if (req_slot == 0) {
2808 skspcl = &skdev->internal_skspcl;
2809 if (skspcl->req.id == req_id &&
2810 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2811 skd_complete_internal(skdev,
2812 skcomp, skerr, skspcl);
2818 case SKD_ID_FIT_MSG:
2820 * These ids should never appear in a completion record.
2826 * These ids should never appear anywhere;
2832 * If we get here it is a bad or stale id.
2836 static void skd_complete_special(struct skd_device *skdev,
2837 volatile struct fit_completion_entry_v1
2839 volatile struct fit_comp_error_info *skerr,
2840 struct skd_special_context *skspcl)
2842 pr_debug("%s:%s:%d completing special request %p\n",
2843 skdev->name, __func__, __LINE__, skspcl);
2844 if (skspcl->orphaned) {
2845 /* Discard orphaned request */
2846 /* ?: Can this release directly or does it need
2847 * to use a worker? */
2848 pr_debug("%s:%s:%d release orphaned %p\n",
2849 skdev->name, __func__, __LINE__, skspcl);
2850 skd_release_special(skdev, skspcl);
2854 skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2856 skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2857 skspcl->req.completion = *skcomp;
2858 skspcl->req.err_info = *skerr;
2860 skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2861 skerr->code, skerr->qual, skerr->fruc);
2863 wake_up_interruptible(&skdev->waitq);
2866 /* assume spinlock is already held */
2867 static void skd_release_special(struct skd_device *skdev,
2868 struct skd_special_context *skspcl)
2870 int i, was_depleted;
2872 for (i = 0; i < skspcl->req.n_sg; i++) {
2873 struct page *page = sg_page(&skspcl->req.sg[i]);
2877 was_depleted = (skdev->skspcl_free_list == NULL);
2879 skspcl->req.state = SKD_REQ_STATE_IDLE;
2880 skspcl->req.id += SKD_ID_INCR;
2882 (struct skd_request_context *)skdev->skspcl_free_list;
2883 skdev->skspcl_free_list = skspcl;
2886 pr_debug("%s:%s:%d skspcl was depleted\n",
2887 skdev->name, __func__, __LINE__);
2888 /* Free list was depleted. There might be waiters. */
2889 wake_up_interruptible(&skdev->waitq);
2893 static void skd_reset_skcomp(struct skd_device *skdev)
2896 struct fit_completion_entry_v1 *skcomp;
2898 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2899 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2901 memset(skdev->skcomp_table, 0, nbytes);
2903 skdev->skcomp_ix = 0;
2904 skdev->skcomp_cycle = 1;
2908 *****************************************************************************
2909 * INTERRUPTS
2910 *****************************************************************************
2912 static void skd_completion_worker(struct work_struct *work)
2914 struct skd_device *skdev =
2915 container_of(work, struct skd_device, completion_worker);
2916 unsigned long flags;
2917 int flush_enqueued = 0;
2919 spin_lock_irqsave(&skdev->lock, flags);
2922 * pass in limit=0, which means no limit;
2923 * process everything in compq
2925 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2926 skd_request_fn(skdev->queue);
2928 spin_unlock_irqrestore(&skdev->lock, flags);
2931 static void skd_isr_msg_from_dev(struct skd_device *skdev);
2934 skd_isr(int irq, void *ptr)
2936 struct skd_device *skdev;
2941 int flush_enqueued = 0;
2943 skdev = (struct skd_device *)ptr;
2944 spin_lock(&skdev->lock);
2947 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2949 ack = FIT_INT_DEF_MASK;
2952 pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
2953 skdev->name, __func__, __LINE__, intstat, ack);
2955 /* As long as an interrupt is pending on the device, keep
2956 * looping. When none remain, get out; but if we never did
2957 * any processing, kick the completion handler anyway.
2960 /* No interrupts on the device, but run the completion handler anyway. */
2964 if (likely(skdev->state
2965 == SKD_DRVR_STATE_ONLINE))
2972 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2974 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
2975 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
2976 if (intstat & FIT_ISH_COMPLETION_POSTED) {
2978 * If we have already deferred completion
2979 * processing, don't bother running it again
2983 skd_isr_completion_posted(skdev,
2984 skd_isr_comp_limit, &flush_enqueued);
2987 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2988 skd_isr_fwstate(skdev);
2989 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2991 SKD_DRVR_STATE_DISAPPEARED) {
2992 spin_unlock(&skdev->lock);
2997 if (intstat & FIT_ISH_MSG_FROM_DEV)
2998 skd_isr_msg_from_dev(skdev);
3002 if (unlikely(flush_enqueued))
3003 skd_request_fn(skdev->queue);
3006 schedule_work(&skdev->completion_worker);
3007 else if (!flush_enqueued)
3008 skd_request_fn(skdev->queue);
3010 spin_unlock(&skdev->lock);
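/*
 * Note on the deferral policy encoded at the tail of the ISR above: if
 * skd_isr_completion_posted() stopped early because it reached
 * skd_isr_comp_limit, the leftover completions are drained from
 * skd_completion_worker() via schedule_work(), which bounds the time
 * spent in hard-irq context. The worker also runs the request function,
 * so it is skipped here in that case.
 */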
3015 static void skd_drive_fault(struct skd_device *skdev)
3017 skdev->state = SKD_DRVR_STATE_FAULT;
3018 pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3021 static void skd_drive_disappeared(struct skd_device *skdev)
3023 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3024 pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3027 static void skd_isr_fwstate(struct skd_device *skdev)
3032 int prev_driver_state = skdev->state;
3034 sense = SKD_READL(skdev, FIT_STATUS);
3035 state = sense & FIT_SR_DRIVE_STATE_MASK;
3037 pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3039 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3040 skd_drive_state_to_str(state), state);
3042 skdev->drive_state = state;
3044 switch (skdev->drive_state) {
3045 case FIT_SR_DRIVE_INIT:
3046 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3047 skd_disable_interrupts(skdev);
3050 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3051 skd_recover_requests(skdev, 0);
3052 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3053 skdev->timer_countdown = SKD_STARTING_TIMO;
3054 skdev->state = SKD_DRVR_STATE_STARTING;
3055 skd_soft_reset(skdev);
3058 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3059 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3060 skdev->last_mtd = mtd;
3063 case FIT_SR_DRIVE_ONLINE:
3064 skdev->cur_max_queue_depth = skd_max_queue_depth;
3065 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3066 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3068 skdev->queue_low_water_mark =
3069 skdev->cur_max_queue_depth * 2 / 3 + 1;
3070 if (skdev->queue_low_water_mark < 1)
3071 skdev->queue_low_water_mark = 1;
3072 pr_info("(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3074 skdev->cur_max_queue_depth,
3075 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3077 skd_refresh_device_data(skdev);
3080 case FIT_SR_DRIVE_BUSY:
3081 skdev->state = SKD_DRVR_STATE_BUSY;
3082 skdev->timer_countdown = SKD_BUSY_TIMO;
3083 skd_quiesce_dev(skdev);
3085 case FIT_SR_DRIVE_BUSY_SANITIZE:
3086 /* Set the timer for 3 seconds; we'll abort any unfinished
3087 * commands after it expires.
3089 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3090 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3091 blk_start_queue(skdev->queue);
3093 case FIT_SR_DRIVE_BUSY_ERASE:
3094 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3095 skdev->timer_countdown = SKD_BUSY_TIMO;
3097 case FIT_SR_DRIVE_OFFLINE:
3098 skdev->state = SKD_DRVR_STATE_IDLE;
3100 case FIT_SR_DRIVE_SOFT_RESET:
3101 switch (skdev->state) {
3102 case SKD_DRVR_STATE_STARTING:
3103 case SKD_DRVR_STATE_RESTARTING:
3104 /* Expected by a caller of skd_soft_reset() */
3107 skdev->state = SKD_DRVR_STATE_RESTARTING;
3111 case FIT_SR_DRIVE_FW_BOOTING:
3112 pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3113 skdev->name, __func__, __LINE__, skdev->name);
3114 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3115 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3118 case FIT_SR_DRIVE_DEGRADED:
3119 case FIT_SR_PCIE_LINK_DOWN:
3120 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3123 case FIT_SR_DRIVE_FAULT:
3124 skd_drive_fault(skdev);
3125 skd_recover_requests(skdev, 0);
3126 blk_start_queue(skdev->queue);
3129 /* PCIe bus returned all Fs? */
3131 pr_info("(%s): state=0x%x sense=0x%x\n",
3132 skd_name(skdev), state, sense);
3133 skd_drive_disappeared(skdev);
3134 skd_recover_requests(skdev, 0);
3135 blk_start_queue(skdev->queue);
3139 * Unknown FW state. Wait for a state we recognize.
3143 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3145 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3146 skd_skdev_state_to_str(skdev->state), skdev->state);
3149 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3153 for (i = 0; i < skdev->num_req_context; i++) {
3154 struct skd_request_context *skreq = &skdev->skreq_table[i];
3156 if (skreq->state == SKD_REQ_STATE_BUSY) {
3157 skd_log_skreq(skdev, skreq, "recover");
3159 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3160 SKD_ASSERT(skreq->req != NULL);
3162 /* Release DMA resources for the request. */
3163 if (skreq->n_sg > 0)
3164 skd_postop_sg_list(skdev, skreq);
3167 (unsigned long) ++skreq->req->special <
3169 blk_requeue_request(skdev->queue, skreq->req);
3171 skd_end_request(skdev, skreq, BLK_STS_IOERR);
3175 skreq->state = SKD_REQ_STATE_IDLE;
3176 skreq->id += SKD_ID_INCR;
3179 skreq[-1].next = skreq;
3182 skdev->skreq_free_list = skdev->skreq_table;
3184 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3185 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3187 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3188 skd_log_skmsg(skdev, skmsg, "salvaged");
3189 SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3190 skmsg->state = SKD_MSG_STATE_IDLE;
3191 skmsg->id += SKD_ID_INCR;
3194 skmsg[-1].next = skmsg;
3197 skdev->skmsg_free_list = skdev->skmsg_table;
3199 for (i = 0; i < skdev->n_special; i++) {
3200 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3202 /* If orphaned, reclaim it because it has already been reported
3203 * to the process as an error (it was just waiting for
3204 * a completion that didn't come, and now it never will).
3205 * If busy, change to a state that will cause it to error
3206 * out in the wait routine and let it do the normal
3207 * reporting and reclaiming.
3209 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3210 if (skspcl->orphaned) {
3211 pr_debug("%s:%s:%d orphaned %p\n",
3212 skdev->name, __func__, __LINE__,
3214 skd_release_special(skdev, skspcl);
3216 pr_debug("%s:%s:%d not orphaned %p\n",
3217 skdev->name, __func__, __LINE__,
3219 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3223 skdev->skspcl_free_list = skdev->skspcl_table;
3225 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3226 skdev->timeout_slot[i] = 0;
3228 skdev->in_flight = 0;
3231 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3237 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3239 pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3240 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3242 /* ignore any mtd that is an ack for something we didn't send */
3243 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3246 switch (FIT_MXD_TYPE(mfd)) {
3247 case FIT_MTD_FITFW_INIT:
3248 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3250 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3251 pr_err("(%s): protocol mismatch\n",
3253 pr_err("(%s): got=%d support=%d\n",
3254 skdev->name, skdev->proto_ver,
3255 FIT_PROTOCOL_VERSION_1);
3256 pr_err("(%s): please upgrade driver\n",
3258 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3259 skd_soft_reset(skdev);
3262 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3263 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3264 skdev->last_mtd = mtd;
3267 case FIT_MTD_GET_CMDQ_DEPTH:
3268 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3269 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3270 SKD_N_COMPLETION_ENTRY);
3271 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3272 skdev->last_mtd = mtd;
3275 case FIT_MTD_SET_COMPQ_DEPTH:
3276 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3277 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3278 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3279 skdev->last_mtd = mtd;
3282 case FIT_MTD_SET_COMPQ_ADDR:
3283 skd_reset_skcomp(skdev);
3284 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3285 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3286 skdev->last_mtd = mtd;
3289 case FIT_MTD_CMD_LOG_HOST_ID:
3290 skdev->connect_time_stamp = get_seconds();
3291 data = skdev->connect_time_stamp & 0xFFFF;
3292 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3293 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3294 skdev->last_mtd = mtd;
3297 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3298 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3299 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3300 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3301 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3302 skdev->last_mtd = mtd;
3305 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3306 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3307 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3308 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3309 skdev->last_mtd = mtd;
3311 pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3313 skdev->connect_time_stamp, skdev->drive_jiffies);
3316 case FIT_MTD_ARM_QUEUE:
3317 skdev->last_mtd = 0;
3319 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
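/*
 * Summary of the message handshake driven by this switch: each
 * SKD_WRITEL(..., FIT_MSG_TO_DEVICE) above advances one step, and the
 * device's acknowledgment arrives back here as the next mfd.
 *
 *	FIT_MTD_FITFW_INIT               -> check protocol version
 *	FIT_MTD_GET_CMDQ_DEPTH           -> learn device queue depth
 *	FIT_MTD_SET_COMPQ_DEPTH          -> announce SKD_N_COMPLETION_ENTRY
 *	FIT_MTD_SET_COMPQ_ADDR           -> hand over cq_dma_address
 *	FIT_MTD_CMD_LOG_HOST_ID          -> log the host id (devno)
 *	FIT_MTD_CMD_LOG_TIME_STAMP_LO/HI -> exchange time stamps
 *	FIT_MTD_ARM_QUEUE                -> drive is (or soon will be) ONLINE
 */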
3328 static void skd_disable_interrupts(struct skd_device *skdev)
3332 sense = SKD_READL(skdev, FIT_CONTROL);
3333 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3334 SKD_WRITEL(skdev, sense, FIT_CONTROL);
3335 pr_debug("%s:%s:%d sense 0x%x\n",
3336 skdev->name, __func__, __LINE__, sense);
3338 /* Note that all 1s are written. A 1-bit means
3339 * disable, a 0 means enable.
3341 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3344 static void skd_enable_interrupts(struct skd_device *skdev)
3348 /* unmask interrupts first */
3349 val = FIT_ISH_FW_STATE_CHANGE +
3350 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3352 /* Note that the complement of the mask is written. A 1-bit means
3353 * disable, a 0 means enable. */
3354 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3355 pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3356 skdev->name, __func__, __LINE__, ~val);
3358 val = SKD_READL(skdev, FIT_CONTROL);
3359 val |= FIT_CR_ENABLE_INTERRUPTS;
3360 pr_debug("%s:%s:%d control=0x%x\n",
3361 skdev->name, __func__, __LINE__, val);
3362 SKD_WRITEL(skdev, val, FIT_CONTROL);
3366 *****************************************************************************
3367 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3368 *****************************************************************************
3371 static void skd_soft_reset(struct skd_device *skdev)
3375 val = SKD_READL(skdev, FIT_CONTROL);
3376 val |= (FIT_CR_SOFT_RESET);
3377 pr_debug("%s:%s:%d control=0x%x\n",
3378 skdev->name, __func__, __LINE__, val);
3379 SKD_WRITEL(skdev, val, FIT_CONTROL);
3382 static void skd_start_device(struct skd_device *skdev)
3384 unsigned long flags;
3388 spin_lock_irqsave(&skdev->lock, flags);
3390 /* ack all ghost interrupts */
3391 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3393 sense = SKD_READL(skdev, FIT_STATUS);
3395 pr_debug("%s:%s:%d initial status=0x%x\n",
3396 skdev->name, __func__, __LINE__, sense);
3398 state = sense & FIT_SR_DRIVE_STATE_MASK;
3399 skdev->drive_state = state;
3400 skdev->last_mtd = 0;
3402 skdev->state = SKD_DRVR_STATE_STARTING;
3403 skdev->timer_countdown = SKD_STARTING_TIMO;
3405 skd_enable_interrupts(skdev);
3407 switch (skdev->drive_state) {
3408 case FIT_SR_DRIVE_OFFLINE:
3409 pr_err("(%s): Drive offline...\n", skd_name(skdev));
3412 case FIT_SR_DRIVE_FW_BOOTING:
3413 pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3414 skdev->name, __func__, __LINE__, skdev->name);
3415 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3416 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3419 case FIT_SR_DRIVE_BUSY_SANITIZE:
3420 pr_info("(%s): Start: BUSY_SANITIZE\n",
3422 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3423 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3426 case FIT_SR_DRIVE_BUSY_ERASE:
3427 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3428 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3429 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3432 case FIT_SR_DRIVE_INIT:
3433 case FIT_SR_DRIVE_ONLINE:
3434 skd_soft_reset(skdev);
3437 case FIT_SR_DRIVE_BUSY:
3438 pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3439 skdev->state = SKD_DRVR_STATE_BUSY;
3440 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3443 case FIT_SR_DRIVE_SOFT_RESET:
3444 pr_err("(%s) drive soft reset in prog\n",
3448 case FIT_SR_DRIVE_FAULT:
3449 /* Fault state is bad... a soft reset won't clear it.
3450 * A hard reset might, but does the device support it?
3451 * For now, just fault so the system doesn't hang.
3453 skd_drive_fault(skdev);
3454 /* Start the queue so we can respond to requests with an error. */
3455 pr_debug("%s:%s:%d starting %s queue\n",
3456 skdev->name, __func__, __LINE__, skdev->name);
3457 blk_start_queue(skdev->queue);
3458 skdev->gendisk_on = -1;
3459 wake_up_interruptible(&skdev->waitq);
3463 /* Most likely the device isn't there or isn't responding
3464 * to the BAR1 addresses. */
3465 skd_drive_disappeared(skdev);
3466 /* Start the queue so we can respond to requests with an error. */
3467 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3468 skdev->name, __func__, __LINE__, skdev->name);
3469 blk_start_queue(skdev->queue);
3470 skdev->gendisk_on = -1;
3471 wake_up_interruptible(&skdev->waitq);
3475 pr_err("(%s) Start: unknown state %x\n",
3476 skd_name(skdev), skdev->drive_state);
3480 state = SKD_READL(skdev, FIT_CONTROL);
3481 pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3482 skdev->name, __func__, __LINE__, state);
3484 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3485 pr_debug("%s:%s:%d Intr Status=0x%x\n",
3486 skdev->name, __func__, __LINE__, state);
3488 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3489 pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3490 skdev->name, __func__, __LINE__, state);
3492 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3493 pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3494 skdev->name, __func__, __LINE__, state);
3496 state = SKD_READL(skdev, FIT_HW_VERSION);
3497 pr_debug("%s:%s:%d HW version=0x%x\n",
3498 skdev->name, __func__, __LINE__, state);
3500 spin_unlock_irqrestore(&skdev->lock, flags);
3503 static void skd_stop_device(struct skd_device *skdev)
3505 unsigned long flags;
3506 struct skd_special_context *skspcl = &skdev->internal_skspcl;
3510 spin_lock_irqsave(&skdev->lock, flags);
3512 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3513 pr_err("(%s): skd_stop_device not online no sync\n",
3518 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3519 pr_err("(%s): skd_stop_device no special\n",
3524 skdev->state = SKD_DRVR_STATE_SYNCING;
3525 skdev->sync_done = 0;
3527 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3529 spin_unlock_irqrestore(&skdev->lock, flags);
3531 wait_event_interruptible_timeout(skdev->waitq,
3532 (skdev->sync_done), (10 * HZ));
3534 spin_lock_irqsave(&skdev->lock, flags);
3536 switch (skdev->sync_done) {
3538 pr_err("(%s): skd_stop_device no sync\n",
3542 pr_err("(%s): skd_stop_device sync done\n",
3546 pr_err("(%s): skd_stop_device sync error\n",
3551 skdev->state = SKD_DRVR_STATE_STOPPING;
3552 spin_unlock_irqrestore(&skdev->lock, flags);
3554 skd_kill_timer(skdev);
3556 spin_lock_irqsave(&skdev->lock, flags);
3557 skd_disable_interrupts(skdev);
3559 /* ensure all ints on device are cleared */
3560 /* soft reset the device to unload with a clean slate */
3561 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3562 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3564 spin_unlock_irqrestore(&skdev->lock, flags);
3566 /* poll every 100ms, 1 second timeout */
3567 for (i = 0; i < 10; i++) {
3569 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3570 if (dev_state == FIT_SR_DRIVE_INIT)
3572 set_current_state(TASK_INTERRUPTIBLE);
3573 schedule_timeout(msecs_to_jiffies(100));
3576 if (dev_state != FIT_SR_DRIVE_INIT)
3577 pr_err("(%s): skd_stop_device state error 0x%02x\n",
3578 skd_name(skdev), dev_state);
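/*
 * Illustrative note: the poll loop above open-codes a 100ms sleep. An
 * equivalent, arguably clearer, body (assuming interruptibility is not
 * needed at this point in teardown) would be:
 *
 *	if (dev_state == FIT_SR_DRIVE_INIT)
 *		break;
 *	msleep(100);
 */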
3581 /* assume spinlock is held */
3582 static void skd_restart_device(struct skd_device *skdev)
3586 /* ack all ghost interrupts */
3587 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3589 state = SKD_READL(skdev, FIT_STATUS);
3591 pr_debug("%s:%s:%d drive status=0x%x\n",
3592 skdev->name, __func__, __LINE__, state);
3594 state &= FIT_SR_DRIVE_STATE_MASK;
3595 skdev->drive_state = state;
3596 skdev->last_mtd = 0;
3598 skdev->state = SKD_DRVR_STATE_RESTARTING;
3599 skdev->timer_countdown = SKD_RESTARTING_TIMO;
3601 skd_soft_reset(skdev);
3604 /* assume spinlock is held */
3605 static int skd_quiesce_dev(struct skd_device *skdev)
3609 switch (skdev->state) {
3610 case SKD_DRVR_STATE_BUSY:
3611 case SKD_DRVR_STATE_BUSY_IMMINENT:
3612 pr_debug("%s:%s:%d stopping %s queue\n",
3613 skdev->name, __func__, __LINE__, skdev->name);
3614 blk_stop_queue(skdev->queue);
3616 case SKD_DRVR_STATE_ONLINE:
3617 case SKD_DRVR_STATE_STOPPING:
3618 case SKD_DRVR_STATE_SYNCING:
3619 case SKD_DRVR_STATE_PAUSING:
3620 case SKD_DRVR_STATE_PAUSED:
3621 case SKD_DRVR_STATE_STARTING:
3622 case SKD_DRVR_STATE_RESTARTING:
3623 case SKD_DRVR_STATE_RESUMING:
3626 pr_debug("%s:%s:%d state [%d] not implemented\n",
3627 skdev->name, __func__, __LINE__, skdev->state);
3632 /* assume spinlock is held */
3633 static int skd_unquiesce_dev(struct skd_device *skdev)
3635 int prev_driver_state = skdev->state;
3637 skd_log_skdev(skdev, "unquiesce");
3638 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3639 pr_debug("%s:%s:%d **** device already ONLINE\n",
3640 skdev->name, __func__, __LINE__);
3643 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3645 * If there has been a state change to something other than
3646 * ONLINE, we will rely on a controller state change
3647 * to come back online and restart the queue.
3648 * The BUSY state means that the driver is ready to
3649 * continue normal processing but is waiting for the
3650 * controller to become available.
3652 skdev->state = SKD_DRVR_STATE_BUSY;
3653 pr_debug("%s:%s:%d drive BUSY state\n",
3654 skdev->name, __func__, __LINE__);
3659 * The drive has just come online. The driver is either in startup,
3660 * paused performing a task, or busy waiting for hardware.
3662 switch (skdev->state) {
3663 case SKD_DRVR_STATE_PAUSED:
3664 case SKD_DRVR_STATE_BUSY:
3665 case SKD_DRVR_STATE_BUSY_IMMINENT:
3666 case SKD_DRVR_STATE_BUSY_ERASE:
3667 case SKD_DRVR_STATE_STARTING:
3668 case SKD_DRVR_STATE_RESTARTING:
3669 case SKD_DRVR_STATE_FAULT:
3670 case SKD_DRVR_STATE_IDLE:
3671 case SKD_DRVR_STATE_LOAD:
3672 skdev->state = SKD_DRVR_STATE_ONLINE;
3673 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3675 skd_skdev_state_to_str(prev_driver_state),
3676 prev_driver_state, skd_skdev_state_to_str(skdev->state),
3678 pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
3679 skdev->name, __func__, __LINE__);
3680 pr_debug("%s:%s:%d starting %s queue\n",
3681 skdev->name, __func__, __LINE__, skdev->name);
3682 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
3683 blk_start_queue(skdev->queue);
3684 skdev->gendisk_on = 1;
3685 wake_up_interruptible(&skdev->waitq);
3688 case SKD_DRVR_STATE_DISAPPEARED:
3690 pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
3691 skdev->name, __func__, __LINE__,
3699 *****************************************************************************
3700 * PCIe MSI/MSI-X INTERRUPT HANDLERS
3701 *****************************************************************************
3704 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3706 struct skd_device *skdev = skd_host_data;
3707 unsigned long flags;
3709 spin_lock_irqsave(&skdev->lock, flags);
3710 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3711 skdev->name, __func__, __LINE__,
3712 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3713 pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
3714 irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
3715 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3716 spin_unlock_irqrestore(&skdev->lock, flags);
3720 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3722 struct skd_device *skdev = skd_host_data;
3723 unsigned long flags;
3725 spin_lock_irqsave(&skdev->lock, flags);
3726 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3727 skdev->name, __func__, __LINE__,
3728 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3729 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3730 skd_isr_fwstate(skdev);
3731 spin_unlock_irqrestore(&skdev->lock, flags);
3735 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3737 struct skd_device *skdev = skd_host_data;
3738 unsigned long flags;
3739 int flush_enqueued = 0;
3742 spin_lock_irqsave(&skdev->lock, flags);
3743 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3744 skdev->name, __func__, __LINE__,
3745 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3746 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3747 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3750 skd_request_fn(skdev->queue);
3753 schedule_work(&skdev->completion_worker);
3754 else if (!flush_enqueued)
3755 skd_request_fn(skdev->queue);
3757 spin_unlock_irqrestore(&skdev->lock, flags);
3762 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3764 struct skd_device *skdev = skd_host_data;
3765 unsigned long flags;
3767 spin_lock_irqsave(&skdev->lock, flags);
3768 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3769 skdev->name, __func__, __LINE__,
3770 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3771 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3772 skd_isr_msg_from_dev(skdev);
3773 spin_unlock_irqrestore(&skdev->lock, flags);
3777 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3779 struct skd_device *skdev = skd_host_data;
3780 unsigned long flags;
3782 spin_lock_irqsave(&skdev->lock, flags);
3783 pr_debug("%s:%s:%d MSIX = 0x%x\n",
3784 skdev->name, __func__, __LINE__,
3785 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3786 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3787 spin_unlock_irqrestore(&skdev->lock, flags);
3792 *****************************************************************************
3793 * PCIe MSI/MSI-X SETUP
3794 *****************************************************************************
3797 struct skd_msix_entry {
3801 struct skd_init_msix_entry {
3803 irq_handler_t handler;
3806 #define SKD_MAX_MSIX_COUNT 13
3807 #define SKD_MIN_MSIX_COUNT 7
3808 #define SKD_BASE_MSIX_IRQ 4
3810 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3811 { "(DMA 0)", skd_reserved_isr },
3812 { "(DMA 1)", skd_reserved_isr },
3813 { "(DMA 2)", skd_reserved_isr },
3814 { "(DMA 3)", skd_reserved_isr },
3815 { "(State Change)", skd_statec_isr },
3816 { "(COMPL_Q)", skd_comp_q },
3817 { "(MSG)", skd_msg_isr },
3818 { "(Reserved)", skd_reserved_isr },
3819 { "(Reserved)", skd_reserved_isr },
3820 { "(Queue Full 0)", skd_qfull_isr },
3821 { "(Queue Full 1)", skd_qfull_isr },
3822 { "(Queue Full 2)", skd_qfull_isr },
3823 { "(Queue Full 3)", skd_qfull_isr },
3826 static int skd_acquire_msix(struct skd_device *skdev)
3829 struct pci_dev *pdev = skdev->pdev;
3831 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
3834 pr_err("(%s): failed to enable MSI-X %d\n",
3835 skd_name(skdev), rc);
3839 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
3840 sizeof(struct skd_msix_entry), GFP_KERNEL);
3841 if (!skdev->msix_entries) {
3843 pr_err("(%s): msix table allocation error\n",
3848 /* Enable MSI-X vectors for the base queue */
3849 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3850 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
3852 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3853 "%s%d-msix %s", DRV_NAME, skdev->devno,
3854 msix_entries[i].name);
3856 rc = devm_request_irq(&skdev->pdev->dev,
3857 pci_irq_vector(skdev->pdev, i),
3858 msix_entries[i].handler, 0,
3859 qentry->isr_name, skdev);
3861 pr_err("(%s): Unable to register(%d) MSI-X "
3863 skd_name(skdev), rc, i, qentry->isr_name);
3868 pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
3869 skdev->name, __func__, __LINE__,
3870 pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
3875 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
3877 kfree(skdev->msix_entries);
3878 skdev->msix_entries = NULL;
3882 static int skd_acquire_irq(struct skd_device *skdev)
3884 struct pci_dev *pdev = skdev->pdev;
3885 unsigned int irq_flag = PCI_IRQ_LEGACY;
3888 if (skd_isr_type == SKD_IRQ_MSIX) {
3889 rc = skd_acquire_msix(skdev);
3893 pr_err("(%s): failed to enable MSI-X, retrying with MSI %d\n",
3894 skd_name(skdev), rc);
3897 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
3900 if (skd_isr_type != SKD_IRQ_LEGACY)
3901 irq_flag |= PCI_IRQ_MSI;
3902 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
3904 pr_err("(%s): failed to allocate the MSI interrupt %d\n",
3905 skd_name(skdev), rc);
3909 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
3910 pdev->msi_enabled ? 0 : IRQF_SHARED,
3911 skdev->isr_name, skdev);
3913 pci_free_irq_vectors(pdev);
3914 pr_err("(%s): failed to allocate interrupt %d\n",
3915 skd_name(skdev), rc);
3922 static void skd_release_irq(struct skd_device *skdev)
3924 struct pci_dev *pdev = skdev->pdev;
3926 if (skdev->msix_entries) {
3929 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3930 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
3934 kfree(skdev->msix_entries);
3935 skdev->msix_entries = NULL;
3937 devm_free_irq(&pdev->dev, pdev->irq, skdev);
3940 pci_free_irq_vectors(pdev);
3944 *****************************************************************************
3945 * CONSTRUCT
3946 *****************************************************************************
3949 static int skd_cons_skcomp(struct skd_device *skdev)
3952 struct fit_completion_entry_v1 *skcomp;
3955 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
3956 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3958 pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
3959 skdev->name, __func__, __LINE__,
3960 nbytes, SKD_N_COMPLETION_ENTRY);
3962 skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
3963 &skdev->cq_dma_address);
3965 if (skcomp == NULL) {
3970 skdev->skcomp_table = skcomp;
3971 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
3973 SKD_N_COMPLETION_ENTRY);
3979 static int skd_cons_skmsg(struct skd_device *skdev)
3984 pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
3985 skdev->name, __func__, __LINE__,
3986 sizeof(struct skd_fitmsg_context),
3987 skdev->num_fitmsg_context,
3988 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
3990 skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
3991 *skdev->num_fitmsg_context, GFP_KERNEL);
3992 if (skdev->skmsg_table == NULL) {
3997 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3998 struct skd_fitmsg_context *skmsg;
4000 skmsg = &skdev->skmsg_table[i];
4002 skmsg->id = i + SKD_ID_FIT_MSG;
4004 skmsg->state = SKD_MSG_STATE_IDLE;
4005 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4006 SKD_N_FITMSG_BYTES + 64,
4007 &skmsg->mb_dma_address);
4009 if (skmsg->msg_buf == NULL) {
4014 skmsg->offset = (u32)((u64)skmsg->msg_buf &
4015 (~FIT_QCMD_BASE_ADDRESS_MASK));
4016 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4017 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4018 FIT_QCMD_BASE_ADDRESS_MASK);
4019 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4020 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4021 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4023 skmsg->next = &skmsg[1];
4026 /* Free list is in order starting with the 0th entry. */
4027 skdev->skmsg_table[i - 1].next = NULL;
4028 skdev->skmsg_free_list = skdev->skmsg_table;
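/*
 * Note on the pointer arithmetic in the loop above: it is a manual
 * round-up to the alignment implied by FIT_QCMD_BASE_ADDRESS_MASK, which
 * is why the allocation asks for an extra 64 bytes. Assuming the mask
 * selects contiguous high bits (a power-of-two alignment), the kernel's
 * ALIGN() macro expresses the same thing:
 *
 *	u64 align = ~FIT_QCMD_BASE_ADDRESS_MASK + 1;
 *
 *	skmsg->msg_buf = (u8 *)ALIGN((u64)skmsg->msg_buf, align);
 *	skmsg->mb_dma_address = ALIGN(skmsg->mb_dma_address, align);
 *
 * skmsg->offset records the original low-order bits for use when the
 * buffer is handed back in skd_free_skmsg().
 */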
4034 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4036 dma_addr_t *ret_dma_addr)
4038 struct fit_sg_descriptor *sg_list;
4041 nbytes = sizeof(*sg_list) * n_sg;
4043 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4045 if (sg_list != NULL) {
4046 uint64_t dma_address = *ret_dma_addr;
4049 memset(sg_list, 0, nbytes);
4051 for (i = 0; i < n_sg - 1; i++) {
4053 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4055 sg_list[i].next_desc_ptr = dma_address + ndp_off;
4057 sg_list[i].next_desc_ptr = 0LL;
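/*
 * Note: the loop above links the descriptors into a chain that the
 * device itself walks. Descriptor i carries the bus address of
 * descriptor i+1 in next_desc_ptr, and the final descriptor is
 * terminated with 0, so the host only ever hands the s1120 the DMA
 * address of the first descriptor (returned via *ret_dma_addr).
 */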
4063 static int skd_cons_skreq(struct skd_device *skdev)
4068 pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4069 skdev->name, __func__, __LINE__,
4070 sizeof(struct skd_request_context),
4071 skdev->num_req_context,
4072 sizeof(struct skd_request_context) * skdev->num_req_context);
4074 skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4075 * skdev->num_req_context, GFP_KERNEL);
4076 if (skdev->skreq_table == NULL) {
4081 pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4082 skdev->name, __func__, __LINE__,
4083 skdev->sgs_per_request, sizeof(struct scatterlist),
4084 skdev->sgs_per_request * sizeof(struct scatterlist));
4086 for (i = 0; i < skdev->num_req_context; i++) {
4087 struct skd_request_context *skreq;
4089 skreq = &skdev->skreq_table[i];
4091 skreq->id = i + SKD_ID_RW_REQUEST;
4092 skreq->state = SKD_REQ_STATE_IDLE;
4094 skreq->sg = kzalloc(sizeof(struct scatterlist) *
4095 skdev->sgs_per_request, GFP_KERNEL);
4096 if (skreq->sg == NULL) {
4100 sg_init_table(skreq->sg, skdev->sgs_per_request);
4102 skreq->sksg_list = skd_cons_sg_list(skdev,
4103 skdev->sgs_per_request,
4104 &skreq->sksg_dma_address);
4106 if (skreq->sksg_list == NULL) {
4111 skreq->next = &skreq[1];
4114 /* Free list is in order starting with the 0th entry. */
4115 skdev->skreq_table[i - 1].next = NULL;
4116 skdev->skreq_free_list = skdev->skreq_table;
4122 static int skd_cons_skspcl(struct skd_device *skdev)
4127 pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4128 skdev->name, __func__, __LINE__,
4129 sizeof(struct skd_special_context),
4131 sizeof(struct skd_special_context) * skdev->n_special);
4133 skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4134 * skdev->n_special, GFP_KERNEL);
4135 if (skdev->skspcl_table == NULL) {
4140 for (i = 0; i < skdev->n_special; i++) {
4141 struct skd_special_context *skspcl;
4143 skspcl = &skdev->skspcl_table[i];
4145 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4146 skspcl->req.state = SKD_REQ_STATE_IDLE;
4148 skspcl->req.next = &skspcl[1].req;
4150 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4153 pci_zalloc_consistent(skdev->pdev, nbytes,
4154 &skspcl->mb_dma_address);
4155 if (skspcl->msg_buf == NULL) {
4160 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4161 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4162 if (skspcl->req.sg == NULL) {
4167 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4168 SKD_N_SG_PER_SPECIAL,
4171 if (skspcl->req.sksg_list == NULL) {
4177 /* Free list is in order starting with the 0th entry. */
4178 skdev->skspcl_table[i - 1].req.next = NULL;
4179 skdev->skspcl_free_list = skdev->skspcl_table;
4187 static int skd_cons_sksb(struct skd_device *skdev)
4190 struct skd_special_context *skspcl;
4193 skspcl = &skdev->internal_skspcl;
4195 skspcl->req.id = 0 + SKD_ID_INTERNAL;
4196 skspcl->req.state = SKD_REQ_STATE_IDLE;
4198 nbytes = SKD_N_INTERNAL_BYTES;
4200 skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4201 &skspcl->db_dma_address);
4202 if (skspcl->data_buf == NULL) {
4207 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4208 skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4209 &skspcl->mb_dma_address);
4210 if (skspcl->msg_buf == NULL) {
4215 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4216 &skspcl->req.sksg_dma_address);
4217 if (skspcl->req.sksg_list == NULL) {
4222 if (!skd_format_internal_skspcl(skdev)) {
4231 static int skd_cons_disk(struct skd_device *skdev)
4234 struct gendisk *disk;
4235 struct request_queue *q;
4236 unsigned long flags;
4238 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4245 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4247 disk->major = skdev->major;
4248 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4249 disk->fops = &skd_blockdev_ops;
4250 disk->private_data = skdev;
4252 q = blk_init_queue(skd_request_fn, &skdev->lock);
4257 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
4261 q->queuedata = skdev;
4263 blk_queue_write_cache(q, true, true);
4264 blk_queue_max_segments(q, skdev->sgs_per_request);
4265 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4267 /* set optimal I/O size to 8KB */
4268 blk_queue_io_opt(q, 8192);
4270 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4271 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4273 spin_lock_irqsave(&skdev->lock, flags);
4274 pr_debug("%s:%s:%d stopping %s queue\n",
4275 skdev->name, __func__, __LINE__, skdev->name);
4276 blk_stop_queue(skdev->queue);
4277 spin_unlock_irqrestore(&skdev->lock, flags);
4283 #define SKD_N_DEV_TABLE 16u
4284 static u32 skd_next_devno;
4286 static struct skd_device *skd_construct(struct pci_dev *pdev)
4288 struct skd_device *skdev;
4289 int blk_major = skd_major;
4292 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4295 pr_err(PFX "(%s): memory alloc failure\n",
4300 skdev->state = SKD_DRVR_STATE_LOAD;
4302 skdev->devno = skd_next_devno++;
4303 skdev->major = blk_major;
4304 sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
4305 skdev->dev_max_queue_depth = 0;
4307 skdev->num_req_context = skd_max_queue_depth;
4308 skdev->num_fitmsg_context = skd_max_queue_depth;
4309 skdev->n_special = skd_max_pass_thru;
4310 skdev->cur_max_queue_depth = 1;
4311 skdev->queue_low_water_mark = 1;
4312 skdev->proto_ver = 99;
4313 skdev->sgs_per_request = skd_sgs_per_request;
4314 skdev->dbg_level = skd_dbg_level;
4316 atomic_set(&skdev->device_count, 0);
4318 spin_lock_init(&skdev->lock);
4320 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4322 pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4323 rc = skd_cons_skcomp(skdev);
4327 pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4328 rc = skd_cons_skmsg(skdev);
4332 pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4333 rc = skd_cons_skreq(skdev);
4337 pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4338 rc = skd_cons_skspcl(skdev);
4342 pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4343 rc = skd_cons_sksb(skdev);
4347 pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4348 rc = skd_cons_disk(skdev);
4352 pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
4356 pr_debug("%s:%s:%d construct failed\n",
4357 skdev->name, __func__, __LINE__);
4358 skd_destruct(skdev);
4363 *****************************************************************************
4364 * DESTRUCT (FREE)
4365 *****************************************************************************
4368 static void skd_free_skcomp(struct skd_device *skdev)
4370 if (skdev->skcomp_table != NULL) {
4373 nbytes = sizeof(skdev->skcomp_table[0]) *
4374 SKD_N_COMPLETION_ENTRY;
4375 pci_free_consistent(skdev->pdev, nbytes,
4376 skdev->skcomp_table, skdev->cq_dma_address);
4379 skdev->skcomp_table = NULL;
4380 skdev->cq_dma_address = 0;
4383 static void skd_free_skmsg(struct skd_device *skdev)
4387 if (skdev->skmsg_table == NULL)
4390 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4391 struct skd_fitmsg_context *skmsg;
4393 skmsg = &skdev->skmsg_table[i];
4395 if (skmsg->msg_buf != NULL) {
4396 skmsg->msg_buf += skmsg->offset;
4397 skmsg->mb_dma_address += skmsg->offset;
4398 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4400 skmsg->mb_dma_address);
4402 skmsg->msg_buf = NULL;
4403 skmsg->mb_dma_address = 0;
4406 kfree(skdev->skmsg_table);
4407 skdev->skmsg_table = NULL;
4410 static void skd_free_sg_list(struct skd_device *skdev,
4411 struct fit_sg_descriptor *sg_list,
4412 u32 n_sg, dma_addr_t dma_addr)
4414 if (sg_list != NULL) {
4417 nbytes = sizeof(*sg_list) * n_sg;
4419 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4423 static void skd_free_skreq(struct skd_device *skdev)
4427 if (skdev->skreq_table == NULL)
4430 for (i = 0; i < skdev->num_req_context; i++) {
4431 struct skd_request_context *skreq;
4433 skreq = &skdev->skreq_table[i];
4435 skd_free_sg_list(skdev, skreq->sksg_list,
4436 skdev->sgs_per_request,
4437 skreq->sksg_dma_address);
4439 skreq->sksg_list = NULL;
4440 skreq->sksg_dma_address = 0;
4445 kfree(skdev->skreq_table);
4446 skdev->skreq_table = NULL;
4449 static void skd_free_skspcl(struct skd_device *skdev)
4454 if (skdev->skspcl_table == NULL)
4457 for (i = 0; i < skdev->n_special; i++) {
4458 struct skd_special_context *skspcl;
4460 skspcl = &skdev->skspcl_table[i];
4462 if (skspcl->msg_buf != NULL) {
4463 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4464 pci_free_consistent(skdev->pdev, nbytes,
4466 skspcl->mb_dma_address);
4469 skspcl->msg_buf = NULL;
4470 skspcl->mb_dma_address = 0;
4472 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4473 SKD_N_SG_PER_SPECIAL,
4474 skspcl->req.sksg_dma_address);
4476 skspcl->req.sksg_list = NULL;
4477 skspcl->req.sksg_dma_address = 0;
4479 kfree(skspcl->req.sg);
4482 kfree(skdev->skspcl_table);
4483 skdev->skspcl_table = NULL;
4486 static void skd_free_sksb(struct skd_device *skdev)
4488 struct skd_special_context *skspcl;
4491 skspcl = &skdev->internal_skspcl;
4493 if (skspcl->data_buf != NULL) {
4494 nbytes = SKD_N_INTERNAL_BYTES;
4496 pci_free_consistent(skdev->pdev, nbytes,
4497 skspcl->data_buf, skspcl->db_dma_address);
4500 skspcl->data_buf = NULL;
4501 skspcl->db_dma_address = 0;
4503 if (skspcl->msg_buf != NULL) {
4504 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4505 pci_free_consistent(skdev->pdev, nbytes,
4506 skspcl->msg_buf, skspcl->mb_dma_address);
4509 skspcl->msg_buf = NULL;
4510 skspcl->mb_dma_address = 0;
4512 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4513 skspcl->req.sksg_dma_address);
4515 skspcl->req.sksg_list = NULL;
4516 skspcl->req.sksg_dma_address = 0;
4519 static void skd_free_disk(struct skd_device *skdev)
4521 struct gendisk *disk = skdev->disk;
4523 if (disk && (disk->flags & GENHD_FL_UP))
4527 blk_cleanup_queue(skdev->queue);
4528 skdev->queue = NULL;
4536 static void skd_destruct(struct skd_device *skdev)
4541 pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
4542 skd_free_disk(skdev);
4544 pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
4545 skd_free_sksb(skdev);
4547 pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
4548 skd_free_skspcl(skdev);
4550 pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
4551 skd_free_skreq(skdev);
4553 pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
4554 skd_free_skmsg(skdev);
4556 pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
4557 skd_free_skcomp(skdev);
4559 pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
4564 *****************************************************************************
4565 * BLOCK DEVICE (BDEV) GLUE
4566 *****************************************************************************
4569 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4571 struct skd_device *skdev;
4574 skdev = bdev->bd_disk->private_data;
4576 pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
4577 skdev->name, __func__, __LINE__,
4578 bdev->bd_disk->disk_name, current->comm);
4580 if (skdev->read_cap_is_valid) {
4581 capacity = get_capacity(skdev->disk);
4584 geo->cylinders = (capacity) / (255 * 64);
4591 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
4593 pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
4594 device_add_disk(parent, skdev->disk);
4598 static const struct block_device_operations skd_blockdev_ops = {
4599 .owner = THIS_MODULE,
4600 .ioctl = skd_bdev_ioctl,
4601 .getgeo = skd_bdev_getgeo,
4605 *****************************************************************************
4606 * PCIe DRIVER GLUE
4607 *****************************************************************************
static const struct pci_device_id skd_pci_tbl[] = {
	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0 }			/* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);

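/*
 * Format a human-readable PCIe link description, e.g. "PCIe (5.0GT/s 4X)",
 * by decoding the Link Status register of the PCI Express capability.
 * The caller must supply a buffer large enough for the longest string.
 */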
static char *skd_pci_info(struct skd_device *skdev, char *str)
{
	int pcie_reg;

	strcpy(str, "PCIe (");
	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;	/* PCI_EXP_LNKSTA offset */
		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (0xF);
		lwidth = (pcie_lstat & 0x3F0) >> 4;

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	}
	return str;
}

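/*
 * Probe: enable the PCI function, set 64-bit DMA masks (falling back
 * to 32-bit), construct the per-device state, map the BARs, hook up
 * interrupts, and start the device. The disk is attached only after
 * the device reports itself on-line (gendisk_on), within
 * SKD_START_WAIT_SECONDS.
 */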
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
		DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
	pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
		pci_name(pdev), pdev->vendor, pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skd_name(skdev), rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       skd_name(skdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(&pdev->dev, skdev);
		rc = 0;
	} else {
		/* we timed out, something is wrong with the device,
		 * don't add the disk structure
		 */
		pr_err("(%s): error: waiting for s1120 timed out %d!\n",
		       skd_name(skdev), rc);
		/* in case of no error; we timeout with ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}

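/*
 * Remove: stop the device and undo everything probe set up, in
 * reverse order.
 */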
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

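/*
 * Legacy PM suspend: quiesce and stop the device, release all bus
 * resources, then put the function into the requested low-power state.
 */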
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

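/*
 * Legacy PM resume: restore power and config space, then rebuild the
 * DMA masks, BAR mappings, and IRQs the same way probe does, and
 * restart the device.
 */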
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skd_name(skdev), rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       pci_name(pdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}

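/*
 * Shutdown callback: just stop the device so it is quiescent across
 * reboot; no resources are released.
 */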
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	pr_err("skd_pci_shutdown called\n");

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}

	pr_err("%s: calling stop\n", skd_name(skdev));
	skd_stop_device(skdev);
}

static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */

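/*
 * Build the "name:serial:[pci-id]" identifier used as the log prefix;
 * the serial number is shown as "??" until INQUIRY data is valid.
 */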
static const char *skd_name(struct skd_device *skdev)
{
	memset(skdev->id_str, 0, sizeof(skdev->id_str));

	if (skdev->inquiry_is_valid)
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
			 skdev->name, skdev->inq_serial_num,
			 pci_name(skdev->pdev));
	else
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
			 skdev->name, pci_name(skdev->pdev));

	return skdev->id_str;
}

const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}

static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}

static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}

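/*
 * Debug logging helpers: dump the state of a device, FIT message, or
 * request context around interesting events.
 */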
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
	pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
		 skdev->name, __func__, __LINE__,
		 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		 skd_skdev_state_to_str(skdev->state), skdev->state);
	pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->in_flight, skdev->cur_max_queue_depth,
		 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
		 skdev->name, __func__, __LINE__,
		 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
		 skmsg->id, skmsg->length);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		 skdev->name, __func__, __LINE__,
		 skd_skreq_state_to_str(skreq->state), skreq->state,
		 skreq->id, skreq->fitmsg_id);
	pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
		 skdev->name, __func__, __LINE__,
		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_debug("%s:%s:%d req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count,
			 (int)rq_data_dir(req));
	} else {
		pr_debug("%s:%s:%d req=NULL\n",
			 skdev->name, __func__, __LINE__);
	}
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */

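/*
 * Module init: sanity-check all module parameters, clamping any
 * out-of-range value to its default, then register the PCI driver.
 */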
static int __init skd_init(void)
{
	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}

static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);