 * Driver for sTec s1120 PCIe SSDs. sTec was acquired by HGST in 2013;
 * HGST itself had been acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <linux/stringify.h>
#include <scsi/scsi.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

	STEC_LINK_UNKNOWN = 0xFF

	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
#define SKD_ASSERT(expr) \
	if (unlikely(!(expr))) { \
		pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
		       # expr, __FILE__, __func__, __LINE__); \
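/*
 * Usage sketch (illustrative only): SKD_ASSERT() is a debug check, not a
 * hard BUG_ON(); as the body above shows, a failed condition only logs the
 * expression and its location. For example,
 *
 *	SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
 *
 * would print "Assertion failed! skreq->state == SKD_REQ_STATE_IDLE,..."
 * with the file, function and line, and then keep executing.
 */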
#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID

MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);

#define PCI_VENDOR_ID_STEC	0x1B39
#define PCI_DEVICE_ID_S1120	0x0001

#define SKD_FUA_NV		(1 << 1)
#define SKD_MINORS_PER_DEVICE	16

#define SKD_MAX_QUEUE_DEPTH	200u

#define SKD_PAUSE_TIMEOUT	(5 * 1000)

#define SKD_N_FITMSG_BYTES	(512u)
#define SKD_MAX_REQ_PER_MSG	14

#define SKD_N_SPECIAL_CONTEXT	32u
#define SKD_N_SPECIAL_FITMSG_BYTES	(128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
#define SKD_N_SG_PER_SPECIAL	256u

#define SKD_N_COMPLETION_ENTRY	256u
#define SKD_N_READ_CAP_BYTES	(8u)

#define SKD_N_INTERNAL_BYTES	(512u)

#define SKD_SKCOMP_SIZE \
	((sizeof(struct fit_completion_entry_v1) + \
	  sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR		(0x400)
#define SKD_ID_TABLE_MASK	(3u << 8u)
#define  SKD_ID_RW_REQUEST	(0u << 8u)
#define  SKD_ID_INTERNAL	(1u << 8u)
#define  SKD_ID_SPECIAL_REQUEST	(2u << 8u)
#define  SKD_ID_FIT_MSG		(3u << 8u)
#define SKD_ID_SLOT_MASK	0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK	0x03FFu
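/*
 * Worked example of the id layout (derived from the masks above): an id of
 * 0x0712 decodes as slot 0x12 (id & SKD_ID_SLOT_MASK) and table 0x300 ==
 * SKD_ID_FIT_MSG (id & SKD_ID_TABLE_MASK); the bits above
 * SKD_ID_SLOT_AND_TABLE_MASK act as a uniquifier. Each time a context is
 * reused, adding SKD_ID_INCR bumps the uniquifier while leaving the slot
 * and table bits intact, so a stale id from a previous use of the same
 * slot no longer compares equal.
 */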
#define SKD_N_TIMEOUT_SLOT	4u
#define SKD_TIMEOUT_SLOT_MASK	3u
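/*
 * Illustration (from skd_timer_tick() below): timeout_stamp increments on
 * every one-second timer tick, and each new request is tagged with the
 * current stamp, so requests rotate through the four slots:
 * stamp 0 -> slot 0, stamp 1 -> slot 1, ... stamp 4 -> slot 0 again.
 * A slot whose counter is still nonzero when its turn comes around again
 * has therefore held a request for several seconds.
 */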
#define SKD_N_MAX_SECTORS	2048u

#define SKD_MAX_RETRIES		2u

#define SKD_TIMER_SECONDS(seconds)	(seconds)
#define SKD_TIMER_MINUTES(minutes)	((minutes) * (60))

#define INQ_STD_NBYTES		36
enum skd_drvr_state {
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_DRAINING_TIMEOUT,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,

#define SKD_WAIT_BOOT_TIMO	SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO	SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO	SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO	SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO		SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO	SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS	90u
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
	SKD_REQ_STATE_ABORTED,

enum skd_fit_msg_state {

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
struct skd_fitmsg_context {
	enum skd_fit_msg_state state;

	struct skd_fitmsg_context *next;

	dma_addr_t mb_dma_address;

struct skd_request_context {
	enum skd_req_state state;

	struct skd_request_context *next;

	struct scatterlist *sg;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;

#define SKD_DATA_DIR_HOST_TO_CARD	1
#define SKD_DATA_DIR_CARD_TO_HOST	2

struct skd_special_context {
	struct skd_request_context req;

	dma_addr_t db_dma_address;

	dma_addr_t mb_dma_address;

	struct sg_iovec *iov;
	struct sg_iovec no_iov_iov;

	struct skd_special_context *skspcl;
typedef enum skd_irq_type {

#define SKD_MAX_BARS	2

	volatile void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;

	enum skd_drvr_state state;

	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;

	u32 timeout_slot[SKD_N_TIMEOUT_SLOT];

	struct skd_fitmsg_context *skmsg_free_list;
	struct skd_fitmsg_context *skmsg_table;

	struct skd_request_context *skreq_free_list;
	struct skd_request_context *skreq_table;

	struct skd_special_context *skspcl_free_list;
	struct skd_special_context *skspcl_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13]; /* 12 chars plus null term */

	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;

	u32 connect_time_stamp;

#define SKD_MAX_CONNECT_RETRIES 16

	struct work_struct completion_worker;

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)	  skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");
static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time." " (1-32, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq,
			    blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
				struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev, int requeue);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
	struct request_queue *q = skdev->queue;

		req = blk_peek_request(q);

		blk_start_request(req);
		__blk_end_request_all(req, BLK_STS_IOERR);
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
	if (data_dir == READ)
		scsi_req->cdb[0] = 0x28;
	else
		scsi_req->cdb[0] = 0x2a;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
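/*
 * Worked example (illustrative): for a read at lba 0x12345678 with count 8,
 * the code above builds a standard READ(10) CDB:
 * cdb[] = { 0x28, 0x00, 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x08, 0x00 }.
 * Opcode 0x28 is READ(10) and 0x2a is WRITE(10); bytes 2-5 carry the
 * big-endian LBA and bytes 7-8 the big-endian transfer length.
 */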
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = 0x35;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
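/*
 * Note: opcode 0x35 is SYNCHRONIZE CACHE(10). With the LBA and block-count
 * fields all zero, the command asks the device to flush its entire write
 * cache; this is the first, zero-size phase of the driver's flush sequence
 * (cf. SKD_FLUSH_ZERO_SIZE_FIRST / SKD_FLUSH_DATA_SECOND above).
 */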
static void skd_request_fn_not_online(struct request_queue *q);

static void skd_request_fn(struct request_queue *q)
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	unsigned long io_flags;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_request_fn_not_online(q);

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->skmsg_free_list == NULL ||
		    skdev->skreq_free_list == NULL ||
		    skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);

	/*
	 * - There are no more native requests
	 * - There are already the maximum number of requests in progress
	 * - There are no more skd_request_context entries
	 * - There are no more FIT msg buffers
	 */
		req = blk_peek_request(q);

		/* Are there any native requests to start? */

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (req_op(req) == REQ_OP_FLUSH)

		if (io_flags & REQ_FUA)

		dev_dbg(&skdev->pdev->dev,
			"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			req, lba, lba, count, count, data_dir);
		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
				skdev->in_flight, skdev->cur_max_queue_depth);

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
			dev_dbg(&skdev->pdev->dev, "Out of skreq, q=%p\n", q);

		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skdev->skmsg_free_list == NULL) {
			dev_dbg(&skdev->pdev->dev, "Out of msg\n");

		skreq->flush_cmd = 0;

		skreq->sg_byte_count = 0;

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that the skd_request_context is
		 * available but is still at the head of the free list.
		 */
		blk_start_request(req);

		skreq->fitmsg_id = 0;

		/* Either a FIT msg is in progress or we have to start one. */
615 /* Either a FIT msg is in progress or we have to start one. */
617 /* Are there any FIT msg buffers available? */
618 skmsg = skdev->skmsg_free_list;
620 dev_dbg(&skdev->pdev->dev,
621 "Out of msg skdev=%p\n",
625 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
626 SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
628 skdev->skmsg_free_list = skmsg->next;
630 skmsg->state = SKD_MSG_STATE_BUSY;
631 skmsg->id += SKD_ID_INCR;
633 /* Initialize the FIT msg header */
634 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
635 memset(fmh, 0, sizeof(*fmh));
636 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
637 skmsg->length = sizeof(*fmh);
640 skreq->fitmsg_id = skmsg->id;
643 * Note that a FIT msg may have just been started
644 * but contains no SoFIT requests yet.
648 * Transcode the request, checking as we go. The outcome of
649 * the transcoding is represented by the error variable.
651 cmd_ptr = &skmsg->msg_buf[skmsg->length];
652 memset(cmd_ptr, 0, 32);
654 be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
655 cmdctxt = skreq->id + SKD_ID_INCR;
658 scsi_req->hdr.tag = cmdctxt;
659 scsi_req->hdr.sg_list_dma_address = be_dmaa;
661 if (data_dir == READ)
662 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
664 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
666 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
667 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
668 SKD_ASSERT(skreq->flush_cmd == 1);
670 skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
674 scsi_req->cdb[1] |= SKD_FUA_NV;
		if (!skd_preop_sg_list(skdev, skreq)) {
			/*
			 * Complete the native request with error.
			 * Note that the request context is still at the
			 * head of the free list, and that the SoFIT request
			 * was encoded into the FIT msg buffer but the FIT
			 * msg length has not been updated. In short, the
			 * only resource that may have been allocated but
			 * left unused is the FIT msg, which could end up
			 * empty.
			 */
			dev_dbg(&skdev->pdev->dev, "error Out\n");
			skd_end_request(skdev, skreq, BLK_STS_RESOURCE);

		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);
		/* Complete resource allocations. */
		skdev->skreq_free_list = skreq->next;
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;

		dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
735 /* Bigger than just a FIT msg header? */
736 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
737 dev_dbg(&skdev->pdev->dev, "sending msg=%p, len %d\n",
738 skmsg, skmsg->length);
739 skd_send_fitmsg(skdev, skmsg);
742 * The FIT msg is empty. It means we got started
743 * on the msg, but the requests were rejected.
745 skmsg->state = SKD_MSG_STATE_IDLE;
746 skmsg->id += SKD_ID_INCR;
747 skmsg->next = skdev->skmsg_free_list;
748 skdev->skmsg_free_list = skmsg;
755 * If req is non-NULL it means there is something to do but
756 * we are out of a resource.
759 blk_stop_queue(skdev->queue);
static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq,
			    blk_status_t error)
	if (unlikely(error)) {
		struct request *req = skreq->req;
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_err(&skdev->pdev->dev,
			"Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,

		dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", skreq->id,

	__blk_end_request_all(skreq->req, error);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
	struct request *req = skreq->req;
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
	struct scatterlist *sg = &skreq->sg[0];

	skreq->sg_byte_count = 0;

	/* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
		      skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

	n_sg = blk_rq_map_sg(skdev->queue, req, sg);

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	for (i = 0; i < n_sg; i++) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(&sg[i]);
		uint64_t dma_addr = sg_dma_address(&sg[i]);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
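	/*
	 * Sketch of the resulting descriptor chain (derived from the loop
	 * above): sksg_list[0..n_sg-2] each carry FIT_SGD_CONTROL_NOT_LAST
	 * and, via next_desc_ptr (restored in skd_postop_sg_list() below),
	 * point at the next 32-byte descriptor; sksg_list[n_sg-1] is marked
	 * FIT_SGD_CONTROL_LAST with a null next_desc_ptr, which is how the
	 * s1120 finds the end of the scatter/gather list.
	 */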
	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	/*
	 * Restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
static void skd_request_fn_not_online(struct request_queue *q)
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/* In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the dev/skd0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do??? delay dev/skd0 ??
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	/* If we get here, terminate all pending block requests
	 * with EIO and any SCSI pass-thru with appropriate sense
	 */
	skd_fail_all_pending(skdev);
/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */
static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_timer_tick(ulong arg)
	struct skd_device *skdev = (struct skd_device *)arg;

	unsigned long reqflags;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);

	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * at least SKD_N_TIMEOUT_SLOT - 1 seconds ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0)
	/* Something is overdue */
	dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
		skdev->timeout_slot[timo_slot], skdev->in_flight);
	dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
		skdev->timeout_slot[timo_slot], skdev->in_flight);

	skdev->timer_countdown = SKD_DRAINING_TIMO;
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;
	blk_stop_queue(skdev->queue);

	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
static void skd_timer_tick_not_online(struct skd_device *skdev)
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:

	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;

		skd_recover_requests(skdev, 0);

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;

		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timed out (countdown=%d), restarting device.\n",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;

		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* Start the queue so we can respond with error to requests. */
		/* Wake up anyone waiting for startup complete. */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		dev_dbg(&skdev->pdev->dev,
			"draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
			skdev->timo_slot, skdev->timer_countdown,
			skdev->timeout_slot[skdev->timo_slot]);
		/* if the slot has cleared we can let the I/O continue */
		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
			dev_dbg(&skdev->pdev->dev,
				"Slot drained, starting queue.\n");
			skdev->state = SKD_DRVR_STATE_ONLINE;
			blk_start_queue(skdev->queue);

		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;

		skd_restart_device(skdev);
	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;

		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev, 0);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev, 0);
		}

		/* Start the queue so we can respond with error to requests. */
		/* Wake up anyone waiting for startup complete. */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
static int skd_start_timer(struct skd_device *skdev)
	/* setup_timer() both initializes the timer and sets its callback. */
	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);

static void skd_kill_timer(struct skd_device *skdev)
	del_timer_sync(&skdev->timer);
/*
 *****************************************************************************
 * IOCTL
 *****************************************************************************
 */
static int skd_ioctl_sg_io(struct skd_device *skdev,
			   fmode_t mode, void __user *argp);
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
					struct skd_sg_io *sksgio);
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
				   struct skd_sg_io *sksgio);
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
				    struct skd_sg_io *sksgio);
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
				 struct skd_sg_io *sksgio, int dxfer_dir);
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
				 struct skd_sg_io *sksgio);
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
static int skd_sg_io_release_skspcl(struct skd_device *skdev,
				    struct skd_sg_io *sksgio);
static int skd_sg_io_put_status(struct skd_device *skdev,
				struct skd_sg_io *sksgio);

static void skd_complete_special(struct skd_device *skdev,
				 volatile struct fit_completion_entry_v1
				 *skcomp,
				 volatile struct fit_comp_error_info *skerr,
				 struct skd_special_context *skspcl);
static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
			  uint cmd_in, ulong arg)
	static const int sg_version_num = 30527;
	int rc = 0, timeout;
	struct gendisk *disk = bdev->bd_disk;
	struct skd_device *skdev = disk->private_data;
	int __user *p = (int __user *)arg;

	dev_dbg(&skdev->pdev->dev,
		"%s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
		disk->disk_name, current->comm, mode, cmd_in, arg);

	if (!capable(CAP_SYS_ADMIN))

	case SG_SET_TIMEOUT:
		rc = get_user(timeout, p);
			disk->queue->sg_timeout = clock_t_to_jiffies(timeout);

	case SG_GET_TIMEOUT:
		rc = jiffies_to_clock_t(disk->queue->sg_timeout);

	case SG_GET_VERSION_NUM:
		rc = put_user(sg_version_num, p);

		rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);

	dev_dbg(&skdev->pdev->dev, "%s: completion rc %d\n", disk->disk_name,
static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
	struct skd_sg_io sksgio;

	memset(&sksgio, 0, sizeof(sksgio));

	sksgio.iov = &sksgio.no_iov_iov;

	switch (skdev->state) {
	case SKD_DRVR_STATE_ONLINE:
	case SKD_DRVR_STATE_BUSY_IMMINENT:

		dev_dbg(&skdev->pdev->dev, "drive not online\n");

	rc = skd_sg_io_get_and_check_args(skdev, &sksgio);

	rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);

	rc = skd_sg_io_prep_buffering(skdev, &sksgio);

	rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);

	rc = skd_sg_io_send_fitmsg(skdev, &sksgio);

	rc = skd_sg_io_await(skdev, &sksgio);

	rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);

	rc = skd_sg_io_put_status(skdev, &sksgio);

	skd_sg_io_release_skspcl(skdev, &sksgio);

	if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
					struct skd_sg_io *sksgio)
	struct sg_io_hdr *sgp = &sksgio->sg;
	int i, __maybe_unused acc;

	if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
		dev_dbg(&skdev->pdev->dev, "access sg failed %p\n",

	if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
		dev_dbg(&skdev->pdev->dev, "copy_from_user sg failed %p\n",

	if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
		dev_dbg(&skdev->pdev->dev, "interface_id invalid 0x%x\n",

	if (sgp->cmd_len > sizeof(sksgio->cdb)) {
		dev_dbg(&skdev->pdev->dev, "cmd_len invalid %d\n",

	if (sgp->iovec_count > 256) {
		dev_dbg(&skdev->pdev->dev, "iovec_count invalid %d\n",

	if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
		dev_dbg(&skdev->pdev->dev, "dxfer_len invalid %d\n",

	switch (sgp->dxfer_direction) {

	case SG_DXFER_TO_DEV:

	case SG_DXFER_FROM_DEV:
	case SG_DXFER_TO_FROM_DEV:

		dev_dbg(&skdev->pdev->dev, "dxfer_dir invalid %d\n",
			sgp->dxfer_direction);

	if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
		dev_dbg(&skdev->pdev->dev, "copy_from_user cmdp failed %p\n",

	if (sgp->mx_sb_len != 0) {
		if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
			dev_dbg(&skdev->pdev->dev, "access sbp failed %p\n",

	if (sgp->iovec_count == 0) {
		sksgio->iov[0].iov_base = sgp->dxferp;
		sksgio->iov[0].iov_len = sgp->dxfer_len;

		sksgio->dxfer_len = sgp->dxfer_len;
	} else {
		struct sg_iovec *iov;
		uint nbytes = sizeof(*iov) * sgp->iovec_count;
		size_t iov_data_len;

		iov = kmalloc(nbytes, GFP_KERNEL);
			dev_dbg(&skdev->pdev->dev, "alloc iovec failed %d\n",

		sksgio->iovcnt = sgp->iovec_count;

		if (copy_from_user(iov, sgp->dxferp, nbytes)) {
			dev_dbg(&skdev->pdev->dev,
				"copy_from_user iovec failed %p\n",

		/*
		 * Sum up the vecs, making sure they don't overflow
		 */
		for (i = 0; i < sgp->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len)

			iov_data_len += iov[i].iov_len;

		/* SG_IO howto says that the shorter of the two wins */
		if (sgp->dxfer_len < iov_data_len) {
			sksgio->iovcnt = iov_shorten((struct iovec *)iov,

			sksgio->dxfer_len = sgp->dxfer_len;
		} else {
			sksgio->dxfer_len = iov_data_len;
		}

	if (sgp->dxfer_direction != SG_DXFER_NONE) {
		struct sg_iovec *iov = sksgio->iov;
		for (i = 0; i < sksgio->iovcnt; i++, iov++) {
			if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
				dev_dbg(&skdev->pdev->dev,
					"access data failed %p/%zd\n",
					iov->iov_base, iov->iov_len);
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
				   struct skd_sg_io *sksgio)
	struct skd_special_context *skspcl = NULL;

		spin_lock_irqsave(&skdev->lock, flags);
		skspcl = skdev->skspcl_free_list;
		if (skspcl != NULL) {
			skdev->skspcl_free_list =
				(struct skd_special_context *)skspcl->req.next;
			skspcl->req.id += SKD_ID_INCR;
			skspcl->req.state = SKD_REQ_STATE_SETUP;
			skspcl->orphaned = 0;
			skspcl->req.n_sg = 0;

		spin_unlock_irqrestore(&skdev->lock, flags);
		if (skspcl != NULL) {

		dev_dbg(&skdev->pdev->dev, "blocking\n");

		rc = wait_event_interruptible_timeout(
				skdev->waitq,
				(skdev->skspcl_free_list != NULL),
				msecs_to_jiffies(sksgio->sg.timeout));

		dev_dbg(&skdev->pdev->dev, "unblocking, rc=%d\n", rc);

		/*
		 * If we get here, rc > 0, meaning
		 * wait_event_interruptible_timeout() returned with time to
		 * spare: the awaited event -- a non-empty free list --
		 * occurred. Retry the allocation.
		 */

	sksgio->skspcl = skspcl;
static int skd_skreq_prep_buffering(struct skd_device *skdev,
				    struct skd_request_context *skreq,
	u32 resid = dxfer_len;

	/*
	 * The DMA engine must have aligned addresses and byte counts.
	 */
	resid += (-resid) & 3;
	skreq->sg_byte_count = resid;
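	/*
	 * Example of the rounding above: dxfer_len == 10 gives
	 * resid = 10 + ((-10) & 3) = 10 + 2 = 12, i.e. the transfer size is
	 * rounded up to the next multiple of 4 bytes.
	 */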
		u32 nbytes = PAGE_SIZE;
		u32 ix = skreq->n_sg;
		struct scatterlist *sg = &skreq->sg[ix];
		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];

		page = alloc_page(GFP_KERNEL);

		sg_set_page(sg, page, nbytes, 0);

		/* TODO: This should be going through a pci_???()
		 * routine to do proper mapping. */
		sksg->control = FIT_SGD_CONTROL_NOT_LAST;
		sksg->byte_count = nbytes;

		sksg->host_side_addr = sg_phys(sg);

		sksg->dev_side_addr = 0;
		sksg->next_desc_ptr = skreq->sksg_dma_address +
				      (ix + 1) * sizeof(*sksg);

	if (skreq->n_sg > 0) {
		u32 ix = skreq->n_sg - 1;
		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];

		sksg->control = FIT_SGD_CONTROL_LAST;
		sksg->next_desc_ptr = 0;

	if (unlikely(skdev->dbg_level > 1)) {

		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < skreq->n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
				    struct skd_sg_io *sksgio)
	struct skd_special_context *skspcl = sksgio->skspcl;
	struct skd_request_context *skreq = &skspcl->req;
	u32 dxfer_len = sksgio->dxfer_len;

	rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
	/*
	 * Eventually, errors or not, skd_release_special() is called
	 * to recover allocations including partial allocations.
	 */
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
				 struct skd_sg_io *sksgio, int dxfer_dir)
	struct skd_special_context *skspcl = sksgio->skspcl;

	struct sg_iovec curiov;

	u32 resid = sksgio->dxfer_len;

	curiov.iov_base = NULL;

	if (dxfer_dir != sksgio->sg.dxfer_direction) {
		if (dxfer_dir != SG_DXFER_TO_DEV ||
		    sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)

		u32 nbytes = PAGE_SIZE;

		if (curiov.iov_len == 0) {
			curiov = sksgio->iov[iov_ix++];

			page = sg_page(&skspcl->req.sg[sksg_ix++]);
			bufp = page_address(page);
			buf_len = PAGE_SIZE;

		nbytes = min_t(u32, nbytes, resid);
		nbytes = min_t(u32, nbytes, curiov.iov_len);
		nbytes = min_t(u32, nbytes, buf_len);

		if (dxfer_dir == SG_DXFER_TO_DEV)
			rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
		else
			rc = __copy_to_user(curiov.iov_base, bufp, nbytes);

		curiov.iov_len -= nbytes;
		curiov.iov_base += nbytes;
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
				 struct skd_sg_io *sksgio)
	struct skd_special_context *skspcl = sksgio->skspcl;
	struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
	struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];

	memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);

	/* Initialize the FIT msg header */
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	/* Initialize the SCSI request */
	if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
		scsi_req->hdr.sg_list_dma_address =
			cpu_to_be64(skspcl->req.sksg_dma_address);
	scsi_req->hdr.tag = skspcl->req.id;
	scsi_req->hdr.sg_list_len_bytes =
		cpu_to_be32(skspcl->req.sg_byte_count);
	memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));

	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skd_send_special_fitmsg(skdev, skspcl);
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
	unsigned long flags;

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (sksgio->skspcl->req.state !=
					       SKD_REQ_STATE_BUSY),
					      msecs_to_jiffies(sksgio->sg.
							       timeout));

	spin_lock_irqsave(&skdev->lock, flags);

	if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
		dev_dbg(&skdev->pdev->dev, "skspcl %p aborted\n",

		/* Build check cond, sense and let command finish. */
		/* For a timeout, we must fabricate completion and sense
		 * data to complete the command */
		sksgio->skspcl->req.completion.status =
			SAM_STAT_CHECK_CONDITION;

		memset(&sksgio->skspcl->req.err_info, 0,
		       sizeof(sksgio->skspcl->req.err_info));
		sksgio->skspcl->req.err_info.type = 0x70;
		sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
		sksgio->skspcl->req.err_info.code = 0x44;
		sksgio->skspcl->req.err_info.qual = 0;
	} else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
		/* No longer on the adapter. We finish. */

		/* Something's gone wrong. Still busy. Timeout or
		 * user interrupted (control-C). Mark as an orphan
		 * so it will be disposed of when it completes. */
		sksgio->skspcl->orphaned = 1;
		sksgio->skspcl = NULL;

			dev_dbg(&skdev->pdev->dev, "timed out %p (%u ms)\n",
				sksgio, sksgio->sg.timeout);

			dev_dbg(&skdev->pdev->dev, "cntlc %p\n", sksgio);

	spin_unlock_irqrestore(&skdev->lock, flags);
static int skd_sg_io_put_status(struct skd_device *skdev,
				struct skd_sg_io *sksgio)
	struct sg_io_hdr *sgp = &sksgio->sg;
	struct skd_special_context *skspcl = sksgio->skspcl;

	u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);

	sgp->status = skspcl->req.completion.status;
	resid = sksgio->dxfer_len - nb;

	sgp->masked_status = sgp->status & STATUS_MASK;
	sgp->msg_status = 0;
	sgp->host_status = 0;
	sgp->driver_status = 0;

	if (sgp->masked_status || sgp->host_status || sgp->driver_status)
		sgp->info |= SG_INFO_CHECK;

	dev_dbg(&skdev->pdev->dev, "status %x masked %x resid 0x%x\n",
		sgp->status, sgp->masked_status, sgp->resid);

	if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
		if (sgp->mx_sb_len > 0) {
			struct fit_comp_error_info *ei = &skspcl->req.err_info;
			u32 nbytes = sizeof(*ei);

			nbytes = min_t(u32, nbytes, sgp->mx_sb_len);

			sgp->sb_len_wr = nbytes;

			if (__copy_to_user(sgp->sbp, ei, nbytes)) {
				dev_dbg(&skdev->pdev->dev,
					"copy_to_user sense failed %p\n",

	if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
		dev_dbg(&skdev->pdev->dev, "copy_to_user sg failed %p\n",

static int skd_sg_io_release_skspcl(struct skd_device *skdev,
				    struct skd_sg_io *sksgio)
	struct skd_special_context *skspcl = sksgio->skspcl;

	if (skspcl != NULL) {

		sksgio->skspcl = NULL;

		spin_lock_irqsave(&skdev->lock, flags);
		skd_release_special(skdev, skspcl);
		spin_unlock_irqrestore(&skdev->lock, flags);
/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */

static int skd_format_internal_skspcl(struct skd_device *skdev)
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;
#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */

	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;

		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);

		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;

		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)

		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		/* a bare string literal is always true; negate it so the
		 * assertion actually reports */
		SKD_ASSERT(!"Don't know what to send");

	skd_send_special_fitmsg(skdev, skspcl);
static void skd_refresh_device_data(struct skd_device *skdev)
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);

static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
	unsigned char *buf = skspcl->data_buf;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
static void skd_complete_internal(struct skd_device *skdev,
				  volatile struct fit_completion_entry_v1
				  *skcomp,
				  volatile struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
	u8 *buf = skspcl->data_buf;

	struct skd_scsi_request *scsi =
		(struct skd_scsi_request *)&skspcl->msg_buf[64];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);

			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send any more, state 0x%x\n",

			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl, 0x00);

		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);

			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send any more, state 0x%x\n",

			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl, 0x00);

		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,

				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");

			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send any more, state 0x%x\n",

			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl, 0x00);

		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];
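			/*
			 * The two fields above are plain big-endian 32-bit
			 * loads; with <asm/unaligned.h> already included,
			 * get_unaligned_be32(&buf[0]) and
			 * get_unaligned_be32(&buf[4]) would express the same
			 * READ CAPACITY parsing more compactly.
			 */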
			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev,
				"**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev,
				"**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev,
				"**** failed to bring device ONLINE\n");
		/* connection is complete */
		skdev->connect_retries = 0;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);

		/* a bare string literal is always true; negate it so the
		 * assertion actually reports */
		SKD_ASSERT(!"we didn't send this");
/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
	struct fit_msg_hdr *fmh;

	dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
		skmsg->mb_dma_address, skdev->in_flight);
	dev_dbg(&skdev->pdev->dev, "msg_buf 0x%p, offset %x\n", skmsg->msg_buf,

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
	skmsg->outstanding = fmh->num_protocol_cmds_coalesced;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;

		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;
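	/*
	 * Worked example of the size binning above: a FIT msg carrying one
	 * 64-byte fit_msg_hdr plus two 64-byte SoFIT commands has
	 * length == 192, so it falls into the "> 128" bucket and is sent
	 * with FIT_QCMD_MSGSIZE_256.
	 */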
	/* Make sure skd_msg_buf is written before the doorbell is triggered. */

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_128;

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */
static void skd_complete_other(struct skd_device *skdev,
			       volatile struct fit_completion_entry_v1 *skcomp,
			       volatile struct fit_comp_error_info *skerr);

	enum skd_check_status_action action;

static struct sns_info skd_chkstat_table[] = {
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },

/*
 * Look up status and sense data to decide how to handle the error.
 * The mask says which fields must match; e.g., mask=0x18 means check
 * type and stat, and ignore key, asc, ascq.
 */
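/*
 * For example, the first table entry above uses mask 0x1c: type (0x10),
 * stat (0x08) and key (0x04) must match, while asc and ascq are ignored.
 * So any RECOVERED_ERROR sense, regardless of asc/ascq, is reported as
 * good (SKD_CHECK_STATUS_REPORT_GOOD).
 */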
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, volatile struct fit_comp_error_info *skerr)

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,

	/* Does the info match an entry in the good category? */
	n = ARRAY_SIZE(skd_chkstat_table);
	for (i = 0; i < n; i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq)
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skd_end_request(skdev, skreq, BLK_STS_OK);

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
		blk_requeue_request(skdev->queue, skreq->req);
		dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
		skd_quiesce_dev(skdev);

	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
		if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
			skd_log_skreq(skdev, skreq, "retry");
			blk_requeue_request(skdev->queue, skreq->req);

	case SKD_CHECK_STATUS_REPORT_ERROR:

		skd_end_request(skdev, skreq, BLK_STS_IOERR);
2295 /* assume spinlock is already held */
2296 static void skd_release_skreq(struct skd_device *skdev,
2297 struct skd_request_context *skreq)
2300 struct skd_fitmsg_context *skmsg;
2305 * Reclaim the FIT msg buffer if this is
2306 * the first of the requests it carried to
2307 * be completed. The FIT msg buffer used to
2308 * send this request cannot be reused until
2309 * we are sure the s1120 card has copied
2310 * it to its memory. The FIT msg might have
2311 * contained several requests. As soon as
2312 * any of them are completed we know that
2313 * the entire FIT msg was transferred.
2314 * Only the first completed request will
2315 * match the FIT msg buffer id. The FIT
2316 * msg buffer id is immediately updated.
2317 * When subsequent requests complete the FIT
2318 * msg buffer id won't match, so we know
2319 * quite cheaply that it is already done.
2321 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2322 SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2324 skmsg = &skdev->skmsg_table[msg_slot];
2325 if (skmsg->id == skreq->fitmsg_id) {
2326 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2327 SKD_ASSERT(skmsg->outstanding > 0);
2328 skmsg->outstanding--;
2329 if (skmsg->outstanding == 0) {
2330 skmsg->state = SKD_MSG_STATE_IDLE;
2331 skmsg->id += SKD_ID_INCR;
2332 skmsg->next = skdev->skmsg_free_list;
2333 skdev->skmsg_free_list = skmsg;
 * Decrease the number of active requests.
 * Also decrement the count in the timeout slot.
2341 SKD_ASSERT(skdev->in_flight > 0);
2342 skdev->in_flight -= 1;
2344 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2345 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2346 skdev->timeout_slot[timo_slot] -= 1;
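/* Requests are counted in one of SKD_N_TIMEOUT_SLOT buckets keyed
 * by the low bits of their issue timestamp; the driver's timer
 * (not part of this section) uses these per-bucket counts to
 * detect overdue requests.
 */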
2354 * Reclaim the skd_request_context
2356 skreq->state = SKD_REQ_STATE_IDLE;
2357 skreq->id += SKD_ID_INCR;
2358 skreq->next = skdev->skreq_free_list;
2359 skdev->skreq_free_list = skreq;
2362 #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
2364 static void skd_do_inq_page_00(struct skd_device *skdev,
2365 volatile struct fit_completion_entry_v1 *skcomp,
2366 volatile struct fit_comp_error_info *skerr,
2367 uint8_t *cdb, uint8_t *buf)
2369 uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
/* Caller requested "supported pages".  The driver needs to insert
 * its own page into the list the device returned.
 */
2374 dev_dbg(&skdev->pdev->dev,
2375 "skd_do_driver_inquiry: modify supported pages.\n");
2377 /* If the device rejected the request because the CDB was
2378 * improperly formed, then just leave.
2380 if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2381 skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2384 /* Get the amount of space the caller allocated */
2385 max_bytes = (cdb[3] << 8) | cdb[4];
2387 /* Get the number of pages actually returned by the device */
2388 drive_pages = (buf[2] << 8) | buf[3];
2389 drive_bytes = drive_pages + 4;
2390 new_size = drive_pages + 1;
2392 /* Supported pages must be in numerical order, so find where
2393 * the driver page needs to be inserted into the list of
2394 * pages returned by the device.
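 * Example: if the device returned pages 00 80 83 (page length 3),
 * the driver page 0xDA sorts after 0x83, so it is appended at the
 * end and the page length field below becomes 4.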
2396 for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2397 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
return; /* Device is already using this page code; abort. */
2399 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2403 if (insert_pt < max_bytes) {
2406 /* Shift everything up one byte to make room. */
2407 for (u = new_size + 3; u > insert_pt; u--)
2408 buf[u] = buf[u - 1];
2409 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
/* Increment num_returned_bytes by 1, in SCSI (big-endian) byte order */
2412 skcomp->num_returned_bytes =
2413 cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
2416 /* update page length field to reflect the driver's page too */
2417 buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2418 buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2421 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2427 pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2430 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
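/* Per the PCIe spec, LNKSTA bits 3:0 are the link speed code and
 * bits 9:4 the negotiated width; e.g., linksta=0x0042 decodes to
 * speed code 2 (5.0 GT/s) and a x4 link.
 */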
2432 pci_bus_speed = linksta & 0xF;
2433 pci_lanes = (linksta & 0x3F0) >> 4;
2435 *speed = STEC_LINK_UNKNOWN;
2440 switch (pci_bus_speed) {
2442 *speed = STEC_LINK_2_5GTS;
2445 *speed = STEC_LINK_5GTS;
2448 *speed = STEC_LINK_8GTS;
2451 *speed = STEC_LINK_UNKNOWN;
2455 if (pci_lanes <= 0x20)
2461 static void skd_do_inq_page_da(struct skd_device *skdev,
2462 volatile struct fit_completion_entry_v1 *skcomp,
2463 volatile struct fit_comp_error_info *skerr,
2464 uint8_t *cdb, uint8_t *buf)
2466 struct pci_dev *pdev = skdev->pdev;
2468 struct driver_inquiry_data inq;
2471 dev_dbg(&skdev->pdev->dev, "skd_do_driver_inquiry: return driver page\n");
2473 memset(&inq, 0, sizeof(inq));
2475 inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2477 skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2478 inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2479 inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2480 inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2482 pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2483 inq.pcie_vendor_id = cpu_to_be16(val);
2485 pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2486 inq.pcie_device_id = cpu_to_be16(val);
2488 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2489 inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2491 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2492 inq.pcie_subsystem_device_id = cpu_to_be16(val);
/* Driver version, fixed length, padded with spaces on the right */
2495 inq.driver_version_length = sizeof(inq.driver_version);
2496 memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2497 memcpy(inq.driver_version, DRV_VER_COMPL,
2498 min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2500 inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2502 /* Clear the error set by the device */
2503 skcomp->status = SAM_STAT_GOOD;
2504 memset((void *)skerr, 0, sizeof(*skerr));
2506 /* copy response into output buffer */
2507 max_bytes = (cdb[3] << 8) | cdb[4];
2508 memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2510 skcomp->num_returned_bytes =
2511 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
2514 static void skd_do_driver_inq(struct skd_device *skdev,
2515 volatile struct fit_completion_entry_v1 *skcomp,
2516 volatile struct fit_comp_error_info *skerr,
2517 uint8_t *cdb, uint8_t *buf)
2521 else if (cdb[0] != INQUIRY)
2522 return; /* Not an INQUIRY */
2523 else if ((cdb[1] & 1) == 0)
2524 return; /* EVPD not set */
2525 else if (cdb[2] == 0)
2526 /* Need to add driver's page to supported pages list */
2527 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2528 else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2529 /* Caller requested driver's page */
2530 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
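/* The CDB fields tested above follow the standard SCSI INQUIRY
 * layout: byte 0 is the opcode (INQUIRY, 0x12), bit 0 of byte 1 is
 * EVPD, byte 2 is the page code, and bytes 3-4 hold the big-endian
 * allocation length used by the page handlers.
 */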
2533 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2542 static void skd_process_scsi_inq(struct skd_device *skdev,
2543 volatile struct fit_completion_entry_v1
2545 volatile struct fit_comp_error_info *skerr,
2546 struct skd_special_context *skspcl)
2549 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2550 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2552 dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2553 skspcl->req.sg_data_dir);
2554 buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2557 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2560 static int skd_isr_completion_posted(struct skd_device *skdev,
2561 int limit, int *enqueued)
2563 volatile struct fit_completion_entry_v1 *skcmp = NULL;
2564 volatile struct fit_comp_error_info *skerr;
2567 struct skd_request_context *skreq;
2575 lockdep_assert_held(&skdev->lock);
2578 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2580 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2581 cmp_cycle = skcmp->cycle;
2582 cmp_cntxt = skcmp->tag;
2583 cmp_status = skcmp->status;
2584 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2586 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2588 dev_dbg(&skdev->pdev->dev,
2589 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
2590 skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
2591 cmp_cntxt, cmp_status, skdev->in_flight, cmp_bytes,
2594 if (cmp_cycle != skdev->skcomp_cycle) {
2595 dev_dbg(&skdev->pdev->dev, "end of completions\n");
2599 * Update the completion queue head index and possibly
2600 * the completion cycle count. 8-bit wrap-around.
2603 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2604 skdev->skcomp_ix = 0;
2605 skdev->skcomp_cycle++;
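/* Worked example: with 256 entries, advancing past index 255 wraps
 * skcomp_ix to 0 and bumps skcomp_cycle, so entries the firmware
 * wrote under the previous cycle value stop matching the cmp_cycle
 * test above.
 */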
2609 * The command context is a unique 32-bit ID. The low order
2610 * bits help locate the request. The request is usually a
2611 * r/w request (see skd_start() above) or a special request.
2614 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
/* Is this something other than an r/w request? */
2617 if (req_slot >= skdev->num_req_context) {
2619 * This is not a completion for a r/w request.
2621 skd_complete_other(skdev, skcmp, skerr);
2625 skreq = &skdev->skreq_table[req_slot];
2628 * Make sure the request ID for the slot matches.
2630 if (skreq->id != req_id) {
2631 dev_dbg(&skdev->pdev->dev,
2632 "mismatch comp_id=0x%x req_id=0x%x\n", req_id,
2635 u16 new_id = cmp_cntxt;
2636 dev_err(&skdev->pdev->dev,
2637 "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2638 req_id, skreq->id, new_id);
2644 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2646 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2647 dev_dbg(&skdev->pdev->dev, "reclaim req %p id=%04x\n",
2649 /* a previously timed out command can
2650 * now be cleaned up */
2651 skd_release_skreq(skdev, skreq);
2655 skreq->completion = *skcmp;
2656 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2657 skreq->err_info = *skerr;
2658 skd_log_check_status(skdev, cmp_status, skerr->key,
2659 skerr->code, skerr->qual,
2662 /* Release DMA resources for the request. */
2663 if (skreq->n_sg > 0)
2664 skd_postop_sg_list(skdev, skreq);
2667 dev_dbg(&skdev->pdev->dev,
2668 "NULL backptr skdreq %p, req=0x%x req_id=0x%x\n",
2669 skreq, skreq->id, req_id);
 * Capture the outcome and post it back to the native request.
2675 if (likely(cmp_status == SAM_STAT_GOOD))
2676 skd_end_request(skdev, skreq, BLK_STS_OK);
2678 skd_resolve_req_exception(skdev, skreq);
 * Release the skreq, its FIT msg (if one), timeout slot,
 * and queue depth.
2685 skd_release_skreq(skdev, skreq);
/* an skd_isr_comp_limit of zero means no limit */
2689 if (++processed >= limit) {
2696 if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2697 && (skdev->in_flight) == 0) {
2698 skdev->state = SKD_DRVR_STATE_PAUSED;
2699 wake_up_interruptible(&skdev->waitq);
2705 static void skd_complete_other(struct skd_device *skdev,
2706 volatile struct fit_completion_entry_v1 *skcomp,
2707 volatile struct fit_comp_error_info *skerr)
2712 struct skd_special_context *skspcl;
2714 lockdep_assert_held(&skdev->lock);
2716 req_id = skcomp->tag;
2717 req_table = req_id & SKD_ID_TABLE_MASK;
2718 req_slot = req_id & SKD_ID_SLOT_MASK;
2720 dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
2724 * Based on the request id, determine how to dispatch this completion.
 * This switch/case finds the good cases and forwards the
2726 * completion entry. Errors are reported below the switch.
2728 switch (req_table) {
2729 case SKD_ID_RW_REQUEST:
2731 * The caller, skd_isr_completion_posted() above,
2732 * handles r/w requests. The only way we get here
2733 * is if the req_slot is out of bounds.
2737 case SKD_ID_SPECIAL_REQUEST:
2739 * Make sure the req_slot is in bounds and that the id
2742 if (req_slot < skdev->n_special) {
2743 skspcl = &skdev->skspcl_table[req_slot];
2744 if (skspcl->req.id == req_id &&
2745 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2746 skd_complete_special(skdev,
2747 skcomp, skerr, skspcl);
2753 case SKD_ID_INTERNAL:
2754 if (req_slot == 0) {
2755 skspcl = &skdev->internal_skspcl;
2756 if (skspcl->req.id == req_id &&
2757 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2758 skd_complete_internal(skdev,
2759 skcomp, skerr, skspcl);
2765 case SKD_ID_FIT_MSG:
 * These IDs should never appear in a completion record.
 * These IDs should never appear anywhere;
 * If we get here it is a bad or stale ID.
2783 static void skd_complete_special(struct skd_device *skdev,
2784 volatile struct fit_completion_entry_v1
2786 volatile struct fit_comp_error_info *skerr,
2787 struct skd_special_context *skspcl)
2789 lockdep_assert_held(&skdev->lock);
2791 dev_dbg(&skdev->pdev->dev, " completing special request %p\n", skspcl);
2792 if (skspcl->orphaned) {
2793 /* Discard orphaned request */
/* ?: Can this be released directly, or does it need
 * to go through a worker? */
2796 dev_dbg(&skdev->pdev->dev, "release orphaned %p\n", skspcl);
2797 skd_release_special(skdev, skspcl);
2801 skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2803 skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2804 skspcl->req.completion = *skcomp;
2805 skspcl->req.err_info = *skerr;
2807 skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2808 skerr->code, skerr->qual, skerr->fruc);
2810 wake_up_interruptible(&skdev->waitq);
2813 /* assume spinlock is already held */
2814 static void skd_release_special(struct skd_device *skdev,
2815 struct skd_special_context *skspcl)
2817 int i, was_depleted;
2819 for (i = 0; i < skspcl->req.n_sg; i++) {
2820 struct page *page = sg_page(&skspcl->req.sg[i]);
2824 was_depleted = (skdev->skspcl_free_list == NULL);
2826 skspcl->req.state = SKD_REQ_STATE_IDLE;
2827 skspcl->req.id += SKD_ID_INCR;
skspcl->req.next = (struct skd_request_context *)skdev->skspcl_free_list;
2830 skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2833 dev_dbg(&skdev->pdev->dev, "skspcl was depleted\n");
/* Free list was depleted. There might be waiters. */
2835 wake_up_interruptible(&skdev->waitq);
2839 static void skd_reset_skcomp(struct skd_device *skdev)
2841 memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
2843 skdev->skcomp_ix = 0;
2844 skdev->skcomp_cycle = 1;
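/* The cycle count starts at 1 because the table was just zeroed:
 * entries whose cycle field is still 0 can never match
 * skdev->skcomp_cycle, so stale slots are ignored.
 */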
 *****************************************************************************
 * INTERRUPTS
 *****************************************************************************
2852 static void skd_completion_worker(struct work_struct *work)
2854 struct skd_device *skdev =
2855 container_of(work, struct skd_device, completion_worker);
2856 unsigned long flags;
2857 int flush_enqueued = 0;
2859 spin_lock_irqsave(&skdev->lock, flags);
 * Pass in limit=0, which means no limit:
 * process everything in the completion queue.
2865 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2866 skd_request_fn(skdev->queue);
2868 spin_unlock_irqrestore(&skdev->lock, flags);
2871 static void skd_isr_msg_from_dev(struct skd_device *skdev);
static irqreturn_t
skd_isr(int irq, void *ptr)
2876 struct skd_device *skdev;
2881 int flush_enqueued = 0;
2883 skdev = (struct skd_device *)ptr;
2884 spin_lock(&skdev->lock);
2887 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2889 ack = FIT_INT_DEF_MASK;
2892 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
/* As long as there is an interrupt pending on the device, keep
 * running the loop. When there is none, get out; but if we've
 * never done any processing, call the completion handler?
/* No interrupts on device, but run the completion
 * handler anyway?
 */
if (likely(skdev->state == SKD_DRVR_STATE_ONLINE))
2912 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2914 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
2915 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
2916 if (intstat & FIT_ISH_COMPLETION_POSTED) {
2918 * If we have already deferred completion
2919 * processing, don't bother running it again
2923 skd_isr_completion_posted(skdev,
2924 skd_isr_comp_limit, &flush_enqueued);
2927 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2928 skd_isr_fwstate(skdev);
2929 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2931 SKD_DRVR_STATE_DISAPPEARED) {
2932 spin_unlock(&skdev->lock);
2937 if (intstat & FIT_ISH_MSG_FROM_DEV)
2938 skd_isr_msg_from_dev(skdev);
2942 if (unlikely(flush_enqueued))
2943 skd_request_fn(skdev->queue);
2946 schedule_work(&skdev->completion_worker);
2947 else if (!flush_enqueued)
2948 skd_request_fn(skdev->queue);
2950 spin_unlock(&skdev->lock);
2955 static void skd_drive_fault(struct skd_device *skdev)
2957 skdev->state = SKD_DRVR_STATE_FAULT;
2958 dev_err(&skdev->pdev->dev, "Drive FAULT\n");
2961 static void skd_drive_disappeared(struct skd_device *skdev)
2963 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
2964 dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
2967 static void skd_isr_fwstate(struct skd_device *skdev)
2972 int prev_driver_state = skdev->state;
2974 sense = SKD_READL(skdev, FIT_STATUS);
2975 state = sense & FIT_SR_DRIVE_STATE_MASK;
2977 dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
2978 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
2979 skd_drive_state_to_str(state), state);
2981 skdev->drive_state = state;
2983 switch (skdev->drive_state) {
2984 case FIT_SR_DRIVE_INIT:
2985 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
2986 skd_disable_interrupts(skdev);
2989 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
2990 skd_recover_requests(skdev, 0);
2991 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
2992 skdev->timer_countdown = SKD_STARTING_TIMO;
2993 skdev->state = SKD_DRVR_STATE_STARTING;
2994 skd_soft_reset(skdev);
2997 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
2998 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2999 skdev->last_mtd = mtd;
3002 case FIT_SR_DRIVE_ONLINE:
3003 skdev->cur_max_queue_depth = skd_max_queue_depth;
3004 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3005 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3007 skdev->queue_low_water_mark =
3008 skdev->cur_max_queue_depth * 2 / 3 + 1;
3009 if (skdev->queue_low_water_mark < 1)
3010 skdev->queue_low_water_mark = 1;
3011 dev_info(&skdev->pdev->dev,
3012 "Queue depth limit=%d dev=%d lowat=%d\n",
3013 skdev->cur_max_queue_depth,
3014 skdev->dev_max_queue_depth,
3015 skdev->queue_low_water_mark);
3017 skd_refresh_device_data(skdev);
3020 case FIT_SR_DRIVE_BUSY:
3021 skdev->state = SKD_DRVR_STATE_BUSY;
3022 skdev->timer_countdown = SKD_BUSY_TIMO;
3023 skd_quiesce_dev(skdev);
3025 case FIT_SR_DRIVE_BUSY_SANITIZE:
/* set a timer for 3 seconds; we'll abort any unfinished
 * commands after it expires
3029 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3030 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3031 blk_start_queue(skdev->queue);
3033 case FIT_SR_DRIVE_BUSY_ERASE:
3034 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3035 skdev->timer_countdown = SKD_BUSY_TIMO;
3037 case FIT_SR_DRIVE_OFFLINE:
3038 skdev->state = SKD_DRVR_STATE_IDLE;
3040 case FIT_SR_DRIVE_SOFT_RESET:
3041 switch (skdev->state) {
3042 case SKD_DRVR_STATE_STARTING:
3043 case SKD_DRVR_STATE_RESTARTING:
3044 /* Expected by a caller of skd_soft_reset() */
3047 skdev->state = SKD_DRVR_STATE_RESTARTING;
3051 case FIT_SR_DRIVE_FW_BOOTING:
3052 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
3053 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3054 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3057 case FIT_SR_DRIVE_DEGRADED:
3058 case FIT_SR_PCIE_LINK_DOWN:
3059 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3062 case FIT_SR_DRIVE_FAULT:
3063 skd_drive_fault(skdev);
3064 skd_recover_requests(skdev, 0);
3065 blk_start_queue(skdev->queue);
3068 /* PCIe bus returned all Fs? */
3070 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
3072 skd_drive_disappeared(skdev);
3073 skd_recover_requests(skdev, 0);
3074 blk_start_queue(skdev->queue);
 * Unknown FW state. Wait for a state we recognize.
3082 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3083 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3084 skd_skdev_state_to_str(skdev->state), skdev->state);
3087 static void skd_recover_requests(struct skd_device *skdev, int requeue)
3091 for (i = 0; i < skdev->num_req_context; i++) {
3092 struct skd_request_context *skreq = &skdev->skreq_table[i];
3094 if (skreq->state == SKD_REQ_STATE_BUSY) {
3095 skd_log_skreq(skdev, skreq, "recover");
3097 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3098 SKD_ASSERT(skreq->req != NULL);
3100 /* Release DMA resources for the request. */
3101 if (skreq->n_sg > 0)
3102 skd_postop_sg_list(skdev, skreq);
if (requeue &&
    (unsigned long) ++skreq->req->special <
    SKD_MAX_RETRIES)
3107 blk_requeue_request(skdev->queue, skreq->req);
3109 skd_end_request(skdev, skreq, BLK_STS_IOERR);
3113 skreq->state = SKD_REQ_STATE_IDLE;
3114 skreq->id += SKD_ID_INCR;
3117 skreq[-1].next = skreq;
3120 skdev->skreq_free_list = skdev->skreq_table;
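/* The loop above relinked the request table into a free list in
 * table order (each entry pointing at its successor); the head now
 * points back at entry 0.
 */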
3122 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3123 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3125 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3126 skd_log_skmsg(skdev, skmsg, "salvaged");
3127 SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3128 skmsg->state = SKD_MSG_STATE_IDLE;
3129 skmsg->id += SKD_ID_INCR;
3132 skmsg[-1].next = skmsg;
3135 skdev->skmsg_free_list = skdev->skmsg_table;
3137 for (i = 0; i < skdev->n_special; i++) {
3138 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
/* If orphaned, reclaim it because it has already been reported
 * to the process as an error (it was just waiting for
 * a completion that didn't come, and now it will never come).
 * If busy, change to a state that will cause it to error
 * out in the wait routine and let it do the normal
 * reporting and reclaiming.
3147 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3148 if (skspcl->orphaned) {
3149 dev_dbg(&skdev->pdev->dev, "orphaned %p\n",
3151 skd_release_special(skdev, skspcl);
3153 dev_dbg(&skdev->pdev->dev, "not orphaned %p\n",
3155 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3159 skdev->skspcl_free_list = skdev->skspcl_table;
3161 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3162 skdev->timeout_slot[i] = 0;
3164 skdev->in_flight = 0;
3167 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3173 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3175 dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
3178 /* ignore any mtd that is an ack for something we didn't send */
3179 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
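/* Startup handshake, driven one ack at a time through this ISR:
 * FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
 * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO ->
 * CMD_LOG_TIME_STAMP_HI -> ARM_QUEUE, after which the drive is,
 * or soon will be, online.
 */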
3182 switch (FIT_MXD_TYPE(mfd)) {
3183 case FIT_MTD_FITFW_INIT:
3184 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3186 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3187 dev_err(&skdev->pdev->dev, "protocol mismatch\n");
3188 dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
3189 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
3190 dev_err(&skdev->pdev->dev, " please upgrade driver\n");
3191 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3192 skd_soft_reset(skdev);
3195 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3196 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3197 skdev->last_mtd = mtd;
3200 case FIT_MTD_GET_CMDQ_DEPTH:
3201 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3202 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3203 SKD_N_COMPLETION_ENTRY);
3204 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3205 skdev->last_mtd = mtd;
3208 case FIT_MTD_SET_COMPQ_DEPTH:
3209 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3210 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3211 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3212 skdev->last_mtd = mtd;
3215 case FIT_MTD_SET_COMPQ_ADDR:
3216 skd_reset_skcomp(skdev);
3217 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3218 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3219 skdev->last_mtd = mtd;
3222 case FIT_MTD_CMD_LOG_HOST_ID:
3223 skdev->connect_time_stamp = get_seconds();
3224 data = skdev->connect_time_stamp & 0xFFFF;
3225 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3226 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3227 skdev->last_mtd = mtd;
3230 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3231 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3232 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3233 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3234 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3235 skdev->last_mtd = mtd;
3238 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3239 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3240 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3241 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3242 skdev->last_mtd = mtd;
3244 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
3245 skdev->connect_time_stamp, skdev->drive_jiffies);
3248 case FIT_MTD_ARM_QUEUE:
3249 skdev->last_mtd = 0;
3251 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3260 static void skd_disable_interrupts(struct skd_device *skdev)
3264 sense = SKD_READL(skdev, FIT_CONTROL);
3265 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3266 SKD_WRITEL(skdev, sense, FIT_CONTROL);
3267 dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
/* Note that all 1s are written. A 1-bit means
 * disable, a 0-bit means enable.
3272 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3275 static void skd_enable_interrupts(struct skd_device *skdev)
3279 /* unmask interrupts first */
3280 val = FIT_ISH_FW_STATE_CHANGE +
3281 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
/* Note that the complement of mask is written. A 1-bit means
 * disable, a 0-bit means enable. */
3285 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3286 dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
3288 val = SKD_READL(skdev, FIT_CONTROL);
3289 val |= FIT_CR_ENABLE_INTERRUPTS;
3290 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
3291 SKD_WRITEL(skdev, val, FIT_CONTROL);
3295 *****************************************************************************
3296 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3297 *****************************************************************************
3300 static void skd_soft_reset(struct skd_device *skdev)
3304 val = SKD_READL(skdev, FIT_CONTROL);
3305 val |= (FIT_CR_SOFT_RESET);
3306 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
3307 SKD_WRITEL(skdev, val, FIT_CONTROL);
3310 static void skd_start_device(struct skd_device *skdev)
3312 unsigned long flags;
3316 spin_lock_irqsave(&skdev->lock, flags);
3318 /* ack all ghost interrupts */
3319 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3321 sense = SKD_READL(skdev, FIT_STATUS);
3323 dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
3325 state = sense & FIT_SR_DRIVE_STATE_MASK;
3326 skdev->drive_state = state;
3327 skdev->last_mtd = 0;
3329 skdev->state = SKD_DRVR_STATE_STARTING;
3330 skdev->timer_countdown = SKD_STARTING_TIMO;
3332 skd_enable_interrupts(skdev);
3334 switch (skdev->drive_state) {
3335 case FIT_SR_DRIVE_OFFLINE:
3336 dev_err(&skdev->pdev->dev, "Drive offline...\n");
3339 case FIT_SR_DRIVE_FW_BOOTING:
3340 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
3341 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3342 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3345 case FIT_SR_DRIVE_BUSY_SANITIZE:
3346 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
3347 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3348 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3351 case FIT_SR_DRIVE_BUSY_ERASE:
3352 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
3353 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3354 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3357 case FIT_SR_DRIVE_INIT:
3358 case FIT_SR_DRIVE_ONLINE:
3359 skd_soft_reset(skdev);
3362 case FIT_SR_DRIVE_BUSY:
3363 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
3364 skdev->state = SKD_DRVR_STATE_BUSY;
3365 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3368 case FIT_SR_DRIVE_SOFT_RESET:
3369 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
3372 case FIT_SR_DRIVE_FAULT:
/* Fault state is bad... soft reset won't do it...
 * Hard reset, maybe, but does it work on this device?
 * For now, just fault so the system doesn't hang.
3377 skd_drive_fault(skdev);
/* start the queue so we can respond to requests with errors */
3379 dev_dbg(&skdev->pdev->dev, "starting queue\n");
3380 blk_start_queue(skdev->queue);
3381 skdev->gendisk_on = -1;
3382 wake_up_interruptible(&skdev->waitq);
3386 /* Most likely the device isn't there or isn't responding
3387 * to the BAR1 addresses. */
3388 skd_drive_disappeared(skdev);
/* start the queue so we can respond to requests with errors */
3390 dev_dbg(&skdev->pdev->dev,
3391 "starting queue to error-out reqs\n");
3392 blk_start_queue(skdev->queue);
3393 skdev->gendisk_on = -1;
3394 wake_up_interruptible(&skdev->waitq);
3398 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
3399 skdev->drive_state);
3403 state = SKD_READL(skdev, FIT_CONTROL);
3404 dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
3406 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3407 dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
3409 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3410 dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
3412 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3413 dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
3415 state = SKD_READL(skdev, FIT_HW_VERSION);
3416 dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
3418 spin_unlock_irqrestore(&skdev->lock, flags);
3421 static void skd_stop_device(struct skd_device *skdev)
3423 unsigned long flags;
3424 struct skd_special_context *skspcl = &skdev->internal_skspcl;
3428 spin_lock_irqsave(&skdev->lock, flags);
3430 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3431 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
3435 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3436 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
3440 skdev->state = SKD_DRVR_STATE_SYNCING;
3441 skdev->sync_done = 0;
3443 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3445 spin_unlock_irqrestore(&skdev->lock, flags);
3447 wait_event_interruptible_timeout(skdev->waitq,
3448 (skdev->sync_done), (10 * HZ));
3450 spin_lock_irqsave(&skdev->lock, flags);
3452 switch (skdev->sync_done) {
3454 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
3457 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
3460 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
3464 skdev->state = SKD_DRVR_STATE_STOPPING;
3465 spin_unlock_irqrestore(&skdev->lock, flags);
3467 skd_kill_timer(skdev);
3469 spin_lock_irqsave(&skdev->lock, flags);
3470 skd_disable_interrupts(skdev);
3472 /* ensure all ints on device are cleared */
3473 /* soft reset the device to unload with a clean slate */
3474 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3475 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3477 spin_unlock_irqrestore(&skdev->lock, flags);
3479 /* poll every 100ms, 1 second timeout */
3480 for (i = 0; i < 10; i++) {
dev_state = SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3483 if (dev_state == FIT_SR_DRIVE_INIT)
3485 set_current_state(TASK_INTERRUPTIBLE);
3486 schedule_timeout(msecs_to_jiffies(100));
3489 if (dev_state != FIT_SR_DRIVE_INIT)
3490 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
3494 /* assume spinlock is held */
3495 static void skd_restart_device(struct skd_device *skdev)
3499 /* ack all ghost interrupts */
3500 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3502 state = SKD_READL(skdev, FIT_STATUS);
3504 dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
3506 state &= FIT_SR_DRIVE_STATE_MASK;
3507 skdev->drive_state = state;
3508 skdev->last_mtd = 0;
3510 skdev->state = SKD_DRVR_STATE_RESTARTING;
3511 skdev->timer_countdown = SKD_RESTARTING_TIMO;
3513 skd_soft_reset(skdev);
3516 /* assume spinlock is held */
3517 static int skd_quiesce_dev(struct skd_device *skdev)
3521 switch (skdev->state) {
3522 case SKD_DRVR_STATE_BUSY:
3523 case SKD_DRVR_STATE_BUSY_IMMINENT:
3524 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
3525 blk_stop_queue(skdev->queue);
3527 case SKD_DRVR_STATE_ONLINE:
3528 case SKD_DRVR_STATE_STOPPING:
3529 case SKD_DRVR_STATE_SYNCING:
3530 case SKD_DRVR_STATE_PAUSING:
3531 case SKD_DRVR_STATE_PAUSED:
3532 case SKD_DRVR_STATE_STARTING:
3533 case SKD_DRVR_STATE_RESTARTING:
3534 case SKD_DRVR_STATE_RESUMING:
3537 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
3543 /* assume spinlock is held */
3544 static int skd_unquiesce_dev(struct skd_device *skdev)
3546 int prev_driver_state = skdev->state;
3548 skd_log_skdev(skdev, "unquiesce");
3549 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3550 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
3553 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
 * If there has been a state change to other than
 * ONLINE, we will rely on a controller state change
 * to come back online and restart the queue.
 * The BUSY state means that the driver is ready to
 * continue normal processing but is waiting for the
 * controller to become available.
3562 skdev->state = SKD_DRVR_STATE_BUSY;
3563 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
 * The drive has just come online; the driver is either in startup,
 * paused performing a task, or busy waiting for hardware.
3571 switch (skdev->state) {
3572 case SKD_DRVR_STATE_PAUSED:
3573 case SKD_DRVR_STATE_BUSY:
3574 case SKD_DRVR_STATE_BUSY_IMMINENT:
3575 case SKD_DRVR_STATE_BUSY_ERASE:
3576 case SKD_DRVR_STATE_STARTING:
3577 case SKD_DRVR_STATE_RESTARTING:
3578 case SKD_DRVR_STATE_FAULT:
3579 case SKD_DRVR_STATE_IDLE:
3580 case SKD_DRVR_STATE_LOAD:
3581 skdev->state = SKD_DRVR_STATE_ONLINE;
3582 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3583 skd_skdev_state_to_str(prev_driver_state),
3584 prev_driver_state, skd_skdev_state_to_str(skdev->state),
3586 dev_dbg(&skdev->pdev->dev,
3587 "**** device ONLINE...starting block queue\n");
3588 dev_dbg(&skdev->pdev->dev, "starting queue\n");
3589 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
3590 blk_start_queue(skdev->queue);
3591 skdev->gendisk_on = 1;
3592 wake_up_interruptible(&skdev->waitq);
3595 case SKD_DRVR_STATE_DISAPPEARED:
3597 dev_dbg(&skdev->pdev->dev,
3598 "**** driver state %d, not implemented\n",
3606 *****************************************************************************
3607 * PCIe MSI/MSI-X INTERRUPT HANDLERS
3608 *****************************************************************************
3611 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3613 struct skd_device *skdev = skd_host_data;
3614 unsigned long flags;
3616 spin_lock_irqsave(&skdev->lock, flags);
3617 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3618 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3619 dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
3620 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3621 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3622 spin_unlock_irqrestore(&skdev->lock, flags);
3626 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3628 struct skd_device *skdev = skd_host_data;
3629 unsigned long flags;
3631 spin_lock_irqsave(&skdev->lock, flags);
3632 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3633 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3634 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3635 skd_isr_fwstate(skdev);
3636 spin_unlock_irqrestore(&skdev->lock, flags);
3640 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3642 struct skd_device *skdev = skd_host_data;
3643 unsigned long flags;
3644 int flush_enqueued = 0;
3647 spin_lock_irqsave(&skdev->lock, flags);
3648 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3649 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3650 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3651 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3654 skd_request_fn(skdev->queue);
3657 schedule_work(&skdev->completion_worker);
3658 else if (!flush_enqueued)
3659 skd_request_fn(skdev->queue);
3661 spin_unlock_irqrestore(&skdev->lock, flags);
3666 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3668 struct skd_device *skdev = skd_host_data;
3669 unsigned long flags;
3671 spin_lock_irqsave(&skdev->lock, flags);
3672 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3673 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3674 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3675 skd_isr_msg_from_dev(skdev);
3676 spin_unlock_irqrestore(&skdev->lock, flags);
3680 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3682 struct skd_device *skdev = skd_host_data;
3683 unsigned long flags;
3685 spin_lock_irqsave(&skdev->lock, flags);
3686 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3687 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3688 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3689 spin_unlock_irqrestore(&skdev->lock, flags);
3694 *****************************************************************************
3695 * PCIe MSI/MSI-X SETUP
3696 *****************************************************************************
3699 struct skd_msix_entry {
3703 struct skd_init_msix_entry {
3705 irq_handler_t handler;
3708 #define SKD_MAX_MSIX_COUNT 13
3709 #define SKD_MIN_MSIX_COUNT 7
3710 #define SKD_BASE_MSIX_IRQ 4
3712 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3713 { "(DMA 0)", skd_reserved_isr },
3714 { "(DMA 1)", skd_reserved_isr },
3715 { "(DMA 2)", skd_reserved_isr },
3716 { "(DMA 3)", skd_reserved_isr },
3717 { "(State Change)", skd_statec_isr },
3718 { "(COMPL_Q)", skd_comp_q },
3719 { "(MSG)", skd_msg_isr },
3720 { "(Reserved)", skd_reserved_isr },
3721 { "(Reserved)", skd_reserved_isr },
3722 { "(Queue Full 0)", skd_qfull_isr },
3723 { "(Queue Full 1)", skd_qfull_isr },
3724 { "(Queue Full 2)", skd_qfull_isr },
3725 { "(Queue Full 3)", skd_qfull_isr },
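/* The table index is the MSI-X vector number: vector 4 is the
 * state-change interrupt, vector 5 the completion queue and
 * vector 6 messages from the device; the rest are reserved or
 * queue-full notifications.
 */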
3728 static int skd_acquire_msix(struct skd_device *skdev)
3731 struct pci_dev *pdev = skdev->pdev;
3733 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
3736 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
3740 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
3741 sizeof(struct skd_msix_entry), GFP_KERNEL);
3742 if (!skdev->msix_entries) {
3744 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
3748 /* Enable MSI-X vectors for the base queue */
3749 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3750 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
3752 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3753 "%s%d-msix %s", DRV_NAME, skdev->devno,
3754 msix_entries[i].name);
3756 rc = devm_request_irq(&skdev->pdev->dev,
3757 pci_irq_vector(skdev->pdev, i),
3758 msix_entries[i].handler, 0,
3759 qentry->isr_name, skdev);
3761 dev_err(&skdev->pdev->dev,
3762 "Unable to register(%d) MSI-X handler %d: %s\n",
3763 rc, i, qentry->isr_name);
3768 dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
3769 SKD_MAX_MSIX_COUNT);
3774 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
3776 kfree(skdev->msix_entries);
3777 skdev->msix_entries = NULL;
3781 static int skd_acquire_irq(struct skd_device *skdev)
3783 struct pci_dev *pdev = skdev->pdev;
3784 unsigned int irq_flag = PCI_IRQ_LEGACY;
3787 if (skd_isr_type == SKD_IRQ_MSIX) {
3788 rc = skd_acquire_msix(skdev);
3792 dev_err(&skdev->pdev->dev,
3793 "failed to enable MSI-X, re-trying with MSI %d\n", rc);
3796 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
3799 if (skd_isr_type != SKD_IRQ_LEGACY)
3800 irq_flag |= PCI_IRQ_MSI;
3801 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
3803 dev_err(&skdev->pdev->dev,
3804 "failed to allocate the MSI interrupt %d\n", rc);
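/* Legacy INTx lines can be shared with other devices, so request a
 * shared handler only when MSI could not be enabled.
 */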
3808 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
3809 pdev->msi_enabled ? 0 : IRQF_SHARED,
3810 skdev->isr_name, skdev);
3812 pci_free_irq_vectors(pdev);
3813 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
3821 static void skd_release_irq(struct skd_device *skdev)
3823 struct pci_dev *pdev = skdev->pdev;
3825 if (skdev->msix_entries) {
3828 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3829 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
3833 kfree(skdev->msix_entries);
3834 skdev->msix_entries = NULL;
3836 devm_free_irq(&pdev->dev, pdev->irq, skdev);
3839 pci_free_irq_vectors(pdev);
 *****************************************************************************
 * CONSTRUCT
 *****************************************************************************
3848 static int skd_cons_skcomp(struct skd_device *skdev)
3851 struct fit_completion_entry_v1 *skcomp;
3853 dev_dbg(&skdev->pdev->dev,
3854 "comp pci_alloc, total bytes %zd entries %d\n",
3855 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
3857 skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
3858 &skdev->cq_dma_address);
3860 if (skcomp == NULL) {
3865 skdev->skcomp_table = skcomp;
3866 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY);
3874 static int skd_cons_skmsg(struct skd_device *skdev)
3879 dev_dbg(&skdev->pdev->dev,
3880 "skmsg_table kzalloc, struct %lu, count %u total %lu\n",
3881 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
3882 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
3884 skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
3885 *skdev->num_fitmsg_context, GFP_KERNEL);
3886 if (skdev->skmsg_table == NULL) {
3891 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3892 struct skd_fitmsg_context *skmsg;
3894 skmsg = &skdev->skmsg_table[i];
3896 skmsg->id = i + SKD_ID_FIT_MSG;
3898 skmsg->state = SKD_MSG_STATE_IDLE;
3899 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
3900 SKD_N_FITMSG_BYTES + 64,
3901 &skmsg->mb_dma_address);
3903 if (skmsg->msg_buf == NULL) {
3908 skmsg->offset = (u32)((u64)skmsg->msg_buf &
3909 (~FIT_QCMD_BASE_ADDRESS_MASK));
3910 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
3911 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
3912 FIT_QCMD_BASE_ADDRESS_MASK);
3913 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
3914 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
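/* The arithmetic above rounds the CPU pointer and the DMA address
 * up to the next FIT_QCMD base-address boundary; the extra 64
 * bytes allocated make room for this. skmsg->offset keeps the
 * original pointer's low bits, which skd_free_skmsg() adds back
 * before freeing. In practice the DMA API returns aligned memory,
 * so the offset is normally zero.
 */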
3915 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
3917 skmsg->next = &skmsg[1];
3920 /* Free list is in order starting with the 0th entry. */
3921 skdev->skmsg_table[i - 1].next = NULL;
3922 skdev->skmsg_free_list = skdev->skmsg_table;
3928 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
3930 dma_addr_t *ret_dma_addr)
3932 struct fit_sg_descriptor *sg_list;
3935 nbytes = sizeof(*sg_list) * n_sg;
3937 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
3939 if (sg_list != NULL) {
3940 uint64_t dma_address = *ret_dma_addr;
3943 memset(sg_list, 0, nbytes);
3945 for (i = 0; i < n_sg - 1; i++) {
3947 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
3949 sg_list[i].next_desc_ptr = dma_address + ndp_off;
3951 sg_list[i].next_desc_ptr = 0LL;
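/* Each descriptor's next_desc_ptr holds the bus address of the
 * descriptor that follows it, so the controller can walk the SG
 * list as a singly linked chain; the final entry above terminates
 * the chain with a null pointer.
 */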
3957 static int skd_cons_skreq(struct skd_device *skdev)
3962 dev_dbg(&skdev->pdev->dev,
3963 "skreq_table kzalloc, struct %lu, count %u total %lu\n",
3964 sizeof(struct skd_request_context), skdev->num_req_context,
3965 sizeof(struct skd_request_context) * skdev->num_req_context);
3967 skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
3968 * skdev->num_req_context, GFP_KERNEL);
3969 if (skdev->skreq_table == NULL) {
3974 dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
3975 skdev->sgs_per_request, sizeof(struct scatterlist),
3976 skdev->sgs_per_request * sizeof(struct scatterlist));
3978 for (i = 0; i < skdev->num_req_context; i++) {
3979 struct skd_request_context *skreq;
3981 skreq = &skdev->skreq_table[i];
3983 skreq->id = i + SKD_ID_RW_REQUEST;
3984 skreq->state = SKD_REQ_STATE_IDLE;
3986 skreq->sg = kzalloc(sizeof(struct scatterlist) *
3987 skdev->sgs_per_request, GFP_KERNEL);
3988 if (skreq->sg == NULL) {
3992 sg_init_table(skreq->sg, skdev->sgs_per_request);
3994 skreq->sksg_list = skd_cons_sg_list(skdev,
3995 skdev->sgs_per_request,
3996 &skreq->sksg_dma_address);
3998 if (skreq->sksg_list == NULL) {
4003 skreq->next = &skreq[1];
4006 /* Free list is in order starting with the 0th entry. */
4007 skdev->skreq_table[i - 1].next = NULL;
4008 skdev->skreq_free_list = skdev->skreq_table;
4014 static int skd_cons_skspcl(struct skd_device *skdev)
4019 dev_dbg(&skdev->pdev->dev,
4020 "skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4021 sizeof(struct skd_special_context), skdev->n_special,
4022 sizeof(struct skd_special_context) * skdev->n_special);
4024 skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4025 * skdev->n_special, GFP_KERNEL);
4026 if (skdev->skspcl_table == NULL) {
4031 for (i = 0; i < skdev->n_special; i++) {
4032 struct skd_special_context *skspcl;
4034 skspcl = &skdev->skspcl_table[i];
4036 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4037 skspcl->req.state = SKD_REQ_STATE_IDLE;
4039 skspcl->req.next = &skspcl[1].req;
4041 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4045 &skspcl->mb_dma_address);
4046 if (skspcl->msg_buf == NULL) {
4051 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4052 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4053 if (skspcl->req.sg == NULL) {
4058 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
SKD_N_SG_PER_SPECIAL, &skspcl->req.sksg_dma_address);
4062 if (skspcl->req.sksg_list == NULL) {
4068 /* Free list is in order starting with the 0th entry. */
4069 skdev->skspcl_table[i - 1].req.next = NULL;
4070 skdev->skspcl_free_list = skdev->skspcl_table;
4078 static int skd_cons_sksb(struct skd_device *skdev)
4081 struct skd_special_context *skspcl;
4084 skspcl = &skdev->internal_skspcl;
4086 skspcl->req.id = 0 + SKD_ID_INTERNAL;
4087 skspcl->req.state = SKD_REQ_STATE_IDLE;
4089 nbytes = SKD_N_INTERNAL_BYTES;
4091 skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4092 &skspcl->db_dma_address);
4093 if (skspcl->data_buf == NULL) {
4098 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4099 skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4100 &skspcl->mb_dma_address);
4101 if (skspcl->msg_buf == NULL) {
4106 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4107 &skspcl->req.sksg_dma_address);
4108 if (skspcl->req.sksg_list == NULL) {
4113 if (!skd_format_internal_skspcl(skdev)) {
4122 static int skd_cons_disk(struct skd_device *skdev)
4125 struct gendisk *disk;
4126 struct request_queue *q;
4127 unsigned long flags;
4129 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4136 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4138 disk->major = skdev->major;
4139 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4140 disk->fops = &skd_blockdev_ops;
4141 disk->private_data = skdev;
4143 q = blk_init_queue(skd_request_fn, &skdev->lock);
4148 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
4152 q->queuedata = skdev;
4154 blk_queue_write_cache(q, true, true);
4155 blk_queue_max_segments(q, skdev->sgs_per_request);
4156 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4158 /* set optimal I/O size to 8KB */
4159 blk_queue_io_opt(q, 8192);
4161 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4162 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4164 spin_lock_irqsave(&skdev->lock, flags);
4165 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
4166 blk_stop_queue(skdev->queue);
4167 spin_unlock_irqrestore(&skdev->lock, flags);
4173 #define SKD_N_DEV_TABLE 16u
4174 static u32 skd_next_devno;
4176 static struct skd_device *skd_construct(struct pci_dev *pdev)
4178 struct skd_device *skdev;
4179 int blk_major = skd_major;
4182 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4185 dev_err(&pdev->dev, "memory alloc failure\n");
4189 skdev->state = SKD_DRVR_STATE_LOAD;
4191 skdev->devno = skd_next_devno++;
4192 skdev->major = blk_major;
4193 skdev->dev_max_queue_depth = 0;
4195 skdev->num_req_context = skd_max_queue_depth;
4196 skdev->num_fitmsg_context = skd_max_queue_depth;
4197 skdev->n_special = skd_max_pass_thru;
4198 skdev->cur_max_queue_depth = 1;
4199 skdev->queue_low_water_mark = 1;
4200 skdev->proto_ver = 99;
4201 skdev->sgs_per_request = skd_sgs_per_request;
4202 skdev->dbg_level = skd_dbg_level;
4204 spin_lock_init(&skdev->lock);
4206 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4208 dev_dbg(&skdev->pdev->dev, "skcomp\n");
4209 rc = skd_cons_skcomp(skdev);
4213 dev_dbg(&skdev->pdev->dev, "skmsg\n");
4214 rc = skd_cons_skmsg(skdev);
4218 dev_dbg(&skdev->pdev->dev, "skreq\n");
4219 rc = skd_cons_skreq(skdev);
4223 dev_dbg(&skdev->pdev->dev, "skspcl\n");
4224 rc = skd_cons_skspcl(skdev);
4228 dev_dbg(&skdev->pdev->dev, "sksb\n");
4229 rc = skd_cons_sksb(skdev);
4233 dev_dbg(&skdev->pdev->dev, "disk\n");
4234 rc = skd_cons_disk(skdev);
4238 dev_dbg(&skdev->pdev->dev, "VICTORY\n");
4242 dev_dbg(&skdev->pdev->dev, "construct failed\n");
4243 skd_destruct(skdev);
 *****************************************************************************
 * DESTRUCT (FREE)
 *****************************************************************************
4253 static void skd_free_skcomp(struct skd_device *skdev)
4255 if (skdev->skcomp_table != NULL) {
4258 nbytes = sizeof(skdev->skcomp_table[0]) *
4259 SKD_N_COMPLETION_ENTRY;
4260 pci_free_consistent(skdev->pdev, nbytes,
4261 skdev->skcomp_table, skdev->cq_dma_address);
4264 skdev->skcomp_table = NULL;
4265 skdev->cq_dma_address = 0;
4268 static void skd_free_skmsg(struct skd_device *skdev)
4272 if (skdev->skmsg_table == NULL)
4275 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4276 struct skd_fitmsg_context *skmsg;
4278 skmsg = &skdev->skmsg_table[i];
4280 if (skmsg->msg_buf != NULL) {
4281 skmsg->msg_buf += skmsg->offset;
4282 skmsg->mb_dma_address += skmsg->offset;
4283 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4285 skmsg->mb_dma_address);
4287 skmsg->msg_buf = NULL;
4288 skmsg->mb_dma_address = 0;
4291 kfree(skdev->skmsg_table);
4292 skdev->skmsg_table = NULL;
4295 static void skd_free_sg_list(struct skd_device *skdev,
4296 struct fit_sg_descriptor *sg_list,
4297 u32 n_sg, dma_addr_t dma_addr)
4299 if (sg_list != NULL) {
4302 nbytes = sizeof(*sg_list) * n_sg;
4304 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4308 static void skd_free_skreq(struct skd_device *skdev)
4312 if (skdev->skreq_table == NULL)
4315 for (i = 0; i < skdev->num_req_context; i++) {
4316 struct skd_request_context *skreq;
4318 skreq = &skdev->skreq_table[i];
4320 skd_free_sg_list(skdev, skreq->sksg_list,
4321 skdev->sgs_per_request,
4322 skreq->sksg_dma_address);
4324 skreq->sksg_list = NULL;
4325 skreq->sksg_dma_address = 0;
4330 kfree(skdev->skreq_table);
4331 skdev->skreq_table = NULL;
4334 static void skd_free_skspcl(struct skd_device *skdev)
4339 if (skdev->skspcl_table == NULL)
4342 for (i = 0; i < skdev->n_special; i++) {
4343 struct skd_special_context *skspcl;
4345 skspcl = &skdev->skspcl_table[i];
4347 if (skspcl->msg_buf != NULL) {
4348 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4349 pci_free_consistent(skdev->pdev, nbytes,
4351 skspcl->mb_dma_address);
4354 skspcl->msg_buf = NULL;
4355 skspcl->mb_dma_address = 0;
4357 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4358 SKD_N_SG_PER_SPECIAL,
4359 skspcl->req.sksg_dma_address);
4361 skspcl->req.sksg_list = NULL;
4362 skspcl->req.sksg_dma_address = 0;
4364 kfree(skspcl->req.sg);
4367 kfree(skdev->skspcl_table);
4368 skdev->skspcl_table = NULL;
4371 static void skd_free_sksb(struct skd_device *skdev)
4373 struct skd_special_context *skspcl;
4376 skspcl = &skdev->internal_skspcl;
4378 if (skspcl->data_buf != NULL) {
4379 nbytes = SKD_N_INTERNAL_BYTES;
4381 pci_free_consistent(skdev->pdev, nbytes,
4382 skspcl->data_buf, skspcl->db_dma_address);
4385 skspcl->data_buf = NULL;
4386 skspcl->db_dma_address = 0;
4388 if (skspcl->msg_buf != NULL) {
4389 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4390 pci_free_consistent(skdev->pdev, nbytes,
4391 skspcl->msg_buf, skspcl->mb_dma_address);
4394 skspcl->msg_buf = NULL;
4395 skspcl->mb_dma_address = 0;
4397 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4398 skspcl->req.sksg_dma_address);
4400 skspcl->req.sksg_list = NULL;
4401 skspcl->req.sksg_dma_address = 0;
4404 static void skd_free_disk(struct skd_device *skdev)
4406 struct gendisk *disk = skdev->disk;
4408 if (disk && (disk->flags & GENHD_FL_UP))
4412 blk_cleanup_queue(skdev->queue);
4413 skdev->queue = NULL;
4421 static void skd_destruct(struct skd_device *skdev)
4426 dev_dbg(&skdev->pdev->dev, "disk\n");
4427 skd_free_disk(skdev);
4429 dev_dbg(&skdev->pdev->dev, "sksb\n");
4430 skd_free_sksb(skdev);
4432 dev_dbg(&skdev->pdev->dev, "skspcl\n");
4433 skd_free_skspcl(skdev);
4435 dev_dbg(&skdev->pdev->dev, "skreq\n");
4436 skd_free_skreq(skdev);
4438 dev_dbg(&skdev->pdev->dev, "skmsg\n");
4439 skd_free_skmsg(skdev);
4441 dev_dbg(&skdev->pdev->dev, "skcomp\n");
4442 skd_free_skcomp(skdev);
4444 dev_dbg(&skdev->pdev->dev, "skdev\n");
4449 *****************************************************************************
4450 * BLOCK DEVICE (BDEV) GLUE
4451 *****************************************************************************
4454 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4456 struct skd_device *skdev;
4459 skdev = bdev->bd_disk->private_data;
4461 dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
4462 bdev->bd_disk->disk_name, current->comm);
4464 if (skdev->read_cap_is_valid) {
4465 capacity = get_capacity(skdev->disk);
4468 geo->cylinders = (capacity) / (255 * 64);
4475 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
4477 dev_dbg(&skdev->pdev->dev, "add_disk\n");
4478 device_add_disk(parent, skdev->disk);
4482 static const struct block_device_operations skd_blockdev_ops = {
4483 .owner = THIS_MODULE,
4484 .ioctl = skd_bdev_ioctl,
4485 .getgeo = skd_bdev_getgeo,
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
4494 static const struct pci_device_id skd_pci_tbl[] = {
4495 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4496 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4497 { 0 } /* terminate list */
4500 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4502 static char *skd_pci_info(struct skd_device *skdev, char *str)
4506 strcpy(str, "PCIe (");
4507 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4512 uint16_t pcie_lstat, lspeed, lwidth;
4515 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4516 lspeed = pcie_lstat & (0xF);
4517 lwidth = (pcie_lstat & 0x3F0) >> 4;
4520 strcat(str, "2.5GT/s ");
4521 else if (lspeed == 2)
4522 strcat(str, "5.0GT/s ");
4524 strcat(str, "<unknown> ");
4525 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
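/* e.g., a Gen2 x4 link yields the string "PCIe (5.0GT/s 4X)" */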
4531 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4536 struct skd_device *skdev;
4538 dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n",
4539 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4540 dev_info(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
4543 rc = pci_enable_device(pdev);
4546 rc = pci_request_regions(pdev, DRV_NAME);
4549 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4551 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4552 dev_err(&pdev->dev, "consistent DMA mask error %d\n",
4556 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4558 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
4559 goto err_out_regions;
4564 rc = register_blkdev(0, DRV_NAME);
4566 goto err_out_regions;
4571 skdev = skd_construct(pdev);
4572 if (skdev == NULL) {
4574 goto err_out_regions;
4577 skd_pci_info(skdev, pci_str);
4578 dev_info(&pdev->dev, "%s 64bit\n", pci_str);
4580 pci_set_master(pdev);
4581 rc = pci_enable_pcie_error_reporting(pdev);
4584 "bad enable of PCIe error reporting rc=%d\n", rc);
4585 skdev->pcie_error_reporting_is_enabled = 0;
4587 skdev->pcie_error_reporting_is_enabled = 1;
4589 pci_set_drvdata(pdev, skdev);
4591 for (i = 0; i < SKD_MAX_BARS; i++) {
4592 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4593 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4594 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4595 skdev->mem_size[i]);
4596 if (!skdev->mem_map[i]) {
4598 "Unable to map adapter memory!\n");
4600 goto err_out_iounmap;
4602 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
4603 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
4604 skdev->mem_size[i]);
4607 rc = skd_acquire_irq(skdev);
4609 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
4610 goto err_out_iounmap;
4613 rc = skd_start_timer(skdev);
4617 init_waitqueue_head(&skdev->waitq);
4619 skd_start_device(skdev);
4621 rc = wait_event_interruptible_timeout(skdev->waitq,
4622 (skdev->gendisk_on),
4623 (SKD_START_WAIT_SECONDS * HZ));
4624 if (skdev->gendisk_on > 0) {
4625 /* device came on-line after reset */
4626 skd_bdev_attach(&pdev->dev, skdev);
/* we timed out, something is wrong with the device,
 * don't add the disk structure
 */
4631 dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
/* in case of no error, we time out with -ENXIO */
4642 skd_stop_device(skdev);
4643 skd_release_irq(skdev);
4646 for (i = 0; i < SKD_MAX_BARS; i++)
4647 if (skdev->mem_map[i])
4648 iounmap(skdev->mem_map[i]);
4650 if (skdev->pcie_error_reporting_is_enabled)
4651 pci_disable_pcie_error_reporting(pdev);
4653 skd_destruct(skdev);
4656 pci_release_regions(pdev);
4659 pci_disable_device(pdev);
4660 pci_set_drvdata(pdev, NULL);
4664 static void skd_pci_remove(struct pci_dev *pdev)
4667 struct skd_device *skdev;
4669 skdev = pci_get_drvdata(pdev);
4671 dev_err(&pdev->dev, "no device data for PCI\n");
4674 skd_stop_device(skdev);
4675 skd_release_irq(skdev);
4677 for (i = 0; i < SKD_MAX_BARS; i++)
4678 if (skdev->mem_map[i])
4679 iounmap(skdev->mem_map[i]);
4681 if (skdev->pcie_error_reporting_is_enabled)
4682 pci_disable_pcie_error_reporting(pdev);
4684 skd_destruct(skdev);
4686 pci_release_regions(pdev);
4687 pci_disable_device(pdev);
4688 pci_set_drvdata(pdev, NULL);
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	skd_stop_device(skdev);

	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
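/*
 * Legacy PM resume: essentially re-runs the probe path against the
 * existing skd_device: restore config space, redo DMA masking, remap the
 * BARs, reacquire the IRQ, and restart the device.
 */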
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -1;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			dev_err(&pdev->dev, "consistent DMA mask error %d\n",
				rc);
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev, "Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);
	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
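/*
 * Shutdown only needs to quiesce the drive; the kernel is going down, so
 * no resources are released.
 */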
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	dev_err(&pdev->dev, "%s called\n", __func__);

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}

	dev_err(&pdev->dev, "calling stop\n");
	skd_stop_device(skdev);
}
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};
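/*
 * skd_pci_tbl (defined earlier in this file) matches
 * PCI_VENDOR_ID_STEC/PCI_DEVICE_ID_S1120; once skd_init() below registers
 * this driver, the PCI core calls skd_pci_probe() for each matching function.
 */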
/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
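/*
 * Human-readable names for the firmware drive states (FIT_SR_*) and the
 * driver's own state machines, used by the skd_log_*() helpers below.
 * Unknown values fall through to "???".
 */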
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}
static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}
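/*
 * Debug-level dumps of device, FIT message, and request state. These emit
 * via dev_dbg(), so they are compiled out (or gated by dynamic debug)
 * unless debugging is enabled for this file.
 */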
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
	dev_dbg(&skdev->pdev->dev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
	dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
		skdev->in_flight, skdev->cur_max_queue_depth,
		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	dev_dbg(&skdev->pdev->dev, "  timestamp=0x%x cycle=%d cycle_ix=%d\n",
		skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skmsg=%p event='%s'\n", skmsg, event);
	dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x length=%d\n",
		skd_skmsg_state_to_str(skmsg->state), skmsg->state, skmsg->id,
		skmsg->length);
}
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
	dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
		skreq->fitmsg_id);
	dev_dbg(&skdev->pdev->dev, "  timo=0x%x sg_dir=%d n_sg=%d\n",
		skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_dbg(&skdev->pdev->dev,
			"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
			lba, lba, count, count, (int)rq_data_dir(req));
	} else
		dev_dbg(&skdev->pdev->dev, "req=NULL\n");
}
/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
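/*
 * Module init validates every tunable and clamps out-of-range values back
 * to their defaults rather than failing the load. Assuming the tunables
 * checked below are exposed as module parameters (declared earlier in this
 * file), a hypothetical invocation would look like:
 *
 *   modprobe skd skd_max_queue_depth=64 skd_sgs_per_request=256
 */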
static int __init skd_init(void)
{
	BUILD_BUG_ON(sizeof(struct fit_msg_hdr) + SKD_MAX_REQ_PER_MSG *
		     sizeof(struct skd_scsi_request) != SKD_N_FITMSG_BYTES);

	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 ||
	    skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}
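/*
 * Module exit: unregister the PCI driver first so skd_pci_remove() runs
 * for every bound device, then release the block major if one was
 * registered.
 */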
static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);