2 * Driver for sTec s1120 PCIe SSDs. sTec was acquired by HGST in 2013;
3 * HGST itself had been acquired by Western Digital in 2012.
5 * Copyright 2012 sTec, Inc.
6 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
8 * This file is part of the Linux kernel, and is made available under
9 * the terms of the GNU General Public License version 2.
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/pci.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-mq.h>
20 #include <linux/sched.h>
21 #include <linux/interrupt.h>
22 #include <linux/compiler.h>
23 #include <linux/workqueue.h>
24 #include <linux/delay.h>
25 #include <linux/time.h>
26 #include <linux/hdreg.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/completion.h>
29 #include <linux/scatterlist.h>
30 #include <linux/version.h>
31 #include <linux/err.h>
32 #include <linux/aer.h>
33 #include <linux/wait.h>
34 #include <linux/stringify.h>
35 #include <scsi/scsi.h>
38 #include <linux/uaccess.h>
39 #include <asm/unaligned.h>
41 #include "skd_s1120.h"
43 static int skd_dbg_level;
44 static int skd_isr_comp_limit = 4;
47 SKD_FLUSH_INITIALIZER,
48 SKD_FLUSH_ZERO_SIZE_FIRST,
49 SKD_FLUSH_DATA_SECOND,
52 #define SKD_ASSERT(expr) \
54 if (unlikely(!(expr))) { \
55 pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
56 # expr, __FILE__, __func__, __LINE__); \
60 #define DRV_NAME "skd"
61 #define DRV_VERSION "2.2.1"
62 #define DRV_BUILD_ID "0260"
63 #define PFX DRV_NAME ": "
65 MODULE_LICENSE("GPL");
67 MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
68 MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
70 #define PCI_VENDOR_ID_STEC 0x1B39
71 #define PCI_DEVICE_ID_S1120 0x0001
73 #define SKD_FUA_NV (1 << 1)
74 #define SKD_MINORS_PER_DEVICE 16
76 #define SKD_MAX_QUEUE_DEPTH 200u
78 #define SKD_PAUSE_TIMEOUT (5 * 1000)
80 #define SKD_N_FITMSG_BYTES (512u)
81 #define SKD_MAX_REQ_PER_MSG 14
83 #define SKD_N_SPECIAL_FITMSG_BYTES (128u)
85 /* SG elements are 32 bytes, so we can make this 4096 and still be under the
86 * 128KB limit. That allows 4096*4K = 16M xfer size
88 #define SKD_N_SG_PER_REQ_DEFAULT 256u
90 #define SKD_N_COMPLETION_ENTRY 256u
91 #define SKD_N_READ_CAP_BYTES (8u)
93 #define SKD_N_INTERNAL_BYTES (512u)
95 #define SKD_SKCOMP_SIZE \
96 ((sizeof(struct fit_completion_entry_v1) + \
97 sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
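/*
 * Note: the completion ring and the parallel error-info table are sized
 * together -- SKD_N_COMPLETION_ENTRY entries of each, laid out back to
 * back in one DMA-coherent region (cleared as a single block in
 * skd_reset_skcomp() below).
 */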
99 /* 6 bits of uniquifier, 0xFC00 */
100 #define SKD_ID_INCR (0x400)
101 #define SKD_ID_TABLE_MASK (3u << 8u)
102 #define SKD_ID_RW_REQUEST (0u << 8u)
103 #define SKD_ID_INTERNAL (1u << 8u)
104 #define SKD_ID_FIT_MSG (3u << 8u)
105 #define SKD_ID_SLOT_MASK 0x00FFu
106 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
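/*
 * Illustrative ID layout (derived from the masks above): bits 0-7 hold
 * the slot, bits 8-9 select the table, and the bits covered by
 * SKD_ID_INCR and above form a uniquifier that is bumped every time a
 * context is reused, so stale completions can be detected, e.g.:
 *
 *	id = SKD_ID_RW_REQUEST | 5;	// slot 5, first use: 0x0005
 *	id += SKD_ID_INCR;		// issued: 0x0405
 */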
108 #define SKD_N_TIMEOUT_SLOT 4u
109 #define SKD_TIMEOUT_SLOT_MASK 3u
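/*
 * Illustrative: the driver timer ticks once a second and increments
 * timeout_stamp, so (timeout_stamp & SKD_TIMEOUT_SLOT_MASK) cycles
 * through the four timeout slots. Each request is counted in the slot
 * current at issue time; a nonzero count in the slot about to be reused
 * therefore means a request has been in flight for roughly
 * SKD_N_TIMEOUT_SLOT - 1 seconds or more (see skd_timer_tick()).
 */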
111 #define SKD_N_MAX_SECTORS 2048u
113 #define SKD_MAX_RETRIES 2u
115 #define SKD_TIMER_SECONDS(seconds) (seconds)
116 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
118 #define INQ_STD_NBYTES 36
120 enum skd_drvr_state {
124 SKD_DRVR_STATE_STARTING,
125 SKD_DRVR_STATE_ONLINE,
126 SKD_DRVR_STATE_PAUSING,
127 SKD_DRVR_STATE_PAUSED,
128 SKD_DRVR_STATE_DRAINING_TIMEOUT,
129 SKD_DRVR_STATE_RESTARTING,
130 SKD_DRVR_STATE_RESUMING,
131 SKD_DRVR_STATE_STOPPING,
132 SKD_DRVR_STATE_FAULT,
133 SKD_DRVR_STATE_DISAPPEARED,
134 SKD_DRVR_STATE_PROTOCOL_MISMATCH,
135 SKD_DRVR_STATE_BUSY_ERASE,
136 SKD_DRVR_STATE_BUSY_SANITIZE,
137 SKD_DRVR_STATE_BUSY_IMMINENT,
138 SKD_DRVR_STATE_WAIT_BOOT,
139 SKD_DRVR_STATE_SYNCING,
142 #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
143 #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
144 #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
145 #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
146 #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
147 #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
148 #define SKD_START_WAIT_SECONDS 90u
154 SKD_REQ_STATE_COMPLETED,
155 SKD_REQ_STATE_TIMEOUT,
158 enum skd_check_status_action {
159 SKD_CHECK_STATUS_REPORT_GOOD,
160 SKD_CHECK_STATUS_REPORT_SMART_ALERT,
161 SKD_CHECK_STATUS_REQUEUE_REQUEST,
162 SKD_CHECK_STATUS_REPORT_ERROR,
163 SKD_CHECK_STATUS_BUSY_IMMINENT,
167 struct fit_msg_hdr fmh;
168 struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
171 struct skd_fitmsg_context {
176 struct skd_msg_buf *msg_buf;
177 dma_addr_t mb_dma_address;
180 struct skd_request_context {
181 enum skd_req_state state;
190 enum dma_data_direction data_dir;
191 struct scatterlist *sg;
195 struct fit_sg_descriptor *sksg_list;
196 dma_addr_t sksg_dma_address;
198 struct fit_completion_entry_v1 completion;
200 struct fit_comp_error_info err_info;
204 struct skd_special_context {
205 struct skd_request_context req;
208 dma_addr_t db_dma_address;
210 struct skd_msg_buf *msg_buf;
211 dma_addr_t mb_dma_address;
214 typedef enum skd_irq_type {
220 #define SKD_MAX_BARS 2
223 void __iomem *mem_map[SKD_MAX_BARS];
224 resource_size_t mem_phys[SKD_MAX_BARS];
225 u32 mem_size[SKD_MAX_BARS];
227 struct skd_msix_entry *msix_entries;
229 struct pci_dev *pdev;
230 int pcie_error_reporting_is_enabled;
233 struct gendisk *disk;
234 struct request_queue *queue;
235 struct device *class_dev;
243 enum skd_drvr_state state;
247 u32 cur_max_queue_depth;
248 u32 queue_low_water_mark;
249 u32 dev_max_queue_depth;
251 u32 num_fitmsg_context;
254 u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
256 struct skd_fitmsg_context *skmsg_table;
258 struct skd_request_context *skreq_table;
260 struct skd_special_context internal_skspcl;
261 u32 read_cap_blocksize;
262 u32 read_cap_last_lba;
263 int read_cap_is_valid;
264 int inquiry_is_valid;
265 u8 inq_serial_num[13]; /* 12 chars plus null term */
269 struct fit_completion_entry_v1 *skcomp_table;
270 struct fit_comp_error_info *skerr_table;
271 dma_addr_t cq_dma_address;
273 wait_queue_head_t waitq;
275 struct timer_list timer;
285 u32 connect_time_stamp;
287 #define SKD_MAX_CONNECT_RETRIES 16
292 struct work_struct completion_worker;
295 #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
296 #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
297 #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
299 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
301 u32 val = readl(skdev->mem_map[1] + offset);
303 if (unlikely(skdev->dbg_level >= 2))
304 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
308 static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
311 writel(val, skdev->mem_map[1] + offset);
312 if (unlikely(skdev->dbg_level >= 2))
313 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
316 static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
319 writeq(val, skdev->mem_map[1] + offset);
320 if (unlikely(skdev->dbg_level >= 2))
321 dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
326 #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
327 static int skd_isr_type = SKD_IRQ_DEFAULT;
329 module_param(skd_isr_type, int, 0444);
330 MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
331 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
333 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
334 static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
336 module_param(skd_max_req_per_msg, int, 0444);
337 MODULE_PARM_DESC(skd_max_req_per_msg,
338 "Maximum SCSI requests packed in a single message."
339 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
341 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
342 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
343 static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
345 module_param(skd_max_queue_depth, int, 0444);
346 MODULE_PARM_DESC(skd_max_queue_depth,
347 "Maximum SCSI requests issued to s1120."
348 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
350 static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
351 module_param(skd_sgs_per_request, int, 0444);
352 MODULE_PARM_DESC(skd_sgs_per_request,
353 "Maximum SG elements per block request."
354 " (1-4096, default==256)");
356 static int skd_max_pass_thru = 1;
357 module_param(skd_max_pass_thru, int, 0444);
358 MODULE_PARM_DESC(skd_max_pass_thru,
359 "Maximum SCSI pass-thru at a time. IGNORED");
361 module_param(skd_dbg_level, int, 0444);
362 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
364 module_param(skd_isr_comp_limit, int, 0444);
365 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
367 /* Major device number dynamically assigned. */
368 static u32 skd_major;
370 static void skd_destruct(struct skd_device *skdev);
371 static const struct block_device_operations skd_blockdev_ops;
372 static void skd_send_fitmsg(struct skd_device *skdev,
373 struct skd_fitmsg_context *skmsg);
374 static void skd_send_special_fitmsg(struct skd_device *skdev,
375 struct skd_special_context *skspcl);
376 static void skd_request_fn(struct request_queue *rq);
377 static void skd_end_request(struct skd_device *skdev, struct request *req,
378 blk_status_t status);
379 static bool skd_preop_sg_list(struct skd_device *skdev,
380 struct skd_request_context *skreq);
381 static void skd_postop_sg_list(struct skd_device *skdev,
382 struct skd_request_context *skreq);
384 static void skd_restart_device(struct skd_device *skdev);
385 static int skd_quiesce_dev(struct skd_device *skdev);
386 static int skd_unquiesce_dev(struct skd_device *skdev);
387 static void skd_disable_interrupts(struct skd_device *skdev);
388 static void skd_isr_fwstate(struct skd_device *skdev);
389 static void skd_recover_requests(struct skd_device *skdev);
390 static void skd_soft_reset(struct skd_device *skdev);
392 const char *skd_drive_state_to_str(int state);
393 const char *skd_skdev_state_to_str(enum skd_drvr_state state);
394 static void skd_log_skdev(struct skd_device *skdev, const char *event);
395 static void skd_log_skreq(struct skd_device *skdev,
396 struct skd_request_context *skreq, const char *event);
399 *****************************************************************************
400 * READ/WRITE REQUESTS
401 *****************************************************************************
403 static void skd_fail_all_pending(struct skd_device *skdev)
405 struct request_queue *q = skdev->queue;
409 req = blk_peek_request(q);
412 WARN_ON_ONCE(blk_queue_start_tag(q, req));
413 __blk_end_request_all(req, BLK_STS_IOERR);
418 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
419 int data_dir, unsigned lba,
422 if (data_dir == READ)
423 scsi_req->cdb[0] = READ_10;
425 scsi_req->cdb[0] = WRITE_10;
427 scsi_req->cdb[1] = 0;
428 scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
429 scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
430 scsi_req->cdb[4] = (lba & 0xff00) >> 8;
431 scsi_req->cdb[5] = (lba & 0xff);
432 scsi_req->cdb[6] = 0;
433 scsi_req->cdb[7] = (count & 0xff00) >> 8;
434 scsi_req->cdb[8] = count & 0xff;
435 scsi_req->cdb[9] = 0;
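/*
 * Example (illustrative values): a READ of 8 sectors at lba 0x12345678
 * produces the 10-byte CDB
 *	{ READ_10, 0, 0x12, 0x34, 0x56, 0x78, 0, 0x00, 0x08, 0 }
 * i.e. a big-endian LBA in bytes 2-5 and a big-endian count in 7-8.
 */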
439 skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
440 struct skd_request_context *skreq)
442 skreq->flush_cmd = 1;
444 scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
445 scsi_req->cdb[1] = 0;
446 scsi_req->cdb[2] = 0;
447 scsi_req->cdb[3] = 0;
448 scsi_req->cdb[4] = 0;
449 scsi_req->cdb[5] = 0;
450 scsi_req->cdb[6] = 0;
451 scsi_req->cdb[7] = 0;
452 scsi_req->cdb[8] = 0;
453 scsi_req->cdb[9] = 0;
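/*
 * Per SBC, a SYNCHRONIZE_CACHE whose LBA and number-of-blocks fields
 * are both zero asks the device to flush its cache for the entire
 * medium, which is exactly what a zero-size flush wants.
 */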
457 * Return true if and only if all pending requests should be failed.
459 static bool skd_fail_all(struct request_queue *q)
461 struct skd_device *skdev = q->queuedata;
463 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
465 skd_log_skdev(skdev, "req_not_online");
466 switch (skdev->state) {
467 case SKD_DRVR_STATE_PAUSING:
468 case SKD_DRVR_STATE_PAUSED:
469 case SKD_DRVR_STATE_STARTING:
470 case SKD_DRVR_STATE_RESTARTING:
471 case SKD_DRVR_STATE_WAIT_BOOT:
472 /* In case of starting, we haven't started the queue,
473 * so we can't get here... but requests are
474 * possibly hanging out waiting for us because we
475 * reported /dev/skd0 already. They'll wait
476 * forever if connect doesn't complete.
477 * What to do??? delay /dev/skd0 ??
479 case SKD_DRVR_STATE_BUSY:
480 case SKD_DRVR_STATE_BUSY_IMMINENT:
481 case SKD_DRVR_STATE_BUSY_ERASE:
482 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
485 case SKD_DRVR_STATE_BUSY_SANITIZE:
486 case SKD_DRVR_STATE_STOPPING:
487 case SKD_DRVR_STATE_SYNCING:
488 case SKD_DRVR_STATE_FAULT:
489 case SKD_DRVR_STATE_DISAPPEARED:
495 static void skd_request_fn(struct request_queue *q)
497 struct skd_device *skdev = q->queuedata;
498 struct skd_fitmsg_context *skmsg = NULL;
499 struct fit_msg_hdr *fmh = NULL;
500 struct skd_request_context *skreq;
501 struct request *req = NULL;
502 struct skd_scsi_request *scsi_req;
503 unsigned long io_flags;
513 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
515 skd_fail_all_pending(skdev);
519 if (blk_queue_stopped(skdev->queue)) {
520 if (skdev->in_flight >= skdev->queue_low_water_mark)
521 /* There is still some kind of shortage */
524 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
529 * - There are no more native requests
530 * - There are already the maximum number of requests in progress
531 * - There are no more skd_request_context entries
532 * - There are no more FIT msg buffers
538 req = blk_peek_request(q);
540 /* Are there any native requests to start? */
544 lba = (u32)blk_rq_pos(req);
545 count = blk_rq_sectors(req);
546 data_dir = rq_data_dir(req);
547 io_flags = req->cmd_flags;
549 if (req_op(req) == REQ_OP_FLUSH)
552 if (io_flags & REQ_FUA)
555 dev_dbg(&skdev->pdev->dev,
556 "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
557 req, lba, lba, count, count, data_dir);
559 /* At this point we know there is a request */
561 /* Are too many requests already in progress? */
562 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
563 dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
564 skdev->in_flight, skdev->cur_max_queue_depth);
569 * OK to now dequeue request from q.
571 * At this point we are committed to either start or reject
572 * the native request. Note that skd_request_context is
573 * available but is still at the head of the free list.
575 WARN_ON_ONCE(blk_queue_start_tag(q, req));
577 tag = blk_mq_unique_tag(req);
578 WARN_ONCE(tag >= skd_max_queue_depth,
579 "%#x >= %#x (nr_requests = %lu)\n", tag,
580 skd_max_queue_depth, q->nr_requests);
582 skreq = &skdev->skreq_table[tag];
583 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
584 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
586 skreq->id = tag + SKD_ID_RW_REQUEST;
587 skreq->flush_cmd = 0;
589 skreq->sg_byte_count = 0;
592 skreq->fitmsg_id = 0;
594 skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE :
597 if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
598 dev_dbg(&skdev->pdev->dev, "error Out\n");
599 skd_end_request(skdev, skreq->req, BLK_STS_RESOURCE);
603 /* Either a FIT msg is in progress or we have to start one. */
605 skmsg = &skdev->skmsg_table[tag];
607 /* Initialize the FIT msg header */
608 fmh = &skmsg->msg_buf->fmh;
609 memset(fmh, 0, sizeof(*fmh));
610 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
611 skmsg->length = sizeof(*fmh);
614 skreq->fitmsg_id = skmsg->id;
617 &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
618 memset(scsi_req, 0, sizeof(*scsi_req));
620 be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
621 cmdctxt = skreq->id + SKD_ID_INCR;
623 scsi_req->hdr.tag = cmdctxt;
624 scsi_req->hdr.sg_list_dma_address = be_dmaa;
626 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
627 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
628 SKD_ASSERT(skreq->flush_cmd == 1);
630 skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
634 scsi_req->cdb[1] |= SKD_FUA_NV;
636 scsi_req->hdr.sg_list_len_bytes =
637 cpu_to_be32(skreq->sg_byte_count);
639 /* Complete resource allocations. */
640 skreq->state = SKD_REQ_STATE_BUSY;
641 skreq->id += SKD_ID_INCR;
643 skmsg->length += sizeof(struct skd_scsi_request);
644 fmh->num_protocol_cmds_coalesced++;
647 * Update the active request counts.
648 * Capture the timeout timestamp.
650 skreq->timeout_stamp = skdev->timeout_stamp;
651 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
652 skdev->timeout_slot[timo_slot]++;
654 dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
658 * If the FIT msg buffer is full send it.
660 if (fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
661 skd_send_fitmsg(skdev, skmsg);
667 /* If the FIT msg buffer is not empty send what we got. */
669 WARN_ON_ONCE(!fmh->num_protocol_cmds_coalesced);
670 skd_send_fitmsg(skdev, skmsg);
676 * If req is non-NULL it means there is something to do but
677 * we are out of a resource.
680 blk_stop_queue(skdev->queue);
683 static void skd_end_request(struct skd_device *skdev, struct request *req,
686 if (unlikely(error)) {
687 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
688 u32 lba = (u32)blk_rq_pos(req);
689 u32 count = blk_rq_sectors(req);
691 dev_err(&skdev->pdev->dev,
692 "Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
695 dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", req->tag,
698 __blk_end_request_all(req, error);
701 static bool skd_preop_sg_list(struct skd_device *skdev,
702 struct skd_request_context *skreq)
704 struct request *req = skreq->req;
705 struct scatterlist *sgl = &skreq->sg[0], *sg;
709 skreq->sg_byte_count = 0;
711 WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
712 skreq->data_dir != DMA_FROM_DEVICE);
714 n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
719 * Map scatterlist to PCI bus addresses.
720 * Note PCI might change the number of entries.
722 n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
726 SKD_ASSERT(n_sg <= skdev->sgs_per_request);
730 for_each_sg(sgl, sg, n_sg, i) {
731 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
732 u32 cnt = sg_dma_len(sg);
733 uint64_t dma_addr = sg_dma_address(sg);
735 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
736 sgd->byte_count = cnt;
737 skreq->sg_byte_count += cnt;
738 sgd->host_side_addr = dma_addr;
739 sgd->dev_side_addr = 0;
742 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
743 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
745 if (unlikely(skdev->dbg_level > 1)) {
746 dev_dbg(&skdev->pdev->dev,
747 "skreq=%x sksg_list=%p sksg_dma=%llx\n",
748 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
749 for (i = 0; i < n_sg; i++) {
750 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
752 dev_dbg(&skdev->pdev->dev,
753 " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
754 i, sgd->byte_count, sgd->control,
755 sgd->host_side_addr, sgd->next_desc_ptr);
762 static void skd_postop_sg_list(struct skd_device *skdev,
763 struct skd_request_context *skreq)
766 * restore the next ptr for the next IO request so we
767 * don't have to set it every time.
769 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
770 skreq->sksg_dma_address +
771 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
772 pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
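/*
 * Illustrative: each request's descriptor chain is pre-linked once at
 * allocation time (sksg_list[i].next_desc_ptr points at descriptor
 * i + 1). skd_preop_sg_list() only NULL-terminates the last descriptor
 * actually used, and the restore above re-links it for the next I/O.
 */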
776 *****************************************************************************
778 *****************************************************************************
781 static void skd_timer_tick_not_online(struct skd_device *skdev);
783 static void skd_timer_tick(ulong arg)
785 struct skd_device *skdev = (struct skd_device *)arg;
788 unsigned long reqflags;
791 if (skdev->state == SKD_DRVR_STATE_FAULT)
792 /* The driver has declared fault, and we want it to
793 * stay that way until driver is reloaded.
797 spin_lock_irqsave(&skdev->lock, reqflags);
799 state = SKD_READL(skdev, FIT_STATUS);
800 state &= FIT_SR_DRIVE_STATE_MASK;
801 if (state != skdev->drive_state)
802 skd_isr_fwstate(skdev);
804 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
805 skd_timer_tick_not_online(skdev);
808 skdev->timeout_stamp++;
809 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
812 * All requests that happened during the previous use of
813 * this slot should be done by now. The previous use was
814 * over 3 seconds ago (one 1-second tick per slot, four slots).
816 if (skdev->timeout_slot[timo_slot] == 0)
819 /* Something is overdue */
820 dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
821 skdev->timeout_slot[timo_slot], skdev->in_flight);
822 dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
823 skdev->timeout_slot[timo_slot], skdev->in_flight);
825 skdev->timer_countdown = SKD_DRAINING_TIMO;
826 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
827 skdev->timo_slot = timo_slot;
828 blk_stop_queue(skdev->queue);
831 mod_timer(&skdev->timer, (jiffies + HZ));
833 spin_unlock_irqrestore(&skdev->lock, reqflags);
836 static void skd_timer_tick_not_online(struct skd_device *skdev)
838 switch (skdev->state) {
839 case SKD_DRVR_STATE_IDLE:
840 case SKD_DRVR_STATE_LOAD:
842 case SKD_DRVR_STATE_BUSY_SANITIZE:
843 dev_dbg(&skdev->pdev->dev,
844 "drive busy sanitize[%x], driver[%x]\n",
845 skdev->drive_state, skdev->state);
846 /* If we've been in sanitize for 3 seconds, we figure we're not
847 * going to get any more completions, so recover requests now
849 if (skdev->timer_countdown > 0) {
850 skdev->timer_countdown--;
853 skd_recover_requests(skdev);
856 case SKD_DRVR_STATE_BUSY:
857 case SKD_DRVR_STATE_BUSY_IMMINENT:
858 case SKD_DRVR_STATE_BUSY_ERASE:
859 dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
860 skdev->state, skdev->timer_countdown);
861 if (skdev->timer_countdown > 0) {
862 skdev->timer_countdown--;
865 dev_dbg(&skdev->pdev->dev,
866 "busy[%x], timedout=%d, restarting device.",
867 skdev->state, skdev->timer_countdown);
868 skd_restart_device(skdev);
871 case SKD_DRVR_STATE_WAIT_BOOT:
872 case SKD_DRVR_STATE_STARTING:
873 if (skdev->timer_countdown > 0) {
874 skdev->timer_countdown--;
877 /* For now, we fault the drive. Could attempt resets to
878 * recover at some point. */
879 skdev->state = SKD_DRVR_STATE_FAULT;
881 dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
884 /* start the queue so we can respond with error to requests */
885 /* wakeup anyone waiting for startup complete */
886 blk_start_queue(skdev->queue);
887 skdev->gendisk_on = -1;
888 wake_up_interruptible(&skdev->waitq);
891 case SKD_DRVR_STATE_ONLINE:
892 /* shouldn't get here. */
895 case SKD_DRVR_STATE_PAUSING:
896 case SKD_DRVR_STATE_PAUSED:
899 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
900 dev_dbg(&skdev->pdev->dev,
901 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
902 skdev->timo_slot, skdev->timer_countdown,
904 skdev->timeout_slot[skdev->timo_slot]);
905 /* if the slot has cleared we can let the I/O continue */
906 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
907 dev_dbg(&skdev->pdev->dev,
908 "Slot drained, starting queue.\n");
909 skdev->state = SKD_DRVR_STATE_ONLINE;
910 blk_start_queue(skdev->queue);
913 if (skdev->timer_countdown > 0) {
914 skdev->timer_countdown--;
917 skd_restart_device(skdev);
920 case SKD_DRVR_STATE_RESTARTING:
921 if (skdev->timer_countdown > 0) {
922 skdev->timer_countdown--;
925 /* For now, we fault the drive. Could attempt resets to
926 * recover at some point. */
927 skdev->state = SKD_DRVR_STATE_FAULT;
928 dev_err(&skdev->pdev->dev,
929 "DriveFault Reconnect Timeout (%x)\n",
933 * Recovering does two things:
934 * 1. completes IO with error
935 * 2. reclaims dma resources
936 * When is it safe to recover requests?
937 * - if the drive state is faulted
938 * - if the state is still soft reset after our timeout
939 * - if the drive registers are dead (state = FF)
940 * If it is "unsafe", we still need to recover, so we will
941 * disable pci bus mastering and disable our interrupts.
944 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
945 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
946 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
947 /* It never came out of soft reset. Try to
948 * recover the requests and then let them
949 * fail. This is to mitigate hung processes. */
950 skd_recover_requests(skdev);
952 dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
954 pci_disable_device(skdev->pdev);
955 skd_disable_interrupts(skdev);
956 skd_recover_requests(skdev);
959 /* start the queue so we can respond with error to requests */
960 /* wakeup anyone waiting for startup complete */
961 blk_start_queue(skdev->queue);
962 skdev->gendisk_on = -1;
963 wake_up_interruptible(&skdev->waitq);
966 case SKD_DRVR_STATE_RESUMING:
967 case SKD_DRVR_STATE_STOPPING:
968 case SKD_DRVR_STATE_SYNCING:
969 case SKD_DRVR_STATE_FAULT:
970 case SKD_DRVR_STATE_DISAPPEARED:
976 static int skd_start_timer(struct skd_device *skdev)
980 setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
982 rc = mod_timer(&skdev->timer, (jiffies + HZ));
984 dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
988 static void skd_kill_timer(struct skd_device *skdev)
990 del_timer_sync(&skdev->timer);
994 *****************************************************************************
995 * INTERNAL REQUESTS -- generated by driver itself
996 *****************************************************************************
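/*
 * Overview: device bring-up issues a chain of internal requests --
 * TEST_UNIT_READY, then a WRITE_BUFFER/READ_BUFFER loopback whose data
 * is compared byte for byte, then READ_CAPACITY and INQUIRY. Only when
 * the whole chain succeeds does skd_complete_internal() bring the
 * device online.
 */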
999 static int skd_format_internal_skspcl(struct skd_device *skdev)
1001 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1002 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1003 struct fit_msg_hdr *fmh;
1004 uint64_t dma_address;
1005 struct skd_scsi_request *scsi;
1007 fmh = &skspcl->msg_buf->fmh;
1008 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1009 fmh->num_protocol_cmds_coalesced = 1;
1011 scsi = &skspcl->msg_buf->scsi[0];
1012 memset(scsi, 0, sizeof(*scsi));
1013 dma_address = skspcl->req.sksg_dma_address;
1014 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1015 skspcl->req.n_sg = 1;
1016 sgd->control = FIT_SGD_CONTROL_LAST;
1017 sgd->byte_count = 0;
1018 sgd->host_side_addr = skspcl->db_dma_address;
1019 sgd->dev_side_addr = 0;
1020 sgd->next_desc_ptr = 0LL;
1025 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1027 static void skd_send_internal_skspcl(struct skd_device *skdev,
1028 struct skd_special_context *skspcl,
1031 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1032 struct skd_scsi_request *scsi;
1033 unsigned char *buf = skspcl->data_buf;
1036 if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1038 * A refresh is already in progress.
1039 * Just wait for it to finish.
1043 SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1044 skspcl->req.state = SKD_REQ_STATE_BUSY;
1045 skspcl->req.id += SKD_ID_INCR;
1047 scsi = &skspcl->msg_buf->scsi[0];
1048 scsi->hdr.tag = skspcl->req.id;
1050 memset(scsi->cdb, 0, sizeof(scsi->cdb));
1053 case TEST_UNIT_READY:
1054 scsi->cdb[0] = TEST_UNIT_READY;
1055 sgd->byte_count = 0;
1056 scsi->hdr.sg_list_len_bytes = 0;
1060 scsi->cdb[0] = READ_CAPACITY;
1061 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1062 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1066 scsi->cdb[0] = INQUIRY;
1067 scsi->cdb[1] = 0x01; /* evpd */
1068 scsi->cdb[2] = 0x80; /* serial number page */
1069 scsi->cdb[4] = 0x10;
1070 sgd->byte_count = 16;
1071 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1074 case SYNCHRONIZE_CACHE:
1075 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1076 sgd->byte_count = 0;
1077 scsi->hdr.sg_list_len_bytes = 0;
1081 scsi->cdb[0] = WRITE_BUFFER;
1082 scsi->cdb[1] = 0x02;
1083 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1084 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1085 sgd->byte_count = WR_BUF_SIZE;
1086 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1087 /* fill incrementing byte pattern */
1088 for (i = 0; i < sgd->byte_count; i++)
1093 scsi->cdb[0] = READ_BUFFER;
1094 scsi->cdb[1] = 0x02;
1095 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1096 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1097 sgd->byte_count = WR_BUF_SIZE;
1098 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1099 memset(skspcl->data_buf, 0, sgd->byte_count);
1103 SKD_ASSERT("Don't know what to send");
1107 skd_send_special_fitmsg(skdev, skspcl);
1110 static void skd_refresh_device_data(struct skd_device *skdev)
1112 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1114 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1117 static int skd_chk_read_buf(struct skd_device *skdev,
1118 struct skd_special_context *skspcl)
1120 unsigned char *buf = skspcl->data_buf;
1123 /* check for incrementing byte pattern */
1124 for (i = 0; i < WR_BUF_SIZE; i++)
1125 if (buf[i] != (i & 0xFF))
1131 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1132 u8 code, u8 qual, u8 fruc)
1134 /* If the check condition is of special interest, log a message */
1135 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1136 && (code == 0x04) && (qual == 0x06)) {
1137 dev_err(&skdev->pdev->dev,
1138 "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
1139 key, code, qual, fruc);
1143 static void skd_complete_internal(struct skd_device *skdev,
1144 struct fit_completion_entry_v1 *skcomp,
1145 struct fit_comp_error_info *skerr,
1146 struct skd_special_context *skspcl)
1148 u8 *buf = skspcl->data_buf;
1151 struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
1153 lockdep_assert_held(&skdev->lock);
1155 SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1157 dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
1159 skspcl->req.completion = *skcomp;
1160 skspcl->req.state = SKD_REQ_STATE_IDLE;
1161 skspcl->req.id += SKD_ID_INCR;
1163 status = skspcl->req.completion.status;
1165 skd_log_check_status(skdev, status, skerr->key, skerr->code,
1166 skerr->qual, skerr->fruc);
1168 switch (scsi->cdb[0]) {
1169 case TEST_UNIT_READY:
1170 if (status == SAM_STAT_GOOD)
1171 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1172 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1173 (skerr->key == MEDIUM_ERROR))
1174 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1176 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1177 dev_dbg(&skdev->pdev->dev,
1178 "TUR failed, don't send anymore state 0x%x\n",
1182 dev_dbg(&skdev->pdev->dev,
1183 "**** TUR failed, retry skerr\n");
1184 skd_send_internal_skspcl(skdev, skspcl,
1190 if (status == SAM_STAT_GOOD)
1191 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1193 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1194 dev_dbg(&skdev->pdev->dev,
1195 "write buffer failed, don't send anymore state 0x%x\n",
1199 dev_dbg(&skdev->pdev->dev,
1200 "**** write buffer failed, retry skerr\n");
1201 skd_send_internal_skspcl(skdev, skspcl,
1207 if (status == SAM_STAT_GOOD) {
1208 if (skd_chk_read_buf(skdev, skspcl) == 0)
1209 skd_send_internal_skspcl(skdev, skspcl,
1212 dev_err(&skdev->pdev->dev,
1213 "*** W/R Buffer mismatch %d ***\n",
1214 skdev->connect_retries);
1215 if (skdev->connect_retries <
1216 SKD_MAX_CONNECT_RETRIES) {
1217 skdev->connect_retries++;
1218 skd_soft_reset(skdev);
1220 dev_err(&skdev->pdev->dev,
1221 "W/R Buffer Connect Error\n");
1227 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1228 dev_dbg(&skdev->pdev->dev,
1229 "read buffer failed, don't send anymore state 0x%x\n",
1233 dev_dbg(&skdev->pdev->dev,
1234 "**** read buffer failed, retry skerr\n");
1235 skd_send_internal_skspcl(skdev, skspcl,
1241 skdev->read_cap_is_valid = 0;
1242 if (status == SAM_STAT_GOOD) {
1243 skdev->read_cap_last_lba =
1244 (buf[0] << 24) | (buf[1] << 16) |
1245 (buf[2] << 8) | buf[3];
1246 skdev->read_cap_blocksize =
1247 (buf[4] << 24) | (buf[5] << 16) |
1248 (buf[6] << 8) | buf[7];
1250 dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
1251 skdev->read_cap_last_lba,
1252 skdev->read_cap_blocksize);
1254 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1256 skdev->read_cap_is_valid = 1;
1258 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1259 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
1260 (skerr->key == MEDIUM_ERROR)) {
1261 skdev->read_cap_last_lba = ~0;
1262 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1263 dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
1264 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1266 dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
1267 skd_send_internal_skspcl(skdev, skspcl,
1273 skdev->inquiry_is_valid = 0;
1274 if (status == SAM_STAT_GOOD) {
1275 skdev->inquiry_is_valid = 1;
1277 for (i = 0; i < 12; i++)
1278 skdev->inq_serial_num[i] = buf[i + 4];
1279 skdev->inq_serial_num[12] = 0;
1282 if (skd_unquiesce_dev(skdev) < 0)
1283 dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n");
1284 /* connection is complete */
1285 skdev->connect_retries = 0;
1288 case SYNCHRONIZE_CACHE:
1289 if (status == SAM_STAT_GOOD)
1290 skdev->sync_done = 1;
1292 skdev->sync_done = -1;
1293 wake_up_interruptible(&skdev->waitq);
1297 SKD_ASSERT("we didn't send this");
1302 *****************************************************************************
1304 *****************************************************************************
1307 static void skd_send_fitmsg(struct skd_device *skdev,
1308 struct skd_fitmsg_context *skmsg)
1312 dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
1313 skmsg->mb_dma_address, skdev->in_flight);
1314 dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
1316 qcmd = skmsg->mb_dma_address;
1317 qcmd |= FIT_QCMD_QID_NORMAL;
1319 if (unlikely(skdev->dbg_level > 1)) {
1320 u8 *bp = (u8 *)skmsg->msg_buf;
1322 for (i = 0; i < skmsg->length; i += 8) {
1323 dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
1330 if (skmsg->length > 256)
1331 qcmd |= FIT_QCMD_MSGSIZE_512;
1332 else if (skmsg->length > 128)
1333 qcmd |= FIT_QCMD_MSGSIZE_256;
1334 else if (skmsg->length > 64)
1335 qcmd |= FIT_QCMD_MSGSIZE_128;
1338 * This makes no sense because the FIT msg header is
1339 * 64 bytes. If the msg is only 64 bytes long it has no payload.
1342 qcmd |= FIT_QCMD_MSGSIZE_64;
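/*
 * Illustrative sizing, assuming the 64-byte fit_msg_hdr and 32-byte
 * skd_scsi_request layouts from skd_s1120.h: one coalesced command
 * gives length 96 -> FIT_QCMD_MSGSIZE_128; the maximum of 14 commands
 * gives length 512 -> FIT_QCMD_MSGSIZE_512.
 */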
1344 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
1347 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1350 static void skd_send_special_fitmsg(struct skd_device *skdev,
1351 struct skd_special_context *skspcl)
1355 if (unlikely(skdev->dbg_level > 1)) {
1356 u8 *bp = (u8 *)skspcl->msg_buf;
1359 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
1360 dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
1366 dev_dbg(&skdev->pdev->dev,
1367 "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
1368 skspcl, skspcl->req.id, skspcl->req.sksg_list,
1369 skspcl->req.sksg_dma_address);
1370 for (i = 0; i < skspcl->req.n_sg; i++) {
1371 struct fit_sg_descriptor *sgd =
1372 &skspcl->req.sksg_list[i];
1374 dev_dbg(&skdev->pdev->dev,
1375 " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
1376 i, sgd->byte_count, sgd->control,
1377 sgd->host_side_addr, sgd->next_desc_ptr);
1382 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
1383 * and one 64-byte SSDI command.
1385 qcmd = skspcl->mb_dma_address;
1386 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
1388 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
1391 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1395 *****************************************************************************
1397 *****************************************************************************
1400 static void skd_complete_other(struct skd_device *skdev,
1401 struct fit_completion_entry_v1 *skcomp,
1402 struct fit_comp_error_info *skerr);
1411 enum skd_check_status_action action;
1414 static struct sns_info skd_chkstat_table[] = {
1416 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
1417 SKD_CHECK_STATUS_REPORT_GOOD },
1420 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
1421 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
1422 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
1423 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
1424 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
1425 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
1427 /* Retry (with limits) */
1428 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
1429 SKD_CHECK_STATUS_REQUEUE_REQUEST },
1430 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
1431 SKD_CHECK_STATUS_REQUEUE_REQUEST },
1432 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
1433 SKD_CHECK_STATUS_REQUEUE_REQUEST },
1434 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
1435 SKD_CHECK_STATUS_REQUEUE_REQUEST },
1437 /* Busy (or about to be) */
1438 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
1439 SKD_CHECK_STATUS_BUSY_IMMINENT },
1443 * Look up status and sense data to decide how to handle the error
1445 * mask says which fields must match, e.g. mask=0x18 means check
1446 * type and stat, ignore key, asc, ascq.
1449 static enum skd_check_status_action
1450 skd_check_status(struct skd_device *skdev,
1451 u8 cmp_status, struct fit_comp_error_info *skerr)
1455 dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
1456 skerr->key, skerr->code, skerr->qual, skerr->fruc);
1458 dev_dbg(&skdev->pdev->dev,
1459 "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
1460 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
1463 /* Does the info match an entry in the good category? */
1464 for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
1465 struct sns_info *sns = &skd_chkstat_table[i];
1467 if (sns->mask & 0x10)
1468 if (skerr->type != sns->type)
1471 if (sns->mask & 0x08)
1472 if (cmp_status != sns->stat)
1475 if (sns->mask & 0x04)
1476 if (skerr->key != sns->key)
1479 if (sns->mask & 0x02)
1480 if (skerr->code != sns->asc)
1483 if (sns->mask & 0x01)
1484 if (skerr->qual != sns->ascq)
1487 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
1488 dev_err(&skdev->pdev->dev,
1489 "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
1490 skerr->key, skerr->code, skerr->qual);
1495 /* No other match, so nonzero status means error,
1496 * zero status means good
1499 dev_dbg(&skdev->pdev->dev, "status check: error\n");
1500 return SKD_CHECK_STATUS_REPORT_ERROR;
1503 dev_dbg(&skdev->pdev->dev, "status check good default\n");
1504 return SKD_CHECK_STATUS_REPORT_GOOD;
1507 static void skd_resolve_req_exception(struct skd_device *skdev,
1508 struct skd_request_context *skreq,
1509 struct request *req)
1511 u8 cmp_status = skreq->completion.status;
1513 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
1514 case SKD_CHECK_STATUS_REPORT_GOOD:
1515 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
1516 skd_end_request(skdev, req, BLK_STS_OK);
1519 case SKD_CHECK_STATUS_BUSY_IMMINENT:
1520 skd_log_skreq(skdev, skreq, "retry(busy)");
1521 blk_requeue_request(skdev->queue, req);
1522 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
1523 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
1524 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
1525 skd_quiesce_dev(skdev);
1528 case SKD_CHECK_STATUS_REQUEUE_REQUEST:
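/*
 * req->special is (ab)used as a per-request retry counter here: it is
 * treated as an integer and incremented on every requeue until
 * SKD_MAX_RETRIES is reached, after which the request is failed.
 */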
1529 if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
1530 skd_log_skreq(skdev, skreq, "retry");
1531 blk_requeue_request(skdev->queue, req);
1536 case SKD_CHECK_STATUS_REPORT_ERROR:
1538 skd_end_request(skdev, req, BLK_STS_IOERR);
1543 /* assume spinlock is already held */
1544 static void skd_release_skreq(struct skd_device *skdev,
1545 struct skd_request_context *skreq)
1550 * Decrease the number of active requests.
1551 * Also decrements the count in the timeout slot.
1553 SKD_ASSERT(skdev->in_flight > 0);
1554 skdev->in_flight -= 1;
1556 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1557 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
1558 skdev->timeout_slot[timo_slot] -= 1;
1566 * Reclaim the skd_request_context
1568 skreq->state = SKD_REQ_STATE_IDLE;
1569 skreq->id += SKD_ID_INCR;
1572 static struct skd_request_context *skd_skreq_from_rq(struct skd_device *skdev,
1575 struct skd_request_context *skreq;
1578 for (i = 0, skreq = skdev->skreq_table; i < skdev->num_req_context;
1580 if (skreq->req == rq)
1586 static int skd_isr_completion_posted(struct skd_device *skdev,
1587 int limit, int *enqueued)
1589 struct fit_completion_entry_v1 *skcmp;
1590 struct fit_comp_error_info *skerr;
1594 struct skd_request_context *skreq;
1602 lockdep_assert_held(&skdev->lock);
1605 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
1607 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
1608 cmp_cycle = skcmp->cycle;
1609 cmp_cntxt = skcmp->tag;
1610 cmp_status = skcmp->status;
1611 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
1613 skerr = &skdev->skerr_table[skdev->skcomp_ix];
1615 dev_dbg(&skdev->pdev->dev,
1616 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
1617 skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
1618 cmp_cntxt, cmp_status, skdev->in_flight, cmp_bytes,
1621 if (cmp_cycle != skdev->skcomp_cycle) {
1622 dev_dbg(&skdev->pdev->dev, "end of completions\n");
1626 * Update the completion queue head index and possibly
1627 * the completion cycle count. 8-bit wrap-around.
1630 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
1631 skdev->skcomp_ix = 0;
1632 skdev->skcomp_cycle++;
1636 * The command context is a unique 32-bit ID. The low order
1637 * bits help locate the request. The request is usually a
1638 * r/w request (see skd_request_fn() above) or a special request.
1641 tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
1643 /* Is this other than a r/w request? */
1644 if (tag >= skdev->num_req_context) {
1646 * This is not a completion for a r/w request.
1648 WARN_ON_ONCE(blk_map_queue_find_tag(skdev->queue->
1650 skd_complete_other(skdev, skcmp, skerr);
1654 rq = blk_map_queue_find_tag(skdev->queue->queue_tags, tag);
1655 if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
1658 skreq = skd_skreq_from_rq(skdev, rq);
1661 * Make sure the request ID for the slot matches.
1663 if (skreq->id != req_id) {
1664 dev_dbg(&skdev->pdev->dev,
1665 "mismatch comp_id=0x%x req_id=0x%x\n", req_id,
1668 u16 new_id = cmp_cntxt;
1669 dev_err(&skdev->pdev->dev,
1670 "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
1671 req_id, skreq->id, new_id);
1677 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
1679 skreq->completion = *skcmp;
1680 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
1681 skreq->err_info = *skerr;
1682 skd_log_check_status(skdev, cmp_status, skerr->key,
1683 skerr->code, skerr->qual,
1686 /* Release DMA resources for the request. */
1687 if (skreq->n_sg > 0)
1688 skd_postop_sg_list(skdev, skreq);
1690 /* Mark the FIT msg and timeout slot as free. */
1691 skd_release_skreq(skdev, skreq);
1694 * Capture the outcome and post it back to the native request.
1696 if (likely(cmp_status == SAM_STAT_GOOD))
1697 skd_end_request(skdev, rq, BLK_STS_OK);
1699 skd_resolve_req_exception(skdev, skreq, rq);
1701 /* skd_isr_comp_limit equal to zero means no limit */
1703 if (++processed >= limit) {
1710 if ((skdev->state == SKD_DRVR_STATE_PAUSING)
1711 && (skdev->in_flight) == 0) {
1712 skdev->state = SKD_DRVR_STATE_PAUSED;
1713 wake_up_interruptible(&skdev->waitq);
1719 static void skd_complete_other(struct skd_device *skdev,
1720 struct fit_completion_entry_v1 *skcomp,
1721 struct fit_comp_error_info *skerr)
1726 struct skd_special_context *skspcl;
1728 lockdep_assert_held(&skdev->lock);
1730 req_id = skcomp->tag;
1731 req_table = req_id & SKD_ID_TABLE_MASK;
1732 req_slot = req_id & SKD_ID_SLOT_MASK;
1734 dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
1738 * Based on the request id, determine how to dispatch this completion.
1739 * This switch/case finds the good cases and forwards the
1740 * completion entry. Errors are reported below the switch.
1742 switch (req_table) {
1743 case SKD_ID_RW_REQUEST:
1745 * The caller, skd_isr_completion_posted() above,
1746 * handles r/w requests. The only way we get here
1747 * is if the req_slot is out of bounds.
1751 case SKD_ID_INTERNAL:
1752 if (req_slot == 0) {
1753 skspcl = &skdev->internal_skspcl;
1754 if (skspcl->req.id == req_id &&
1755 skspcl->req.state == SKD_REQ_STATE_BUSY) {
1756 skd_complete_internal(skdev,
1757 skcomp, skerr, skspcl);
1763 case SKD_ID_FIT_MSG:
1765 * These IDs should never appear in a completion record.
1771 * These IDs should never appear anywhere;
1777 * If we get here it is a bad or stale id.
1781 static void skd_reset_skcomp(struct skd_device *skdev)
1783 memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
1785 skdev->skcomp_ix = 0;
1786 skdev->skcomp_cycle = 1;
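/*
 * Illustrative: the cycle field is a producer/consumer handshake. The
 * table starts zeroed and skcomp_cycle starts at 1; firmware stamps
 * each entry it writes with the current cycle, so an entry whose cycle
 * differs from skdev->skcomp_cycle has not been produced yet (see the
 * end-of-completions test in skd_isr_completion_posted()). The expected
 * cycle is bumped each time skcomp_ix wraps.
 */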
1790 *****************************************************************************
1792 *****************************************************************************
1794 static void skd_completion_worker(struct work_struct *work)
1796 struct skd_device *skdev =
1797 container_of(work, struct skd_device, completion_worker);
1798 unsigned long flags;
1799 int flush_enqueued = 0;
1801 spin_lock_irqsave(&skdev->lock, flags);
1804 * pass in limit=0, which means no limit:
1805 * process everything in the compq
1807 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
1808 blk_run_queue_async(skdev->queue);
1810 spin_unlock_irqrestore(&skdev->lock, flags);
1813 static void skd_isr_msg_from_dev(struct skd_device *skdev);
1816 skd_isr(int irq, void *ptr)
1818 struct skd_device *skdev = ptr;
1823 int flush_enqueued = 0;
1825 spin_lock(&skdev->lock);
1828 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
1830 ack = FIT_INT_DEF_MASK;
1833 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
1836 /* As long as there is an interrupt pending on the device, keep
1837 * running the loop. When none is pending, get out; but if we've
1838 * never done any processing, call the completion handler anyway.
1841 /* No interrupts on device, but run the completion
1845 if (likely (skdev->state
1846 == SKD_DRVR_STATE_ONLINE))
1853 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
1855 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
1856 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
1857 if (intstat & FIT_ISH_COMPLETION_POSTED) {
1859 * If we have already deferred completion
1860 * processing, don't bother running it again
1864 skd_isr_completion_posted(skdev,
1865 skd_isr_comp_limit, &flush_enqueued);
1868 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
1869 skd_isr_fwstate(skdev);
1870 if (skdev->state == SKD_DRVR_STATE_FAULT ||
1872 SKD_DRVR_STATE_DISAPPEARED) {
1873 spin_unlock(&skdev->lock);
1878 if (intstat & FIT_ISH_MSG_FROM_DEV)
1879 skd_isr_msg_from_dev(skdev);
1883 if (unlikely(flush_enqueued))
1884 blk_run_queue_async(skdev->queue);
1887 schedule_work(&skdev->completion_worker);
1888 else if (!flush_enqueued)
1889 blk_run_queue_async(skdev->queue);
1891 spin_unlock(&skdev->lock);
1896 static void skd_drive_fault(struct skd_device *skdev)
1898 skdev->state = SKD_DRVR_STATE_FAULT;
1899 dev_err(&skdev->pdev->dev, "Drive FAULT\n");
1902 static void skd_drive_disappeared(struct skd_device *skdev)
1904 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
1905 dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
1908 static void skd_isr_fwstate(struct skd_device *skdev)
1913 int prev_driver_state = skdev->state;
1915 sense = SKD_READL(skdev, FIT_STATUS);
1916 state = sense & FIT_SR_DRIVE_STATE_MASK;
1918 dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
1919 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
1920 skd_drive_state_to_str(state), state);
1922 skdev->drive_state = state;
1924 switch (skdev->drive_state) {
1925 case FIT_SR_DRIVE_INIT:
1926 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
1927 skd_disable_interrupts(skdev);
1930 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
1931 skd_recover_requests(skdev);
1932 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
1933 skdev->timer_countdown = SKD_STARTING_TIMO;
1934 skdev->state = SKD_DRVR_STATE_STARTING;
1935 skd_soft_reset(skdev);
1938 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
1939 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1940 skdev->last_mtd = mtd;
1943 case FIT_SR_DRIVE_ONLINE:
1944 skdev->cur_max_queue_depth = skd_max_queue_depth;
1945 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
1946 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
1948 skdev->queue_low_water_mark =
1949 skdev->cur_max_queue_depth * 2 / 3 + 1;
1950 if (skdev->queue_low_water_mark < 1)
1951 skdev->queue_low_water_mark = 1;
1952 dev_info(&skdev->pdev->dev,
1953 "Queue depth limit=%d dev=%d lowat=%d\n",
1954 skdev->cur_max_queue_depth,
1955 skdev->dev_max_queue_depth,
1956 skdev->queue_low_water_mark);
1958 skd_refresh_device_data(skdev);
1961 case FIT_SR_DRIVE_BUSY:
1962 skdev->state = SKD_DRVR_STATE_BUSY;
1963 skdev->timer_countdown = SKD_BUSY_TIMO;
1964 skd_quiesce_dev(skdev);
1966 case FIT_SR_DRIVE_BUSY_SANITIZE:
1967 /* set timer for 3 seconds; we'll abort any unfinished
1968 * commands after that expires
1970 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
1971 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
1972 blk_start_queue(skdev->queue);
1974 case FIT_SR_DRIVE_BUSY_ERASE:
1975 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
1976 skdev->timer_countdown = SKD_BUSY_TIMO;
1978 case FIT_SR_DRIVE_OFFLINE:
1979 skdev->state = SKD_DRVR_STATE_IDLE;
1981 case FIT_SR_DRIVE_SOFT_RESET:
1982 switch (skdev->state) {
1983 case SKD_DRVR_STATE_STARTING:
1984 case SKD_DRVR_STATE_RESTARTING:
1985 /* Expected by a caller of skd_soft_reset() */
1988 skdev->state = SKD_DRVR_STATE_RESTARTING;
1992 case FIT_SR_DRIVE_FW_BOOTING:
1993 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
1994 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
1995 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
1998 case FIT_SR_DRIVE_DEGRADED:
1999 case FIT_SR_PCIE_LINK_DOWN:
2000 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
2003 case FIT_SR_DRIVE_FAULT:
2004 skd_drive_fault(skdev);
2005 skd_recover_requests(skdev);
2006 blk_start_queue(skdev->queue);
2009 /* PCIe bus returned all Fs? */
2011 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
2013 skd_drive_disappeared(skdev);
2014 skd_recover_requests(skdev);
2015 blk_start_queue(skdev->queue);
2019 * Unknown FW state. Wait for a state we recognize.
2023 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
2024 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
2025 skd_skdev_state_to_str(skdev->state), skdev->state);
2028 static void skd_recover_requests(struct skd_device *skdev)
2032 for (i = 0; i < skdev->num_req_context; i++) {
2033 struct skd_request_context *skreq = &skdev->skreq_table[i];
2034 struct request *req = skreq->req;
2036 if (skreq->state == SKD_REQ_STATE_BUSY) {
2037 skd_log_skreq(skdev, skreq, "recover");
2039 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
2040 SKD_ASSERT(req != NULL);
2042 /* Release DMA resources for the request. */
2043 if (skreq->n_sg > 0)
2044 skd_postop_sg_list(skdev, skreq);
2048 skreq->state = SKD_REQ_STATE_IDLE;
2049 skreq->id += SKD_ID_INCR;
2051 skd_end_request(skdev, req, BLK_STS_IOERR);
2055 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
2056 skdev->timeout_slot[i] = 0;
2058 skdev->in_flight = 0;
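/*
 * Overview: skd_isr_msg_from_dev() walks the startup handshake with the
 * firmware one message at a time: FITFW_INIT -> GET_CMDQ_DEPTH ->
 * SET_COMPQ_DEPTH -> SET_COMPQ_ADDR -> CMD_LOG_HOST_ID ->
 * CMD_LOG_TIME_STAMP_LO/HI -> ARM_QUEUE. Each device ack advances the
 * chain; last_mtd remembers which ack is expected next.
 */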
2061 static void skd_isr_msg_from_dev(struct skd_device *skdev)
2067 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2069 dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
2072 /* ignore any mtd that is an ack for something we didn't send */
2073 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
2076 switch (FIT_MXD_TYPE(mfd)) {
2077 case FIT_MTD_FITFW_INIT:
2078 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
2080 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
2081 dev_err(&skdev->pdev->dev, "protocol mismatch\n");
2082 dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
2083 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
2084 dev_err(&skdev->pdev->dev, " please upgrade driver\n");
2085 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
2086 skd_soft_reset(skdev);
2089 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
2090 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2091 skdev->last_mtd = mtd;
2094 case FIT_MTD_GET_CMDQ_DEPTH:
2095 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
2096 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
2097 SKD_N_COMPLETION_ENTRY);
2098 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2099 skdev->last_mtd = mtd;
2102 case FIT_MTD_SET_COMPQ_DEPTH:
2103 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
2104 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
2105 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2106 skdev->last_mtd = mtd;
2109 case FIT_MTD_SET_COMPQ_ADDR:
2110 skd_reset_skcomp(skdev);
2111 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
2112 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2113 skdev->last_mtd = mtd;
2116 case FIT_MTD_CMD_LOG_HOST_ID:
2117 skdev->connect_time_stamp = get_seconds();
2118 data = skdev->connect_time_stamp & 0xFFFF;
2119 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
2120 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2121 skdev->last_mtd = mtd;
2124 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
2125 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
2126 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
2127 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
2128 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2129 skdev->last_mtd = mtd;
2132 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
2133 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
2134 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
2135 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2136 skdev->last_mtd = mtd;
2138 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
2139 skdev->connect_time_stamp, skdev->drive_jiffies);
2142 case FIT_MTD_ARM_QUEUE:
2143 skdev->last_mtd = 0;
2145 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
2154 static void skd_disable_interrupts(struct skd_device *skdev)
2158 sense = SKD_READL(skdev, FIT_CONTROL);
2159 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
2160 SKD_WRITEL(skdev, sense, FIT_CONTROL);
2161 dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
2163 /* Note that all 1s are written. A 1 bit means
2164 * disable, a 0 bit means enable.
2166 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
2169 static void skd_enable_interrupts(struct skd_device *skdev)
2173 /* unmask interrupts first */
2174 val = FIT_ISH_FW_STATE_CHANGE +
2175 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
2177 /* Note that the complement of the mask is written. A 1 bit means
2178 * disable, a 0 bit means enable. */
2179 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
2180 dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
2182 val = SKD_READL(skdev, FIT_CONTROL);
2183 val |= FIT_CR_ENABLE_INTERRUPTS;
2184 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
2185 SKD_WRITEL(skdev, val, FIT_CONTROL);
2189 *****************************************************************************
2190 * START, STOP, RESTART, QUIESCE, UNQUIESCE
2191 *****************************************************************************
2194 static void skd_soft_reset(struct skd_device *skdev)
2198 val = SKD_READL(skdev, FIT_CONTROL);
2199 val |= (FIT_CR_SOFT_RESET);
2200 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
2201 SKD_WRITEL(skdev, val, FIT_CONTROL);
2204 static void skd_start_device(struct skd_device *skdev)
2206 unsigned long flags;
2210 spin_lock_irqsave(&skdev->lock, flags);
2212 /* ack all ghost interrupts */
2213 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2215 sense = SKD_READL(skdev, FIT_STATUS);
2217 dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
2219 state = sense & FIT_SR_DRIVE_STATE_MASK;
2220 skdev->drive_state = state;
2221 skdev->last_mtd = 0;
2223 skdev->state = SKD_DRVR_STATE_STARTING;
2224 skdev->timer_countdown = SKD_STARTING_TIMO;
2226 skd_enable_interrupts(skdev);
2228 switch (skdev->drive_state) {
2229 case FIT_SR_DRIVE_OFFLINE:
2230 dev_err(&skdev->pdev->dev, "Drive offline...\n");
2233 case FIT_SR_DRIVE_FW_BOOTING:
2234 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
2235 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2236 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
2239 case FIT_SR_DRIVE_BUSY_SANITIZE:
2240 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
2241 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2242 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2245 case FIT_SR_DRIVE_BUSY_ERASE:
2246 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
2247 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2248 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2251 case FIT_SR_DRIVE_INIT:
2252 case FIT_SR_DRIVE_ONLINE:
2253 skd_soft_reset(skdev);
2256 case FIT_SR_DRIVE_BUSY:
2257 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
2258 skdev->state = SKD_DRVR_STATE_BUSY;
2259 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2262 case FIT_SR_DRIVE_SOFT_RESET:
2263 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
2266 case FIT_SR_DRIVE_FAULT:
2267 /* Fault state is bad...soft reset won't do it...
2268 * Hard reset, maybe, but does it work on device?
2269 * For now, just fault so the system doesn't hang.
2271 skd_drive_fault(skdev);
2272 /* start the queue so we can respond with error to requests */
2273 dev_dbg(&skdev->pdev->dev, "starting queue\n");
2274 blk_start_queue(skdev->queue);
2275 skdev->gendisk_on = -1;
2276 wake_up_interruptible(&skdev->waitq);
2280 /* Most likely the device isn't there or isn't responding
2281 * to the BAR1 addresses. */
2282 skd_drive_disappeared(skdev);
2283 /* start the queue so we can respond with errors to requests */
2284 dev_dbg(&skdev->pdev->dev,
2285 "starting queue to error-out reqs\n");
2286 blk_start_queue(skdev->queue);
2287 skdev->gendisk_on = -1;
2288 wake_up_interruptible(&skdev->waitq);
2292 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
2293 skdev->drive_state);
2297 state = SKD_READL(skdev, FIT_CONTROL);
2298 dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
2300 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2301 dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
2303 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
2304 dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
2306 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2307 dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
2309 state = SKD_READL(skdev, FIT_HW_VERSION);
2310 dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
2312 spin_unlock_irqrestore(&skdev->lock, flags);
2315 static void skd_stop_device(struct skd_device *skdev)
2317 unsigned long flags;
2318 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2322 spin_lock_irqsave(&skdev->lock, flags);
2324 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
2325 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
2329 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
2330 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
2334 skdev->state = SKD_DRVR_STATE_SYNCING;
2335 skdev->sync_done = 0;
2337 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
2339 spin_unlock_irqrestore(&skdev->lock, flags);
2341 wait_event_interruptible_timeout(skdev->waitq,
2342 (skdev->sync_done), (10 * HZ));
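/*
 * sync_done is cleared above and set by the completion path (not shown
 * in this excerpt); the switch below distinguishes the three outcomes:
 * still zero (timed out), flushed successfully, or sync error.
 */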
2344 spin_lock_irqsave(&skdev->lock, flags);
2346 switch (skdev->sync_done) {
2348 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
2351 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
2354 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
2358 skdev->state = SKD_DRVR_STATE_STOPPING;
2359 spin_unlock_irqrestore(&skdev->lock, flags);
2361 skd_kill_timer(skdev);
2363 spin_lock_irqsave(&skdev->lock, flags);
2364 skd_disable_interrupts(skdev);
2366 /* ensure all ints on device are cleared */
2367 /* soft reset the device to unload with a clean slate */
2368 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2369 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
2371 spin_unlock_irqrestore(&skdev->lock, flags);
2373 /* poll every 100ms, 1 second timeout */
2374 for (i = 0; i < 10; i++) {
2376 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
2377 if (dev_state == FIT_SR_DRIVE_INIT)
2379 set_current_state(TASK_INTERRUPTIBLE);
2380 schedule_timeout(msecs_to_jiffies(100));
2383 if (dev_state != FIT_SR_DRIVE_INIT)
2384 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
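/*
 * Editorial sketch, not part of the driver: the 100 ms / 1 s polling
 * loop above can also be written with the generic helper from
 * <linux/iopoll.h>. Both function names here are hypothetical.
 */
static u32 skd_read_drive_state(struct skd_device *skdev)
{
	return SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
}

static int skd_wait_drive_init(struct skd_device *skdev)
{
	u32 dev_state;

	/* sleep 100 ms between reads, give up after 1 s */
	return readx_poll_timeout(skd_read_drive_state, skdev, dev_state,
				  dev_state == FIT_SR_DRIVE_INIT,
				  100 * USEC_PER_MSEC, USEC_PER_SEC);
}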
2388 /* assume spinlock is held */
2389 static void skd_restart_device(struct skd_device *skdev)
2393 /* ack all ghost interrupts */
2394 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2396 state = SKD_READL(skdev, FIT_STATUS);
2398 dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
2400 state &= FIT_SR_DRIVE_STATE_MASK;
2401 skdev->drive_state = state;
2402 skdev->last_mtd = 0;
2404 skdev->state = SKD_DRVR_STATE_RESTARTING;
2405 skdev->timer_countdown = SKD_RESTARTING_TIMO;
2407 skd_soft_reset(skdev);
2410 /* assume spinlock is held */
2411 static int skd_quiesce_dev(struct skd_device *skdev)
2415 switch (skdev->state) {
2416 case SKD_DRVR_STATE_BUSY:
2417 case SKD_DRVR_STATE_BUSY_IMMINENT:
2418 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
2419 blk_stop_queue(skdev->queue);
2421 case SKD_DRVR_STATE_ONLINE:
2422 case SKD_DRVR_STATE_STOPPING:
2423 case SKD_DRVR_STATE_SYNCING:
2424 case SKD_DRVR_STATE_PAUSING:
2425 case SKD_DRVR_STATE_PAUSED:
2426 case SKD_DRVR_STATE_STARTING:
2427 case SKD_DRVR_STATE_RESTARTING:
2428 case SKD_DRVR_STATE_RESUMING:
2431 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
2437 /* assume spinlock is held */
2438 static int skd_unquiesce_dev(struct skd_device *skdev)
2440 int prev_driver_state = skdev->state;
2442 skd_log_skdev(skdev, "unquiesce");
2443 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
2444 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
2447 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
2449 * If there has been a state change to anything other than
2450 * ONLINE, we will rely on a controller state change
2451 * to come back online and restart the queue.
2452 * The BUSY state means that the driver is ready to
2453 * continue normal processing but is waiting for the
2454 * controller to become available.
2456 skdev->state = SKD_DRVR_STATE_BUSY;
2457 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
2462 * Drive has just come online. The driver is either in startup,
2463 * paused performing a task, or busy waiting for hardware.
2465 switch (skdev->state) {
2466 case SKD_DRVR_STATE_PAUSED:
2467 case SKD_DRVR_STATE_BUSY:
2468 case SKD_DRVR_STATE_BUSY_IMMINENT:
2469 case SKD_DRVR_STATE_BUSY_ERASE:
2470 case SKD_DRVR_STATE_STARTING:
2471 case SKD_DRVR_STATE_RESTARTING:
2472 case SKD_DRVR_STATE_FAULT:
2473 case SKD_DRVR_STATE_IDLE:
2474 case SKD_DRVR_STATE_LOAD:
2475 skdev->state = SKD_DRVR_STATE_ONLINE;
2476 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
2477 skd_skdev_state_to_str(prev_driver_state),
2478 prev_driver_state, skd_skdev_state_to_str(skdev->state),
2480 dev_dbg(&skdev->pdev->dev,
2481 "**** device ONLINE...starting block queue\n");
2482 dev_dbg(&skdev->pdev->dev, "starting queue\n");
2483 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
2484 blk_start_queue(skdev->queue);
2485 skdev->gendisk_on = 1;
2486 wake_up_interruptible(&skdev->waitq);
2489 case SKD_DRVR_STATE_DISAPPEARED:
2491 dev_dbg(&skdev->pdev->dev,
2492 "**** driver state %d, not implemented\n",
2500 *****************************************************************************
2501 * PCIe MSI/MSI-X INTERRUPT HANDLERS
2502 *****************************************************************************
2505 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
2507 struct skd_device *skdev = skd_host_data;
2508 unsigned long flags;
2510 spin_lock_irqsave(&skdev->lock, flags);
2511 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2512 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2513 dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
2514 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2515 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
2516 spin_unlock_irqrestore(&skdev->lock, flags);
2520 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
2522 struct skd_device *skdev = skd_host_data;
2523 unsigned long flags;
2525 spin_lock_irqsave(&skdev->lock, flags);
2526 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2527 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2528 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
2529 skd_isr_fwstate(skdev);
2530 spin_unlock_irqrestore(&skdev->lock, flags);
2534 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
2536 struct skd_device *skdev = skd_host_data;
2537 unsigned long flags;
2538 int flush_enqueued = 0;
2541 spin_lock_irqsave(&skdev->lock, flags);
2542 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2543 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2544 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
2545 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
2548 blk_run_queue_async(skdev->queue);
2551 schedule_work(&skdev->completion_worker);
2552 else if (!flush_enqueued)
2553 blk_run_queue_async(skdev->queue);
2555 spin_unlock_irqrestore(&skdev->lock, flags);
2560 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
2562 struct skd_device *skdev = skd_host_data;
2563 unsigned long flags;
2565 spin_lock_irqsave(&skdev->lock, flags);
2566 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2567 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2568 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
2569 skd_isr_msg_from_dev(skdev);
2570 spin_unlock_irqrestore(&skdev->lock, flags);
2574 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
2576 struct skd_device *skdev = skd_host_data;
2577 unsigned long flags;
2579 spin_lock_irqsave(&skdev->lock, flags);
2580 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2581 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2582 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
2583 spin_unlock_irqrestore(&skdev->lock, flags);
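/*
 * All four MSI-X handlers above share one shape: take the device lock,
 * acknowledge the vector by writing its status bit back to
 * FIT_INT_STATUS_HOST, dispatch any per-vector work, and return with
 * the lock released.
 */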
2588 *****************************************************************************
2589 * PCIe MSI/MSI-X SETUP
2590 *****************************************************************************
2593 struct skd_msix_entry {
2597 struct skd_init_msix_entry {
2599 irq_handler_t handler;
2602 #define SKD_MAX_MSIX_COUNT 13
2603 #define SKD_MIN_MSIX_COUNT 7
2604 #define SKD_BASE_MSIX_IRQ 4
2606 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
2607 { "(DMA 0)", skd_reserved_isr },
2608 { "(DMA 1)", skd_reserved_isr },
2609 { "(DMA 2)", skd_reserved_isr },
2610 { "(DMA 3)", skd_reserved_isr },
2611 { "(State Change)", skd_statec_isr },
2612 { "(COMPL_Q)", skd_comp_q },
2613 { "(MSG)", skd_msg_isr },
2614 { "(Reserved)", skd_reserved_isr },
2615 { "(Reserved)", skd_reserved_isr },
2616 { "(Queue Full 0)", skd_qfull_isr },
2617 { "(Queue Full 1)", skd_qfull_isr },
2618 { "(Queue Full 2)", skd_qfull_isr },
2619 { "(Queue Full 3)", skd_qfull_isr },
2622 static int skd_acquire_msix(struct skd_device *skdev)
2625 struct pci_dev *pdev = skdev->pdev;
2627 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
2630 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
2634 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
2635 sizeof(struct skd_msix_entry), GFP_KERNEL);
2636 if (!skdev->msix_entries) {
2638 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
2642 /* Enable MSI-X vectors for the base queue */
2643 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
2644 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
2646 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
2647 "%s%d-msix %s", DRV_NAME, skdev->devno,
2648 msix_entries[i].name);
2650 rc = devm_request_irq(&skdev->pdev->dev,
2651 pci_irq_vector(skdev->pdev, i),
2652 msix_entries[i].handler, 0,
2653 qentry->isr_name, skdev);
2655 dev_err(&skdev->pdev->dev,
2656 "Unable to register(%d) MSI-X handler %d: %s\n",
2657 rc, i, qentry->isr_name);
2662 dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
2663 SKD_MAX_MSIX_COUNT);
2668 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
2670 kfree(skdev->msix_entries);
2671 skdev->msix_entries = NULL;
2675 static int skd_acquire_irq(struct skd_device *skdev)
2677 struct pci_dev *pdev = skdev->pdev;
2678 unsigned int irq_flag = PCI_IRQ_LEGACY;
2681 if (skd_isr_type == SKD_IRQ_MSIX) {
2682 rc = skd_acquire_msix(skdev);
2686 dev_err(&skdev->pdev->dev,
2687 "failed to enable MSI-X, re-trying with MSI %d\n", rc);
2690 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
2693 if (skd_isr_type != SKD_IRQ_LEGACY)
2694 irq_flag |= PCI_IRQ_MSI;
2695 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
2697 dev_err(&skdev->pdev->dev,
2698 "failed to allocate the MSI interrupt %d\n", rc);
2702 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
2703 pdev->msi_enabled ? 0 : IRQF_SHARED,
2704 skdev->isr_name, skdev);
2706 pci_free_irq_vectors(pdev);
2707 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
2715 static void skd_release_irq(struct skd_device *skdev)
2717 struct pci_dev *pdev = skdev->pdev;
2719 if (skdev->msix_entries) {
2722 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
2723 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
2727 kfree(skdev->msix_entries);
2728 skdev->msix_entries = NULL;
2730 devm_free_irq(&pdev->dev, pdev->irq, skdev);
2733 pci_free_irq_vectors(pdev);
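/*
 * Editorial sketch, not part of the driver: where the interrupt type
 * need not stay user selectable via skd_isr_type, the MSI-X/MSI/legacy
 * fallback ladder above collapses into a single allocation call:
 *
 *	rc = pci_alloc_irq_vectors(pdev, 1, SKD_MAX_MSIX_COUNT,
 *				   PCI_IRQ_MSIX | PCI_IRQ_MSI |
 *				   PCI_IRQ_LEGACY);
 *
 * pci_alloc_irq_vectors() then picks the best supported type itself
 * and returns the number of vectors it obtained.
 */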
2737 *****************************************************************************
2738 * CONSTRUCT
2739 *****************************************************************************
2742 static int skd_cons_skcomp(struct skd_device *skdev)
2745 struct fit_completion_entry_v1 *skcomp;
2747 dev_dbg(&skdev->pdev->dev,
2748 "comp pci_alloc, total bytes %zd entries %d\n",
2749 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
2751 skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
2752 &skdev->cq_dma_address);
2754 if (skcomp == NULL) {
2759 skdev->skcomp_table = skcomp;
2760 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
2762 SKD_N_COMPLETION_ENTRY);
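/*
 * Editorial sketch, not part of the driver: pci_zalloc_consistent() is
 * the legacy PCI wrapper around the generic DMA API; the equivalent
 * modern allocation would be:
 *
 *	skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
 *				    &skdev->cq_dma_address, GFP_KERNEL);
 *
 * (on recent kernels dma_alloc_coherent() returns zeroed memory).
 */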
2768 static int skd_cons_skmsg(struct skd_device *skdev)
2773 dev_dbg(&skdev->pdev->dev,
2774 "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
2775 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
2776 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
2778 skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
2779 sizeof(struct skd_fitmsg_context),
2781 if (skdev->skmsg_table == NULL) {
2786 for (i = 0; i < skdev->num_fitmsg_context; i++) {
2787 struct skd_fitmsg_context *skmsg;
2789 skmsg = &skdev->skmsg_table[i];
2791 skmsg->id = i + SKD_ID_FIT_MSG;
2793 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
2795 &skmsg->mb_dma_address);
2797 if (skmsg->msg_buf == NULL) {
2802 WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
2803 (FIT_QCMD_ALIGN - 1),
2804 "not aligned: msg_buf %p mb_dma_address %#llx\n",
2805 skmsg->msg_buf, skmsg->mb_dma_address);
2806 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
2813 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
2815 dma_addr_t *ret_dma_addr)
2817 struct fit_sg_descriptor *sg_list;
2820 nbytes = sizeof(*sg_list) * n_sg;
2822 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
2824 if (sg_list != NULL) {
2825 uint64_t dma_address = *ret_dma_addr;
2828 memset(sg_list, 0, nbytes);
2830 for (i = 0; i < n_sg - 1; i++) {
2832 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
2834 sg_list[i].next_desc_ptr = dma_address + ndp_off;
2836 sg_list[i].next_desc_ptr = 0LL;
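/*
 * Worked example of the chain built above, for n_sg = 3: the
 * descriptors sit contiguously in one coherent buffer and each
 * next_desc_ptr holds the bus address of its successor:
 *
 *	sg_list[0].next_desc_ptr = dma_address + 1 * sizeof(*sg_list)
 *	sg_list[1].next_desc_ptr = dma_address + 2 * sizeof(*sg_list)
 *	sg_list[2].next_desc_ptr = 0	(terminates the list)
 */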
2842 static int skd_cons_skreq(struct skd_device *skdev)
2847 dev_dbg(&skdev->pdev->dev,
2848 "skreq_table kcalloc, struct %lu, count %u total %lu\n",
2849 sizeof(struct skd_request_context), skdev->num_req_context,
2850 sizeof(struct skd_request_context) * skdev->num_req_context);
2852 skdev->skreq_table = kcalloc(skdev->num_req_context,
2853 sizeof(struct skd_request_context),
2855 if (skdev->skreq_table == NULL) {
2860 dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %zu total %zu\n",
2861 skdev->sgs_per_request, sizeof(struct scatterlist),
2862 skdev->sgs_per_request * sizeof(struct scatterlist));
2864 for (i = 0; i < skdev->num_req_context; i++) {
2865 struct skd_request_context *skreq;
2867 skreq = &skdev->skreq_table[i];
2868 skreq->state = SKD_REQ_STATE_IDLE;
2869 skreq->sg = kcalloc(skdev->sgs_per_request,
2870 sizeof(struct scatterlist), GFP_KERNEL);
2871 if (skreq->sg == NULL) {
2875 sg_init_table(skreq->sg, skdev->sgs_per_request);
2877 skreq->sksg_list = skd_cons_sg_list(skdev,
2878 skdev->sgs_per_request,
2879 &skreq->sksg_dma_address);
2881 if (skreq->sksg_list == NULL) {
2891 static int skd_cons_sksb(struct skd_device *skdev)
2894 struct skd_special_context *skspcl;
2897 skspcl = &skdev->internal_skspcl;
2899 skspcl->req.id = 0 + SKD_ID_INTERNAL;
2900 skspcl->req.state = SKD_REQ_STATE_IDLE;
2902 nbytes = SKD_N_INTERNAL_BYTES;
2904 skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
2905 &skspcl->db_dma_address);
2906 if (skspcl->data_buf == NULL) {
2911 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
2912 skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
2913 &skspcl->mb_dma_address);
2914 if (skspcl->msg_buf == NULL) {
2919 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
2920 &skspcl->req.sksg_dma_address);
2921 if (skspcl->req.sksg_list == NULL) {
2926 if (!skd_format_internal_skspcl(skdev)) {
2935 static int skd_cons_disk(struct skd_device *skdev)
2938 struct gendisk *disk;
2939 struct request_queue *q;
2940 unsigned long flags;
2942 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
2949 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
2951 disk->major = skdev->major;
2952 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
2953 disk->fops = &skd_blockdev_ops;
2954 disk->private_data = skdev;
2956 q = blk_init_queue(skd_request_fn, &skdev->lock);
2961 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2962 q->nr_requests = skd_max_queue_depth / 2;
2963 blk_queue_init_tags(q, skd_max_queue_depth, NULL, BLK_TAG_ALLOC_FIFO);
2967 q->queuedata = skdev;
2969 blk_queue_write_cache(q, true, true);
2970 blk_queue_max_segments(q, skdev->sgs_per_request);
2971 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
2973 /* set optimal I/O size to 8KB */
2974 blk_queue_io_opt(q, 8192);
2976 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
2977 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
2979 spin_lock_irqsave(&skdev->lock, flags);
2980 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
2981 blk_stop_queue(skdev->queue);
2982 spin_unlock_irqrestore(&skdev->lock, flags);
2988 #define SKD_N_DEV_TABLE 16u
2989 static u32 skd_next_devno;
2991 static struct skd_device *skd_construct(struct pci_dev *pdev)
2993 struct skd_device *skdev;
2994 int blk_major = skd_major;
2997 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
3000 dev_err(&pdev->dev, "memory alloc failure\n");
3004 skdev->state = SKD_DRVR_STATE_LOAD;
3006 skdev->devno = skd_next_devno++;
3007 skdev->major = blk_major;
3008 skdev->dev_max_queue_depth = 0;
3010 skdev->num_req_context = skd_max_queue_depth;
3011 skdev->num_fitmsg_context = skd_max_queue_depth;
3012 skdev->cur_max_queue_depth = 1;
3013 skdev->queue_low_water_mark = 1;
3014 skdev->proto_ver = 99;
3015 skdev->sgs_per_request = skd_sgs_per_request;
3016 skdev->dbg_level = skd_dbg_level;
3018 spin_lock_init(&skdev->lock);
3020 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
3022 dev_dbg(&skdev->pdev->dev, "skcomp\n");
3023 rc = skd_cons_skcomp(skdev);
3027 dev_dbg(&skdev->pdev->dev, "skmsg\n");
3028 rc = skd_cons_skmsg(skdev);
3032 dev_dbg(&skdev->pdev->dev, "skreq\n");
3033 rc = skd_cons_skreq(skdev);
3037 dev_dbg(&skdev->pdev->dev, "sksb\n");
3038 rc = skd_cons_sksb(skdev);
3042 dev_dbg(&skdev->pdev->dev, "disk\n");
3043 rc = skd_cons_disk(skdev);
3047 dev_dbg(&skdev->pdev->dev, "VICTORY\n");
3051 dev_dbg(&skdev->pdev->dev, "construct failed\n");
3052 skd_destruct(skdev);
3057 *****************************************************************************
3058 * DESTRUCT (FREE)
3059 *****************************************************************************
3062 static void skd_free_skcomp(struct skd_device *skdev)
3064 if (skdev->skcomp_table)
3065 pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
3066 skdev->skcomp_table, skdev->cq_dma_address);
3068 skdev->skcomp_table = NULL;
3069 skdev->cq_dma_address = 0;
3072 static void skd_free_skmsg(struct skd_device *skdev)
3076 if (skdev->skmsg_table == NULL)
3079 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3080 struct skd_fitmsg_context *skmsg;
3082 skmsg = &skdev->skmsg_table[i];
3084 if (skmsg->msg_buf != NULL) {
3085 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
3087 skmsg->mb_dma_address);
3089 skmsg->msg_buf = NULL;
3090 skmsg->mb_dma_address = 0;
3093 kfree(skdev->skmsg_table);
3094 skdev->skmsg_table = NULL;
3097 static void skd_free_sg_list(struct skd_device *skdev,
3098 struct fit_sg_descriptor *sg_list,
3099 u32 n_sg, dma_addr_t dma_addr)
3101 if (sg_list != NULL) {
3104 nbytes = sizeof(*sg_list) * n_sg;
3106 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
3110 static void skd_free_skreq(struct skd_device *skdev)
3114 if (skdev->skreq_table == NULL)
3117 for (i = 0; i < skdev->num_req_context; i++) {
3118 struct skd_request_context *skreq;
3120 skreq = &skdev->skreq_table[i];
3122 skd_free_sg_list(skdev, skreq->sksg_list,
3123 skdev->sgs_per_request,
3124 skreq->sksg_dma_address);
3126 skreq->sksg_list = NULL;
3127 skreq->sksg_dma_address = 0;
3132 kfree(skdev->skreq_table);
3133 skdev->skreq_table = NULL;
3136 static void skd_free_sksb(struct skd_device *skdev)
3138 struct skd_special_context *skspcl;
3141 skspcl = &skdev->internal_skspcl;
3143 if (skspcl->data_buf != NULL) {
3144 nbytes = SKD_N_INTERNAL_BYTES;
3146 pci_free_consistent(skdev->pdev, nbytes,
3147 skspcl->data_buf, skspcl->db_dma_address);
3150 skspcl->data_buf = NULL;
3151 skspcl->db_dma_address = 0;
3153 if (skspcl->msg_buf != NULL) {
3154 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
3155 pci_free_consistent(skdev->pdev, nbytes,
3156 skspcl->msg_buf, skspcl->mb_dma_address);
3159 skspcl->msg_buf = NULL;
3160 skspcl->mb_dma_address = 0;
3162 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
3163 skspcl->req.sksg_dma_address);
3165 skspcl->req.sksg_list = NULL;
3166 skspcl->req.sksg_dma_address = 0;
3169 static void skd_free_disk(struct skd_device *skdev)
3171 struct gendisk *disk = skdev->disk;
3173 if (disk && (disk->flags & GENHD_FL_UP))
3177 blk_cleanup_queue(skdev->queue);
3178 skdev->queue = NULL;
3186 static void skd_destruct(struct skd_device *skdev)
3191 dev_dbg(&skdev->pdev->dev, "disk\n");
3192 skd_free_disk(skdev);
3194 dev_dbg(&skdev->pdev->dev, "sksb\n");
3195 skd_free_sksb(skdev);
3197 dev_dbg(&skdev->pdev->dev, "skreq\n");
3198 skd_free_skreq(skdev);
3200 dev_dbg(&skdev->pdev->dev, "skmsg\n");
3201 skd_free_skmsg(skdev);
3203 dev_dbg(&skdev->pdev->dev, "skcomp\n");
3204 skd_free_skcomp(skdev);
3206 dev_dbg(&skdev->pdev->dev, "skdev\n");
3211 *****************************************************************************
3212 * BLOCK DEVICE (BDEV) GLUE
3213 *****************************************************************************
3216 static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3218 struct skd_device *skdev;
3221 skdev = bdev->bd_disk->private_data;
3223 dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
3224 bdev->bd_disk->disk_name, current->comm);
3226 if (skdev->read_cap_is_valid) {
3227 capacity = get_capacity(skdev->disk);
3230 geo->cylinders = (capacity) / (255 * 64);
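/*
 * Worked example for the line above (the heads and sectors-per-track
 * assignments are elided in this excerpt): with the implied 255 heads
 * and 64 sectors the divisor is 16320, so a drive reporting 2^31
 * 512-byte sectors (1 TiB) gets 2147483648 / 16320 = 131586 cylinders.
 */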
3237 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
3239 dev_dbg(&skdev->pdev->dev, "add_disk\n");
3240 device_add_disk(parent, skdev->disk);
3244 static const struct block_device_operations skd_blockdev_ops = {
3245 .owner = THIS_MODULE,
3246 .getgeo = skd_bdev_getgeo,
3250 *****************************************************************************
3251 * PCIe DRIVER GLUE
3252 *****************************************************************************
3255 static const struct pci_device_id skd_pci_tbl[] = {
3256 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
3257 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
3258 { 0 } /* terminate list */
3261 MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
3263 static char *skd_pci_info(struct skd_device *skdev, char *str)
3267 strcpy(str, "PCIe (");
3268 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
3273 uint16_t pcie_lstat, lspeed, lwidth;
3276 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
3277 lspeed = pcie_lstat & (0xF);
3278 lwidth = (pcie_lstat & 0x3F0) >> 4;
3281 strcat(str, "2.5GT/s ");
3282 else if (lspeed == 2)
3283 strcat(str, "5.0GT/s ");
3285 strcat(str, "<unknown> ");
3286 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
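/*
 * Editorial sketch, not part of the driver: the capability walk and
 * manual masking above can use the PCIe accessors and named masks
 * instead:
 *
 *	u16 lnksta;
 *
 *	pcie_capability_read_word(skdev->pdev, PCI_EXP_LNKSTA, &lnksta);
 *	lspeed = lnksta & PCI_EXP_LNKSTA_CLS;
 *	lwidth = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
 */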
3292 static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3297 struct skd_device *skdev;
3299 dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n",
3300 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
3301 dev_info(&pdev->dev, "vendor=%04x device=%04x\n", pdev->vendor,
3304 rc = pci_enable_device(pdev);
3307 rc = pci_request_regions(pdev, DRV_NAME);
3310 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3312 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3313 dev_err(&pdev->dev, "consistent DMA mask error %d\n",
3317 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3319 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
3320 goto err_out_regions;
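/*
 * Editorial sketch, not part of the driver: the paired
 * pci_set_dma_mask()/pci_set_consistent_dma_mask() calls above, with
 * their 64-to-32-bit fallback, map to one modern helper:
 *
 *	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (rc)
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (rc)
 *		goto err_out_regions;
 */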
3325 rc = register_blkdev(0, DRV_NAME);
3327 goto err_out_regions;
3332 skdev = skd_construct(pdev);
3333 if (skdev == NULL) {
3335 goto err_out_regions;
3338 skd_pci_info(skdev, pci_str);
3339 dev_info(&pdev->dev, "%s 64bit\n", pci_str);
3341 pci_set_master(pdev);
3342 rc = pci_enable_pcie_error_reporting(pdev);
3345 "bad enable of PCIe error reporting rc=%d\n", rc);
3346 skdev->pcie_error_reporting_is_enabled = 0;
3348 skdev->pcie_error_reporting_is_enabled = 1;
3350 pci_set_drvdata(pdev, skdev);
3352 for (i = 0; i < SKD_MAX_BARS; i++) {
3353 skdev->mem_phys[i] = pci_resource_start(pdev, i);
3354 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
3355 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
3356 skdev->mem_size[i]);
3357 if (!skdev->mem_map[i]) {
3359 "Unable to map adapter memory!\n");
3361 goto err_out_iounmap;
3363 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
3364 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
3365 skdev->mem_size[i]);
3368 rc = skd_acquire_irq(skdev);
3370 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
3371 goto err_out_iounmap;
3374 rc = skd_start_timer(skdev);
3378 init_waitqueue_head(&skdev->waitq);
3380 skd_start_device(skdev);
3382 rc = wait_event_interruptible_timeout(skdev->waitq,
3383 (skdev->gendisk_on),
3384 (SKD_START_WAIT_SECONDS * HZ));
3385 if (skdev->gendisk_on > 0) {
3386 /* device came on-line after reset */
3387 skd_bdev_attach(&pdev->dev, skdev);
3390 /* we timed out, something is wrong with the device,
3391 * don't add the disk structure */
3392 dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
3394 /* with no other error recorded, report the timeout as ENXIO */
3403 skd_stop_device(skdev);
3404 skd_release_irq(skdev);
3407 for (i = 0; i < SKD_MAX_BARS; i++)
3408 if (skdev->mem_map[i])
3409 iounmap(skdev->mem_map[i]);
3411 if (skdev->pcie_error_reporting_is_enabled)
3412 pci_disable_pcie_error_reporting(pdev);
3414 skd_destruct(skdev);
3417 pci_release_regions(pdev);
3420 pci_disable_device(pdev);
3421 pci_set_drvdata(pdev, NULL);
3425 static void skd_pci_remove(struct pci_dev *pdev)
3428 struct skd_device *skdev;
3430 skdev = pci_get_drvdata(pdev);
3432 dev_err(&pdev->dev, "no device data for PCI\n");
3435 skd_stop_device(skdev);
3436 skd_release_irq(skdev);
3438 for (i = 0; i < SKD_MAX_BARS; i++)
3439 if (skdev->mem_map[i])
3440 iounmap(skdev->mem_map[i]);
3442 if (skdev->pcie_error_reporting_is_enabled)
3443 pci_disable_pcie_error_reporting(pdev);
3445 skd_destruct(skdev);
3447 pci_release_regions(pdev);
3448 pci_disable_device(pdev);
3449 pci_set_drvdata(pdev, NULL);
3454 static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3457 struct skd_device *skdev;
3459 skdev = pci_get_drvdata(pdev);
3461 dev_err(&pdev->dev, "no device data for PCI\n");
3465 skd_stop_device(skdev);
3467 skd_release_irq(skdev);
3469 for (i = 0; i < SKD_MAX_BARS; i++)
3470 if (skdev->mem_map[i])
3471 iounmap(skdev->mem_map[i]);
3473 if (skdev->pcie_error_reporting_is_enabled)
3474 pci_disable_pcie_error_reporting(pdev);
3476 pci_release_regions(pdev);
3477 pci_save_state(pdev);
3478 pci_disable_device(pdev);
3479 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3483 static int skd_pci_resume(struct pci_dev *pdev)
3487 struct skd_device *skdev;
3489 skdev = pci_get_drvdata(pdev);
3491 dev_err(&pdev->dev, "no device data for PCI\n");
3495 pci_set_power_state(pdev, PCI_D0);
3496 pci_enable_wake(pdev, PCI_D0, 0);
3497 pci_restore_state(pdev);
3499 rc = pci_enable_device(pdev);
3502 rc = pci_request_regions(pdev, DRV_NAME);
3505 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3507 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3509 dev_err(&pdev->dev, "consistent DMA mask error %d\n",
3513 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3516 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
3517 goto err_out_regions;
3521 pci_set_master(pdev);
3522 rc = pci_enable_pcie_error_reporting(pdev);
3525 "bad enable of PCIe error reporting rc=%d\n", rc);
3526 skdev->pcie_error_reporting_is_enabled = 0;
3528 skdev->pcie_error_reporting_is_enabled = 1;
3530 for (i = 0; i < SKD_MAX_BARS; i++) {
3532 skdev->mem_phys[i] = pci_resource_start(pdev, i);
3533 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
3534 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
3535 skdev->mem_size[i]);
3536 if (!skdev->mem_map[i]) {
3537 dev_err(&pdev->dev, "Unable to map adapter memory!\n");
3539 goto err_out_iounmap;
3541 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
3542 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
3543 skdev->mem_size[i]);
3545 rc = skd_acquire_irq(skdev);
3547 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
3548 goto err_out_iounmap;
3551 rc = skd_start_timer(skdev);
3555 init_waitqueue_head(&skdev->waitq);
3557 skd_start_device(skdev);
3562 skd_stop_device(skdev);
3563 skd_release_irq(skdev);
3566 for (i = 0; i < SKD_MAX_BARS; i++)
3567 if (skdev->mem_map[i])
3568 iounmap(skdev->mem_map[i]);
3570 if (skdev->pcie_error_reporting_is_enabled)
3571 pci_disable_pcie_error_reporting(pdev);
3574 pci_release_regions(pdev);
3577 pci_disable_device(pdev);
3581 static void skd_pci_shutdown(struct pci_dev *pdev)
3583 struct skd_device *skdev;
3585 dev_err(&pdev->dev, "%s called\n", __func__);
3587 skdev = pci_get_drvdata(pdev);
3589 dev_err(&pdev->dev, "no device data for PCI\n");
3593 dev_err(&pdev->dev, "calling stop\n");
3594 skd_stop_device(skdev);
3597 static struct pci_driver skd_driver = {
3599 .id_table = skd_pci_tbl,
3600 .probe = skd_pci_probe,
3601 .remove = skd_pci_remove,
3602 .suspend = skd_pci_suspend,
3603 .resume = skd_pci_resume,
3604 .shutdown = skd_pci_shutdown,
3608 *****************************************************************************
3609 * LOGGING SUPPORT
3610 *****************************************************************************
3613 const char *skd_drive_state_to_str(int state)
3616 case FIT_SR_DRIVE_OFFLINE:
3618 case FIT_SR_DRIVE_INIT:
3620 case FIT_SR_DRIVE_ONLINE:
3622 case FIT_SR_DRIVE_BUSY:
3624 case FIT_SR_DRIVE_FAULT:
3626 case FIT_SR_DRIVE_DEGRADED:
3628 case FIT_SR_PCIE_LINK_DOWN:
3630 case FIT_SR_DRIVE_SOFT_RESET:
3631 return "SOFT_RESET";
3632 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3634 case FIT_SR_DRIVE_INIT_FAULT:
3635 return "INIT_FAULT";
3636 case FIT_SR_DRIVE_BUSY_SANITIZE:
3637 return "BUSY_SANITIZE";
3638 case FIT_SR_DRIVE_BUSY_ERASE:
3639 return "BUSY_ERASE";
3640 case FIT_SR_DRIVE_FW_BOOTING:
3641 return "FW_BOOTING";
3647 const char *skd_skdev_state_to_str(enum skd_drvr_state state)
3650 case SKD_DRVR_STATE_LOAD:
3652 case SKD_DRVR_STATE_IDLE:
3654 case SKD_DRVR_STATE_BUSY:
3656 case SKD_DRVR_STATE_STARTING:
3658 case SKD_DRVR_STATE_ONLINE:
3660 case SKD_DRVR_STATE_PAUSING:
3662 case SKD_DRVR_STATE_PAUSED:
3664 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
3665 return "DRAINING_TIMEOUT";
3666 case SKD_DRVR_STATE_RESTARTING:
3667 return "RESTARTING";
3668 case SKD_DRVR_STATE_RESUMING:
3670 case SKD_DRVR_STATE_STOPPING:
3672 case SKD_DRVR_STATE_SYNCING:
3674 case SKD_DRVR_STATE_FAULT:
3676 case SKD_DRVR_STATE_DISAPPEARED:
3677 return "DISAPPEARED";
3678 case SKD_DRVR_STATE_BUSY_ERASE:
3679 return "BUSY_ERASE";
3680 case SKD_DRVR_STATE_BUSY_SANITIZE:
3681 return "BUSY_SANITIZE";
3682 case SKD_DRVR_STATE_BUSY_IMMINENT:
3683 return "BUSY_IMMINENT";
3684 case SKD_DRVR_STATE_WAIT_BOOT:
3692 static const char *skd_skreq_state_to_str(enum skd_req_state state)
3695 case SKD_REQ_STATE_IDLE:
3697 case SKD_REQ_STATE_SETUP:
3699 case SKD_REQ_STATE_BUSY:
3701 case SKD_REQ_STATE_COMPLETED:
3703 case SKD_REQ_STATE_TIMEOUT:
3710 static void skd_log_skdev(struct skd_device *skdev, const char *event)
3712 dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
3713 dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
3714 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3715 skd_skdev_state_to_str(skdev->state), skdev->state);
3716 dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
3717 skdev->in_flight, skdev->cur_max_queue_depth,
3718 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3719 dev_dbg(&skdev->pdev->dev, " timestamp=0x%x cycle=%d cycle_ix=%d\n",
3720 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
3723 static void skd_log_skreq(struct skd_device *skdev,
3724 struct skd_request_context *skreq, const char *event)
3726 dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
3727 dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
3728 skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
3730 dev_dbg(&skdev->pdev->dev, " timo=0x%x sg_dir=%d n_sg=%d\n",
3731 skreq->timeout_stamp, skreq->data_dir, skreq->n_sg);
3733 if (skreq->req != NULL) {
3734 struct request *req = skreq->req;
3735 u32 lba = (u32)blk_rq_pos(req);
3736 u32 count = blk_rq_sectors(req);
3738 dev_dbg(&skdev->pdev->dev,
3739 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
3740 lba, lba, count, count, (int)rq_data_dir(req));
3742 dev_dbg(&skdev->pdev->dev, "req=NULL\n");
3746 *****************************************************************************
3747 * MODULE GLUE
3748 *****************************************************************************
3751 static int __init skd_init(void)
3753 BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
3754 BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
3755 BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
3756 BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
3757 BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
3758 BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
3759 BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
3760 BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
3762 pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
3764 switch (skd_isr_type) {
3765 case SKD_IRQ_LEGACY:
3770 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
3771 skd_isr_type, SKD_IRQ_DEFAULT);
3772 skd_isr_type = SKD_IRQ_DEFAULT;
3775 if (skd_max_queue_depth < 1 ||
3776 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
3777 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
3778 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
3779 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
3782 if (skd_max_req_per_msg < 1 ||
3783 skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
3784 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
3785 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
3786 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
3789 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
3790 pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
3791 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
3792 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
3795 if (skd_dbg_level < 0 || skd_dbg_level > 2) {
3796 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
3801 if (skd_isr_comp_limit < 0) {
3802 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
3803 skd_isr_comp_limit, 0);
3804 skd_isr_comp_limit = 0;
3807 return pci_register_driver(&skd_driver);
3810 static void __exit skd_exit(void)
3812 pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
3814 pci_unregister_driver(&skd_driver);
3817 unregister_blkdev(skd_major, DRV_NAME);
3820 module_init(skd_init);
3821 module_exit(skd_exit);