/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and
 * HGST was acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/stringify.h>
#include <linux/slab_def.h>
#include <scsi/scsi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;
#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define PFX DRV_NAME ": "
MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");

#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001
#define SKD_FUA_NV (1 << 1)
#define SKD_MINORS_PER_DEVICE 16

#define SKD_MAX_QUEUE_DEPTH 200u

#define SKD_PAUSE_TIMEOUT (5 * 1000)

#define SKD_N_FITMSG_BYTES (512u)
#define SKD_MAX_REQ_PER_MSG 14

#define SKD_N_SPECIAL_FITMSG_BYTES (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u

#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)

#define SKD_N_INTERNAL_BYTES (512u)
#define SKD_SKCOMP_SIZE \
	((sizeof(struct fit_completion_entry_v1) + \
	  sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
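/*
 * Illustrative example (not from the original source): a request ID is built
 * from these fields. An internal request in slot 0 starts as
 * id = SKD_ID_INTERNAL | 0 = 0x0100; after one SKD_ID_INCR bump it is 0x0500.
 * Decoding it again:
 *
 *	table = 0x0500 & SKD_ID_TABLE_MASK;           -- 0x0100, SKD_ID_INTERNAL
 *	slot  = 0x0500 & SKD_ID_SLOT_MASK;            -- 0x00, slot 0
 *	tag   = 0x0500 & SKD_ID_SLOT_AND_TABLE_MASK;  -- 0x0100
 *
 * The high bits changed by SKD_ID_INCR let stale completions be detected:
 * a reused slot carries a different uniquifier each time.
 */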
#define SKD_N_MAX_SECTORS 2048u

#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36
enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};
#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u
enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
};
enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};
struct skd_msg_buf {
	struct fit_msg_hdr fmh;
	struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
};

struct skd_fitmsg_context {
	u32 id;

	u32 length;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};
struct skd_request_context {
	enum skd_req_state state;

	u16 id;
	u32 fitmsg_id;

	u8 flush_cmd;

	enum dma_data_direction data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;
};
struct skd_special_context {
	struct skd_request_context req;

	void *data_buf;
	dma_addr_t db_dma_address;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX,
} skd_irq_type_t;
#define SKD_MAX_BARS 2

struct skd_device {
	void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct request_queue *queue;
	struct skd_fitmsg_context *skmsg;
	struct device *class_dev;

	enum skd_drvr_state state;
	u32 drive_state;

	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	struct skd_fitmsg_context *skmsg_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13]; /* 12 chars plus null term */

	struct kmem_cache *msgbuf_cache;
	struct kmem_cache *sglist_cache;
	struct kmem_cache *databuf_cache;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 skcomp_cycle;
	u32 skcomp_ix;
	int sgs_per_request;
	u32 last_mtd;
	u32 proto_ver;
	int dbg_level;
	u32 devno;
	int gendisk_on;
	int sync_done;

	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	struct work_struct start_queue;
	struct work_struct completion_worker;
};
#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
			val);
}
#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time. IGNORED");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);
/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
	int *count = data;

	(*count)++;
}

static int skd_in_flight(struct skd_device *skdev)
{
	int count = 0;

	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);

	return count;
}
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = READ_10;
	else
		scsi_req->cdb[0] = WRITE_10;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}
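/*
 * Worked example (illustrative, not from the original source): a READ_10
 * for lba = 0x00123456 and count = 8 produces
 *
 *	cdb[] = { 0x28, 0x00, 0x00, 0x12, 0x34, 0x56, 0x00, 0x00, 0x08, 0x00 }
 *
 * i.e. a big-endian 32-bit LBA in bytes 2..5 and a big-endian 16-bit
 * transfer length in bytes 7..8, per the SBC READ(10)/WRITE(10) layout.
 */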
static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}
/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/* In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the /dev/skd0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do? Delay /dev/skd0?
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		return false;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		return true;
	}
}
static void skd_process_request(struct request *req, bool last)
{
	struct request_queue *const q = req->q;
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg;
	struct fit_msg_hdr *fmh;
	const u32 tag = blk_mq_unique_tag(req);
	struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
	struct skd_scsi_request *scsi_req;
	unsigned long flags = 0;
	const u32 lba = blk_rq_pos(req);
	const u32 count = blk_rq_sectors(req);
	const int data_dir = rq_data_dir(req);

	WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
		  tag, skd_max_queue_depth, q->nr_requests);

	SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);

	dev_dbg(&skdev->pdev->dev,
		"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
		lba, count, count, data_dir);

	skreq->id = tag + SKD_ID_RW_REQUEST;
	skreq->flush_cmd = 0;
	skreq->n_sg = 0;
	skreq->sg_byte_count = 0;

	skreq->fitmsg_id = 0;

	skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
		dev_dbg(&skdev->pdev->dev, "error Out\n");
		skd_end_request(skdev, blk_mq_rq_from_pdu(skreq),
				BLK_STS_RESOURCE);
		return;
	}

	dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
				   skreq->n_sg *
				   sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);

	/* Either a FIT msg is in progress or we have to start one. */
	if (skd_max_req_per_msg == 1) {
		skmsg = NULL;
	} else {
		spin_lock_irqsave(&skdev->lock, flags);
		skmsg = skdev->skmsg;
	}
	if (!skmsg) {
		skmsg = &skdev->skmsg_table[tag];
		skdev->skmsg = skmsg;

		/* Initialize the FIT msg header */
		fmh = &skmsg->msg_buf->fmh;
		memset(fmh, 0, sizeof(*fmh));
		fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
		skmsg->length = sizeof(*fmh);
	} else {
		fmh = &skmsg->msg_buf->fmh;
	}

	skreq->fitmsg_id = skmsg->id;

	scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
	memset(scsi_req, 0, sizeof(*scsi_req));

	scsi_req->hdr.tag = skreq->id;
	scsi_req->hdr.sg_list_dma_address =
		cpu_to_be64(skreq->sksg_dma_address);

	if (req_op(req) == REQ_OP_FLUSH) {
		skd_prep_zerosize_flush_cdb(scsi_req, skreq);
		SKD_ASSERT(skreq->flush_cmd == 1);
	} else {
		skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
	}

	if (req->cmd_flags & REQ_FUA)
		scsi_req->cdb[1] |= SKD_FUA_NV;

	scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);

	/* Complete resource allocations. */
	skreq->state = SKD_REQ_STATE_BUSY;

	skmsg->length += sizeof(struct skd_scsi_request);
	fmh->num_protocol_cmds_coalesced++;

	dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
		skd_in_flight(skdev));

	/*
	 * If the FIT msg buffer is full send it.
	 */
	if (skd_max_req_per_msg == 1) {
		skd_send_fitmsg(skdev, skmsg);
	} else {
		if (last ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skdev->skmsg = NULL;
		}
		spin_unlock_irqrestore(&skdev->lock, flags);
	}
}
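/*
 * Sizing note (illustrative, not from the original source): one FIT message
 * buffer is SKD_N_FITMSG_BYTES (512) bytes. Assuming a 64-byte struct
 * fit_msg_hdr (as noted in skd_send_fitmsg below) and 32-byte struct
 * skd_scsi_request entries, packing SKD_MAX_REQ_PER_MSG (14) requests uses
 * 64 + 14 * 32 = 512 bytes, exactly filling the buffer.
 */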
static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *mqd)
{
	struct request *req = mqd->rq;
	struct request_queue *q = req->q;
	struct skd_device *skdev = q->queuedata;

	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
		blk_mq_start_request(req);
		skd_process_request(req, mqd->last);

		return BLK_STS_OK;
	}

	return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
}
static enum blk_eh_timer_return skd_timed_out(struct request *req)
{
	struct skd_device *skdev = req->q->queuedata;

	dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
		blk_mq_unique_tag(req));

	return BLK_EH_HANDLED;
}
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t error)
{
	if (unlikely(error)) {
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_err(&skdev->pdev->dev,
			"Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
			count, req->tag);
	} else
		dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", req->tag,
			error);

	blk_mq_end_request(req, error);
}
/* Only called in case of a request timeout */
static void skd_softirq_done(struct request *req)
{
	struct skd_device *skdev = req->q->queuedata;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	skd_end_request(skdev, blk_mq_rq_from_pdu(skreq), BLK_STS_TIMEOUT);
	spin_unlock_irqrestore(&skdev->lock, flags);
}
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	struct scatterlist *sgl = &skreq->sg[0], *sg;
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
		     skreq->data_dir != DMA_FROM_DEVICE);

	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for_each_sg(sgl, sg, n_sg, i) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(sg);
		uint64_t dma_addr = sg_dma_address(sg);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}
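/*
 * Worked example (illustrative): with 32-byte struct fit_sg_descriptor
 * entries (see the SG element comment near the top of this file) and
 * n_sg = 3, the last used descriptor's next_desc_ptr is restored to
 * sksg_dma_address + 3 * 32, i.e. the DMA address of the 4th entry, so the
 * preallocated chain stays intact for a later request that uses more entries.
 */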
/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);
static void skd_start_queue(struct work_struct *work)
{
	struct skd_device *skdev = container_of(work, typeof(*skdev),
						start_queue);

	/*
	 * Although it is safe to call blk_start_queue() from interrupt
	 * context, blk_mq_start_hw_queues() must not be called from
	 * interrupt context.
	 */
	blk_mq_start_hw_queues(skdev->queue);
}
static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE)
		skd_timer_tick_not_online(skdev);

	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}
static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timed out=%d, restarting device.",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* Start the queue so we can respond with error to requests. */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev);
		}

		/* Start the queue so we can respond with error to requests. */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}
static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);

	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}
/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */
static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = &skspcl->msg_buf->fmh;
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = &skspcl->msg_buf->scsi[0];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	skspcl->req.n_sg = 1;
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}
#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	scsi = &skspcl->msg_buf->scsi[0];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;
	}

	skd_send_special_fitmsg(skdev, skspcl);
}
static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}
static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}
static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
	}
}
static void skd_complete_internal(struct skd_device *skdev,
				  struct fit_completion_entry_v1 *skcomp,
				  struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	dma_sync_single_for_cpu(&skdev->pdev->dev,
				skspcl->db_dma_address,
				skspcl->req.sksg_list[0].byte_count,
				DMA_BIDIRECTIONAL);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");
					return;
				}
			}
		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];
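			/*
			 * Illustrative decode (not from the original source):
			 * READ CAPACITY returns 8 big-endian bytes. If buf[]
			 * holds 00 3A 38 5F 00 00 02 00, then last_lba is
			 * 0x003A385F and the block size is 0x200 = 512.
			 */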
			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}
/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;

	dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
		skmsg->mb_dma_address, skd_in_flight(skdev));
	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;

		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
				&bp[i]);
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else {
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;
	}
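	/*
	 * Illustrative example (not from the original source): a message with
	 * one coalesced request is 64 + 32 = 96 bytes, so the chain above
	 * selects FIT_QCMD_MSGSIZE_128, i.e. the smallest size bucket
	 * (64/128/256/512) that covers the message length.
	 */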
	dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
				   skmsg->length, DMA_TO_DEVICE);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	WARN_ON_ONCE(skspcl->req.n_sg != 1);

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
				   SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->req.sksg_dma_address,
				   1 * sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->db_dma_address,
				   skspcl->req.sksg_list[0].byte_count,
				   DMA_BIDIRECTIONAL);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */
static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr);
struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};
static struct sns_info skd_chkstat_table[] = {
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,	/* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};
/*
 * Look up status and sense data to decide how to handle the error
 *
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, struct fit_comp_error_info *skerr)
{
	int i;

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
		skerr->fruc);

	/* Does the info match an entry in the good category? */
	for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
}
static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq,
				      struct request *req)
{
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skd_end_request(skdev, req, BLK_STS_OK);
		break;

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
		blk_requeue_request(skdev->queue, req);
		dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
		skd_quiesce_dev(skdev);
		break;

	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
		if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
			skd_log_skreq(skdev, skreq, "retry");
			blk_requeue_request(skdev->queue, req);
			break;
		}
		/* fall through */

	case SKD_CHECK_STATUS_REPORT_ERROR:
	default:
		skd_end_request(skdev, req, BLK_STS_IOERR);
		break;
	}
}
static void skd_release_skreq(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	/*
	 * Reclaim the skd_request_context
	 */
	skreq->state = SKD_REQ_STATE_IDLE;
	skreq->id += SKD_ID_INCR;
}
static int skd_isr_completion_posted(struct skd_device *skdev,
				     int limit, int *enqueued)
{
	struct fit_completion_entry_v1 *skcmp;
	struct fit_comp_error_info *skerr;
	u16 req_id;
	u32 tag;
	u16 hwq = 0;
	struct request *rq;
	struct skd_request_context *skreq;
	u16 cmp_cntxt;
	u8 cmp_status;
	u8 cmp_cycle;
	u32 cmp_bytes;
	int rc = 0;
	int processed = 0;

	lockdep_assert_held(&skdev->lock);

	for (;;) {
		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		dev_dbg(&skdev->pdev->dev,
			"cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
			skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
			cmp_cntxt, cmp_status, skd_in_flight(skdev),
			cmp_bytes, skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			dev_dbg(&skdev->pdev->dev, "end of completions\n");
			break;
		}
		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count. 8-bit wrap-around.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++;
		}
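		/*
		 * Illustrative walk-through (not from the original source):
		 * with SKD_N_COMPLETION_ENTRY = 256, consuming the entry at
		 * ix = 255 wraps ix back to 0 and bumps the expected cycle,
		 * e.g. from 1 to 2. An entry whose cycle byte still holds the
		 * old value has not been rewritten by the device yet, which
		 * is exactly the "end of completions" test above.
		 */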
		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_start() above) or a special request.
		 */
		req_id = cmp_cntxt;
		tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		/* Is this other than a r/w request? */
		if (tag >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
						      tag));
			skd_complete_other(skdev, skcmp, skerr);
			continue;
		}

		rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
		if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
			 tag))
			continue;
		skreq = blk_mq_rq_to_pdu(rq);

		/*
		 * Make sure the request ID for the slot matches.
		 */
		if (skreq->id != req_id) {
			dev_err(&skdev->pdev->dev,
				"Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
				req_id, skreq->id, cmp_cntxt);

			continue;
		}

		SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);

		skreq->completion = *skcmp;
		if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
			skreq->err_info = *skerr;
			skd_log_check_status(skdev, cmp_status, skerr->key,
					     skerr->code, skerr->qual,
					     skerr->fruc);
		}
		/* Release DMA resources for the request. */
		if (skreq->n_sg > 0)
			skd_postop_sg_list(skdev, skreq);

		skd_release_skreq(skdev, skreq);

		/*
		 * Capture the outcome and post it back to the native request.
		 */
		if (likely(cmp_status == SAM_STAT_GOOD))
			skd_end_request(skdev, rq, BLK_STS_OK);
		else
			skd_resolve_req_exception(skdev, skreq, rq);

		/* skd_isr_comp_limit equal zero means no limit */
		if (limit) {
			if (++processed >= limit) {
				rc = 1;
				break;
			}
		}
	}

	if (skdev->state == SKD_DRVR_STATE_PAUSING &&
	    skd_in_flight(skdev) == 0) {
		skdev->state = SKD_DRVR_STATE_PAUSED;
		wake_up_interruptible(&skdev->waitq);
	}

	return rc;
}
static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr)
{
	u32 req_id = 0;
	u32 req_table;
	u32 req_slot;
	struct skd_special_context *skspcl;

	lockdep_assert_held(&skdev->lock);

	req_id = skcomp->tag;
	req_table = req_id & SKD_ID_TABLE_MASK;
	req_slot = req_id & SKD_ID_SLOT_MASK;
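	/*
	 * Illustrative decode (not from the original source): for
	 * req_id = 0x0500, req_table = 0x0100 (SKD_ID_INTERNAL) and
	 * req_slot = 0, so the completion is routed to the driver's
	 * internal request in slot 0 below.
	 */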
	dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
		req_id, req_slot);

	/*
	 * Based on the request id, determine how to dispatch this completion.
	 * This switch/case is finding the good cases and forwarding the
	 * completion entry. Errors are reported below the switch.
	 */
	switch (req_table) {
	case SKD_ID_RW_REQUEST:
		/*
		 * The caller, skd_isr_completion_posted() above,
		 * handles r/w requests. The only way we get here
		 * is if the req_slot is out of bounds.
		 */
		break;

	case SKD_ID_INTERNAL:
		if (req_slot == 0) {
			skspcl = &skdev->internal_skspcl;
			if (skspcl->req.id == req_id &&
			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
				skd_complete_internal(skdev,
						      skcomp, skerr, skspcl);
				return;
			}
		}
		break;

	case SKD_ID_FIT_MSG:
		/*
		 * These IDs should never appear in a completion record.
		 */
		break;

	default:
		/*
		 * These IDs should never appear anywhere;
		 */
		break;
	}

	/*
	 * If we get here it is a bad or stale id.
	 */
}
static void skd_reset_skcomp(struct skd_device *skdev)
{
	memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);

	skdev->skcomp_ix = 0;
	skdev->skcomp_cycle = 1;
}
/*
 *****************************************************************************
 * DEFERRED PROCESSING
 *****************************************************************************
 */
static void skd_completion_worker(struct work_struct *work)
{
	struct skd_device *skdev =
		container_of(work, struct skd_device, completion_worker);
	unsigned long flags;
	int flush_enqueued = 0;

	spin_lock_irqsave(&skdev->lock, flags);

	/*
	 * pass in limit=0, which means no limit..
	 * process everything in compq
	 */
	skd_isr_completion_posted(skdev, 0, &flush_enqueued);
	schedule_work(&skdev->start_queue);

	spin_unlock_irqrestore(&skdev->lock, flags);
}
static void skd_isr_msg_from_dev(struct skd_device *skdev);

static irqreturn_t
skd_isr(int irq, void *ptr)
{
	struct skd_device *skdev = ptr;
	u32 intstat;
	u32 ack;
	int rc = 0;
	int deferred = 0;
	int flush_enqueued = 0;

	spin_lock(&skdev->lock);

	for (;;) {
		intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

		ack = FIT_INT_DEF_MASK;
		ack &= intstat;

		dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
			ack);

		/* As long as there is an int pending on device, keep
		 * running loop. When none, get out, but if we've never
		 * done any processing, call completion handler?
		 */
		if (ack == 0) {
			/* No interrupts on device, but run the completion
			 * handler anyway?
			 */
			if (rc == 0)
				if (likely(skdev->state
						== SKD_DRVR_STATE_ONLINE))
					deferred = 1;
			break;
		}

		rc = IRQ_HANDLED;

		SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);

		if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
			   (skdev->state != SKD_DRVR_STATE_STOPPING))) {
			if (intstat & FIT_ISH_COMPLETION_POSTED) {
				/*
				 * If we have already deferred completion
				 * processing, don't bother running it again
				 */
				if (deferred == 0)
					deferred =
						skd_isr_completion_posted(skdev,
						skd_isr_comp_limit, &flush_enqueued);
			}

			if (intstat & FIT_ISH_FW_STATE_CHANGE) {
				skd_isr_fwstate(skdev);
				if (skdev->state == SKD_DRVR_STATE_FAULT ||
				    skdev->state ==
				    SKD_DRVR_STATE_DISAPPEARED) {
					spin_unlock(&skdev->lock);
					return rc;
				}
			}

			if (intstat & FIT_ISH_MSG_FROM_DEV)
				skd_isr_msg_from_dev(skdev);
		}
	}

	if (unlikely(flush_enqueued))
		schedule_work(&skdev->start_queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		schedule_work(&skdev->start_queue);

	spin_unlock(&skdev->lock);

	return rc;
}
static void skd_drive_fault(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_FAULT;
	dev_err(&skdev->pdev->dev, "Drive FAULT\n");
}

static void skd_drive_disappeared(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_DISAPPEARED;
	dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
}
static void skd_isr_fwstate(struct skd_device *skdev)
{
	u32 sense;
	u32 state;
	u32 mtd;
	int prev_driver_state = skdev->state;

	sense = SKD_READL(skdev, FIT_STATUS);
	state = sense & FIT_SR_DRIVE_STATE_MASK;

	dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_drive_state_to_str(state), state);

	skdev->drive_state = state;

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_INIT:
		if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
			skd_disable_interrupts(skdev);
			break;
		}
		if (skdev->state == SKD_DRVR_STATE_RESTARTING)
			skd_recover_requests(skdev);
		if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
			skdev->timer_countdown = SKD_STARTING_TIMO;
			skdev->state = SKD_DRVR_STATE_STARTING;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_SR_DRIVE_ONLINE:
		skdev->cur_max_queue_depth = skd_max_queue_depth;
		if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
			skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;

		skdev->queue_low_water_mark =
			skdev->cur_max_queue_depth * 2 / 3 + 1;
		if (skdev->queue_low_water_mark < 1)
			skdev->queue_low_water_mark = 1;
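		/*
		 * Illustrative arithmetic (not from the original source):
		 * with the default limit of 64, the low-water mark is
		 * 64 * 2 / 3 + 1 = 43, roughly two thirds of the negotiated
		 * queue depth.
		 */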
		dev_info(&skdev->pdev->dev,
			 "Queue depth limit=%d dev=%d lowat=%d\n",
			 skdev->cur_max_queue_depth,
			 skdev->dev_max_queue_depth,
			 skdev->queue_low_water_mark);

		skd_refresh_device_data(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		skd_quiesce_dev(skdev);
		break;
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		/* set timer for 3 seconds, we'll abort any unfinished
		 * commands after that expires
		 */
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_TIMER_SECONDS(3);
		schedule_work(&skdev->start_queue);
		break;
	case FIT_SR_DRIVE_BUSY_ERASE:
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		break;
	case FIT_SR_DRIVE_OFFLINE:
		skdev->state = SKD_DRVR_STATE_IDLE;
		break;
	case FIT_SR_DRIVE_SOFT_RESET:
		switch (skdev->state) {
		case SKD_DRVR_STATE_STARTING:
		case SKD_DRVR_STATE_RESTARTING:
			/* Expected by a caller of skd_soft_reset() */
			break;
		default:
			skdev->state = SKD_DRVR_STATE_RESTARTING;
			break;
		}
		break;
	case FIT_SR_DRIVE_FW_BOOTING:
		dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_DEGRADED:
	case FIT_SR_PCIE_LINK_DOWN:
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		break;

	case FIT_SR_DRIVE_FAULT:
		skd_drive_fault(skdev);
		skd_recover_requests(skdev);
		schedule_work(&skdev->start_queue);
		break;

	/* PCIe bus returned all Fs? */
	case 0xFF:
		dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
			 sense);
		skd_drive_disappeared(skdev);
		skd_recover_requests(skdev);
		schedule_work(&skdev->start_queue);
		break;
	default:
		/*
		 * Unknown FW state. Wait for a state we recognize.
		 */
		break;
	}
	dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
		skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
}
static void skd_recover_request(struct request *req, void *data, bool reserved)
{
	struct skd_device *const skdev = data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

	if (skreq->state != SKD_REQ_STATE_BUSY)
		return;

	skd_log_skreq(skdev, skreq, "recover");

	/* Release DMA resources for the request. */
	if (skreq->n_sg > 0)
		skd_postop_sg_list(skdev, skreq);

	skreq->state = SKD_REQ_STATE_IDLE;

	skd_end_request(skdev, req, BLK_STS_IOERR);
}

static void skd_recover_requests(struct skd_device *skdev)
{
	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
}
static void skd_isr_msg_from_dev(struct skd_device *skdev)
{
	u32 mfd;
	u32 mtd;
	u32 data;

	mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);

	dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
		skdev->last_mtd);

	/* ignore any mtd that is an ack for something we didn't send */
	if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
		return;

	switch (FIT_MXD_TYPE(mfd)) {
	case FIT_MTD_FITFW_INIT:
		skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);

		if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
			dev_err(&skdev->pdev->dev, "protocol mismatch\n");
			dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
				skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
			dev_err(&skdev->pdev->dev, " please upgrade driver\n");
			skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_GET_CMDQ_DEPTH:
		skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
				   SKD_N_COMPLETION_ENTRY);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_DEPTH:
		SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_ADDR:
		skd_reset_skcomp(skdev);
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_HOST_ID:
		skdev->connect_time_stamp = get_seconds();
		data = skdev->connect_time_stamp & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
		skdev->drive_jiffies = FIT_MXD_DATA(mfd);
		data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
		skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
		mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;

		dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
			skdev->connect_time_stamp, skdev->drive_jiffies);
		break;

	case FIT_MTD_ARM_QUEUE:
		skdev->last_mtd = 0;
		/*
		 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
		 */
		break;

	default:
		break;
	}
}
static void skd_disable_interrupts(struct skd_device *skdev)
{
	u32 sense;

	sense = SKD_READL(skdev, FIT_CONTROL);
	sense &= ~FIT_CR_ENABLE_INTERRUPTS;
	SKD_WRITEL(skdev, sense, FIT_CONTROL);
	dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);

	/* Note that all 1s are written. A 1 bit means
	 * disable, a 0 means enable.
	 */
	SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
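	/*
	 * Illustrative mask math (not from the original source): writing ~0
	 * (all bits set) masks every interrupt source at once, whereas
	 * skd_enable_interrupts() below writes the complement of the bits it
	 * wants, leaving only those sources unmasked.
	 */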
}

static void skd_enable_interrupts(struct skd_device *skdev)
{
	u32 val;

	/* unmask interrupts first */
	val = FIT_ISH_FW_STATE_CHANGE +
	      FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;

	/* Note that the complement of mask is written. A 1-bit means
	 * disable, a 0 means enable. */
	SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
	dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= FIT_CR_ENABLE_INTERRUPTS;
	dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}
/*
 *****************************************************************************
 * START, STOP, RESTART, QUIESCE, UNQUIESCE
 *****************************************************************************
 */
static void skd_soft_reset(struct skd_device *skdev)
{
	u32 val;

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= (FIT_CR_SOFT_RESET);
	dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}
static void skd_start_device(struct skd_device *skdev)
{
	unsigned long flags;
	u32 sense;
	u32 state;

	spin_lock_irqsave(&skdev->lock, flags);

	/* ack all ghost interrupts */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

	sense = SKD_READL(skdev, FIT_STATUS);

	dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);

	state = sense & FIT_SR_DRIVE_STATE_MASK;
	skdev->drive_state = state;
	skdev->last_mtd = 0;

	skdev->state = SKD_DRVR_STATE_STARTING;
	skdev->timer_countdown = SKD_STARTING_TIMO;

	skd_enable_interrupts(skdev);

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_OFFLINE:
		dev_err(&skdev->pdev->dev, "Drive offline...\n");
		break;

	case FIT_SR_DRIVE_FW_BOOTING:
		dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_BUSY_SANITIZE:
		dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_BUSY_ERASE:
		dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_INIT:
	case FIT_SR_DRIVE_ONLINE:
		skd_soft_reset(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		dev_err(&skdev->pdev->dev, "Drive Busy...\n");
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_SOFT_RESET:
		dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
		break;

	case FIT_SR_DRIVE_FAULT:
		/* Fault state is bad... soft reset won't do it...
		 * Hard reset, maybe, but does it work on device?
		 * For now, just fault so the system doesn't hang.
		 */
		skd_drive_fault(skdev);
		/* Start the queue so we can respond with error to requests. */
		dev_dbg(&skdev->pdev->dev, "starting queue\n");
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case 0xFF:
		/* Most likely the device isn't there or isn't responding
		 * to the BAR1 addresses. */
		skd_drive_disappeared(skdev);
		/* Start the queue so we can respond with error to requests. */
		dev_dbg(&skdev->pdev->dev,
			"starting queue to error-out reqs\n");
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
			skdev->drive_state);
		break;
	}

	state = SKD_READL(skdev, FIT_CONTROL);
	dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);

	state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
	dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);

	state = SKD_READL(skdev, FIT_INT_MASK_HOST);
	dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);

	state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
	dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);

	state = SKD_READL(skdev, FIT_HW_VERSION);
	dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);

	spin_unlock_irqrestore(&skdev->lock, flags);
}
static void skd_stop_device(struct skd_device *skdev)
{
	unsigned long flags;
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	u32 dev_state;
	int i;

	spin_lock_irqsave(&skdev->lock, flags);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
		goto stop_out;
	}

	if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
		dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
		goto stop_out;
	}

	skdev->state = SKD_DRVR_STATE_SYNCING;
	skdev->sync_done = 0;

	skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);

	spin_unlock_irqrestore(&skdev->lock, flags);

	wait_event_interruptible_timeout(skdev->waitq,
					 (skdev->sync_done), (10 * HZ));

	spin_lock_irqsave(&skdev->lock, flags);

	switch (skdev->sync_done) {
	case 0:
		dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
		break;
	case 1:
		dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
		break;
	default:
		dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
	}

stop_out:
	skdev->state = SKD_DRVR_STATE_STOPPING;
	spin_unlock_irqrestore(&skdev->lock, flags);

	skd_kill_timer(skdev);

	spin_lock_irqsave(&skdev->lock, flags);
	skd_disable_interrupts(skdev);

	/* ensure all ints on device are cleared */
	/* soft reset the device to unload with a clean slate */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
	SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);

	spin_unlock_irqrestore(&skdev->lock, flags);

	/* poll every 100ms, 1 second timeout */
	for (i = 0; i < 10; i++) {
		dev_state =
			SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
		if (dev_state == FIT_SR_DRIVE_INIT)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(100));
	}

	if (dev_state != FIT_SR_DRIVE_INIT)
		dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
			dev_state);
}
/* assume spinlock is held */
static void skd_restart_device(struct skd_device *skdev)
{
	u32 state;

	/* ack all ghost interrupts */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

	state = SKD_READL(skdev, FIT_STATUS);

	dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);

	state &= FIT_SR_DRIVE_STATE_MASK;
	skdev->drive_state = state;
	skdev->last_mtd = 0;

	skdev->state = SKD_DRVR_STATE_RESTARTING;
	skdev->timer_countdown = SKD_RESTARTING_TIMO;

	skd_soft_reset(skdev);
}
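/*
 * Quiesce/unquiesce pair: quiesce stops the blk-mq hardware queues while
 * the drive is busy; unquiesce restarts them once the FIT state machine
 * reports the drive ONLINE again. Both run under skdev->lock, hence the
 * "assume spinlock is held" notes below.
 */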
/* assume spinlock is held */
static int skd_quiesce_dev(struct skd_device *skdev)
{
	int rc = 0;

	switch (skdev->state) {
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		dev_dbg(&skdev->pdev->dev, "stopping queue\n");
		blk_mq_stop_hw_queues(skdev->queue);
		break;
	case SKD_DRVR_STATE_ONLINE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_RESUMING:
	default:
		rc = -EINVAL;
		dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
			skdev->state);
	}
	return rc;
}
/* assume spinlock is held */
static int skd_unquiesce_dev(struct skd_device *skdev)
{
	int prev_driver_state = skdev->state;

	skd_log_skdev(skdev, "unquiesce");
	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
		dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
		return 0;
	}
	if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
		/*
		 * If there has been a state change to other than
		 * ONLINE, we will rely on controller state change
		 * to come back online and restart the queue.
		 * The BUSY state means that driver is ready to
		 * continue normal processing but waiting for controller
		 * to become available.
		 */
		skdev->state = SKD_DRVR_STATE_BUSY;
		dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
		return 0;
	}

	/*
	 * Drive has just come online, driver is either in startup,
	 * paused performing a task, or busy waiting for hardware.
	 */
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		skdev->state = SKD_DRVR_STATE_ONLINE;
		dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
			skd_skdev_state_to_str(prev_driver_state),
			prev_driver_state, skd_skdev_state_to_str(skdev->state),
			skdev->state);
		dev_dbg(&skdev->pdev->dev,
			"**** device ONLINE...starting block queue\n");
		dev_dbg(&skdev->pdev->dev, "starting queue\n");
		dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = 1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		dev_dbg(&skdev->pdev->dev,
			"**** driver state %d, not implemented\n",
			skdev->state);
		return -EBUSY;
	}
	return 0;
}
/*
 *****************************************************************************
 * PCIe MSI/MSI-X INTERRUPT HANDLERS
 *****************************************************************************
 */
static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
	skd_isr_fwstate(skdev);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;
	int flush_enqueued = 0;
	int deferred;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
	deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
					     &flush_enqueued);
	if (flush_enqueued)
		schedule_work(&skdev->start_queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		schedule_work(&skdev->start_queue);

	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
	skd_isr_msg_from_dev(skdev);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
/*
 *****************************************************************************
 * PCIe MSI/MSI-X SETUP
 *****************************************************************************
 */
struct skd_msix_entry {
	char isr_name[30];
};

struct skd_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

#define SKD_MAX_MSIX_COUNT 13
#define SKD_MIN_MSIX_COUNT 7
#define SKD_BASE_MSIX_IRQ 4
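/*
 * One entry per MSI-X vector. The index in this table matches the vector
 * number the hardware raises, so the DMA, state-change, completion,
 * message and queue-full sources each get a dedicated handler; unused
 * vectors are parked on skd_reserved_isr. This presumably mirrors a
 * fixed vector assignment in the s1120 firmware.
 */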
static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
	{ "(DMA 0)",	    skd_reserved_isr },
	{ "(DMA 1)",	    skd_reserved_isr },
	{ "(DMA 2)",	    skd_reserved_isr },
	{ "(DMA 3)",	    skd_reserved_isr },
	{ "(State Change)", skd_statec_isr },
	{ "(COMPL_Q)",	    skd_comp_q },
	{ "(MSG)",	    skd_msg_isr },
	{ "(Reserved)",	    skd_reserved_isr },
	{ "(Reserved)",	    skd_reserved_isr },
	{ "(Queue Full 0)", skd_qfull_isr },
	{ "(Queue Full 1)", skd_qfull_isr },
	{ "(Queue Full 2)", skd_qfull_isr },
	{ "(Queue Full 3)", skd_qfull_isr },
};
static int skd_acquire_msix(struct skd_device *skdev)
{
	int i, rc;
	struct pci_dev *pdev = skdev->pdev;

	rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
				   PCI_IRQ_MSIX);
	if (rc < 0) {
		dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
		goto out;
	}

	skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
				      sizeof(struct skd_msix_entry), GFP_KERNEL);
	if (!skdev->msix_entries) {
		rc = -ENOMEM;
		dev_err(&skdev->pdev->dev, "msix table allocation error\n");
		goto out;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
		struct skd_msix_entry *qentry = &skdev->msix_entries[i];

		snprintf(qentry->isr_name, sizeof(qentry->isr_name),
			 "%s%d-msix %s", DRV_NAME, skdev->devno,
			 msix_entries[i].name);

		rc = devm_request_irq(&skdev->pdev->dev,
				      pci_irq_vector(skdev->pdev, i),
				      msix_entries[i].handler, 0,
				      qentry->isr_name, skdev);
		if (rc) {
			dev_err(&skdev->pdev->dev,
				"Unable to register(%d) MSI-X handler %d: %s\n",
				rc, i, qentry->isr_name);
			goto msix_out;
		}
	}

	dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
		SKD_MAX_MSIX_COUNT);
	return 0;

msix_out:
	while (--i >= 0)
		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
out:
	kfree(skdev->msix_entries);
	skdev->msix_entries = NULL;
	return rc;
}
static int skd_acquire_irq(struct skd_device *skdev)
{
	struct pci_dev *pdev = skdev->pdev;
	unsigned int irq_flag = PCI_IRQ_LEGACY;
	int rc;

	if (skd_isr_type == SKD_IRQ_MSIX) {
		rc = skd_acquire_msix(skdev);
		if (!rc)
			return 0;

		dev_err(&skdev->pdev->dev,
			"failed to enable MSI-X, re-trying with MSI %d\n", rc);
	}

	snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
		 skdev->devno);

	if (skd_isr_type != SKD_IRQ_LEGACY)
		irq_flag |= PCI_IRQ_MSI;
	rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
	if (rc < 0) {
		dev_err(&skdev->pdev->dev,
			"failed to allocate the MSI interrupt %d\n", rc);
		return rc;
	}

	rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
			      pdev->msi_enabled ? 0 : IRQF_SHARED,
			      skdev->isr_name, skdev);
	if (rc) {
		pci_free_irq_vectors(pdev);
		dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
			rc);
		return rc;
	}

	return 0;
}
static void skd_release_irq(struct skd_device *skdev)
{
	struct pci_dev *pdev = skdev->pdev;

	if (skdev->msix_entries) {
		int i;

		for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
			devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
				      skdev);
		}

		kfree(skdev->msix_entries);
		skdev->msix_entries = NULL;
	} else {
		devm_free_irq(&pdev->dev, pdev->irq, skdev);
	}

	pci_free_irq_vectors(pdev);
}
/*
 *****************************************************************************
 * CONSTRUCT
 *****************************************************************************
 */
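/*
 * Small DMA buffers are carved from dedicated kmem_caches and mapped with
 * the streaming DMA API (dma_map_single) rather than allocated coherently;
 * the mapping lives for the buffer's lifetime and is undone again in
 * skd_free_dma(). Note that s->size is the (possibly rounded-up) slab
 * object size, so the whole object is always mapped.
 */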
static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
			   dma_addr_t *dma_handle, gfp_t gfp,
			   enum dma_data_direction dir)
{
	struct device *dev = &skdev->pdev->dev;
	void *buf;

	buf = kmem_cache_alloc(s, gfp);
	if (!buf)
		return NULL;
	*dma_handle = dma_map_single(dev, buf, s->size, dir);
	if (dma_mapping_error(dev, *dma_handle)) {
		kmem_cache_free(s, buf);
		buf = NULL;
	}
	return buf;
}

static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
			 void *vaddr, dma_addr_t dma_handle,
			 enum dma_data_direction dir)
{
	if (!vaddr)
		return;

	dma_unmap_single(&skdev->pdev->dev, dma_handle, s->size, dir);
	kmem_cache_free(s, vaddr);
}
static int skd_cons_skcomp(struct skd_device *skdev)
{
	int rc = 0;
	struct fit_completion_entry_v1 *skcomp;

	dev_dbg(&skdev->pdev->dev,
		"comp pci_alloc, total bytes %zd entries %d\n",
		SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);

	skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
				       &skdev->cq_dma_address);

	if (skcomp == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->skcomp_table = skcomp;
	skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
							    sizeof(*skcomp) *
							    SKD_N_COMPLETION_ENTRY);

err_out:
	return rc;
}
static int skd_cons_skmsg(struct skd_device *skdev)
{
	int rc = 0;
	u32 i;

	dev_dbg(&skdev->pdev->dev,
		"skmsg_table kcalloc, struct %lu, count %u total %lu\n",
		sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
		sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);

	skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
				     sizeof(struct skd_fitmsg_context),
				     GFP_KERNEL);
	if (skdev->skmsg_table == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		skmsg->id = i + SKD_ID_FIT_MSG;

		skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
						      SKD_N_FITMSG_BYTES,
						      &skmsg->mb_dma_address);

		if (skmsg->msg_buf == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
		     (FIT_QCMD_ALIGN - 1),
		     "not aligned: msg_buf %p mb_dma_address %#llx\n",
		     skmsg->msg_buf, skmsg->mb_dma_address);
		memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
	}

err_out:
	return rc;
}
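/*
 * Allocate one request's worth of FIT SG descriptors and pre-link them:
 * each descriptor's next_desc_ptr receives the bus address of its
 * successor so the device can walk the chain, and the final entry is
 * terminated with 0. Only the per-I/O byte counts and buffer addresses
 * remain to be filled in at submission time.
 */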
static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
						  u32 n_sg,
						  dma_addr_t *ret_dma_addr)
{
	struct fit_sg_descriptor *sg_list;

	sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
				GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);

	if (sg_list != NULL) {
		uint64_t dma_address = *ret_dma_addr;
		u32 i;

		for (i = 0; i < n_sg - 1; i++) {
			uint64_t ndp_off;

			ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);

			sg_list[i].next_desc_ptr = dma_address + ndp_off;
		}
		sg_list[i].next_desc_ptr = 0LL;
	}

	return sg_list;
}

static void skd_free_sg_list(struct skd_device *skdev,
			     struct fit_sg_descriptor *sg_list,
			     dma_addr_t dma_addr)
{
	if (WARN_ON_ONCE(!sg_list))
		return;

	skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
		     DMA_TO_DEVICE);
}
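/*
 * blk-mq per-request init/exit. The request PDU is laid out as a
 * struct skd_request_context immediately followed by the scatterlist
 * array (see the cmd_size computation in skd_cons_disk()), which is why
 * skreq + 1 points at the scatterlist storage.
 */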
static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct skd_device *skdev = set->driver_data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);

	skreq->state = SKD_REQ_STATE_IDLE;
	skreq->sg = (void *)(skreq + 1);
	sg_init_table(skreq->sg, skd_sgs_per_request);
	skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
					    &skreq->sksg_dma_address);

	return skreq->sksg_list ? 0 : -ENOMEM;
}

static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			     unsigned int hctx_idx)
{
	struct skd_device *skdev = set->driver_data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);

	skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
}
static int skd_cons_sksb(struct skd_device *skdev)
{
	int rc = 0;
	struct skd_special_context *skspcl;

	skspcl = &skdev->internal_skspcl;

	skspcl->req.id = 0 + SKD_ID_INTERNAL;
	skspcl->req.state = SKD_REQ_STATE_IDLE;

	skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
					 &skspcl->db_dma_address,
					 GFP_DMA | __GFP_ZERO,
					 DMA_BIDIRECTIONAL);
	if (skspcl->data_buf == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
					&skspcl->mb_dma_address,
					GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
	if (skspcl->msg_buf == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
						 &skspcl->req.sksg_dma_address);
	if (skspcl->req.sksg_list == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	if (!skd_format_internal_skspcl(skdev)) {
		rc = -EINVAL;
		goto err_out;
	}

err_out:
	return rc;
}
static const struct blk_mq_ops skd_mq_ops = {
	.queue_rq	= skd_mq_queue_rq,
	.init_request	= skd_init_request,
	.exit_request	= skd_exit_request,
};
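/*
 * Single hardware queue; FIFO tag allocation keeps request IDs in roughly
 * arrival order. The queue is stopped immediately after creation and is
 * only started (via the start_queue work item) once the drive reaches
 * ONLINE.
 */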
static int skd_cons_disk(struct skd_device *skdev)
{
	int rc = 0;
	struct gendisk *disk;
	struct request_queue *q;
	unsigned long flags;

	disk = alloc_disk(SKD_MINORS_PER_DEVICE);
	if (!disk) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->disk = disk;
	sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);

	disk->major = skdev->major;
	disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
	disk->fops = &skd_blockdev_ops;
	disk->private_data = skdev;

	memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
	skdev->tag_set.ops = &skd_mq_ops;
	skdev->tag_set.nr_hw_queues = 1;
	skdev->tag_set.queue_depth = skd_max_queue_depth;
	skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
		skdev->sgs_per_request * sizeof(struct scatterlist);
	skdev->tag_set.numa_node = NUMA_NO_NODE;
	skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
	skdev->tag_set.driver_data = skdev;
	rc = blk_mq_alloc_tag_set(&skdev->tag_set);
	if (rc)
		goto err_out;
	q = blk_mq_init_queue(&skdev->tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&skdev->tag_set);
		rc = PTR_ERR(q);
		goto err_out;
	}
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	q->queuedata = skdev;
	q->nr_requests = skd_max_queue_depth / 2;

	skdev->queue = q;
	disk->queue = q;

	blk_queue_write_cache(q, true, true);
	blk_queue_max_segments(q, skdev->sgs_per_request);
	blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);

	/* set optimal I/O size to 8KB */
	blk_queue_io_opt(q, 8192);

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

	blk_queue_rq_timeout(q, 8 * HZ);
	blk_queue_rq_timed_out(q, skd_timed_out);
	blk_queue_softirq_done(q, skd_softirq_done);

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "stopping queue\n");
	blk_mq_stop_hw_queues(skdev->queue);
	spin_unlock_irqrestore(&skdev->lock, flags);

err_out:
	return rc;
}
#define SKD_N_DEV_TABLE 16u
static u32 skd_next_devno;
static struct skd_device *skd_construct(struct pci_dev *pdev)
{
	struct skd_device *skdev;
	int blk_major = skd_major;
	size_t size;
	int rc;

	skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);

	if (!skdev) {
		dev_err(&pdev->dev, "memory alloc failure\n");
		return NULL;
	}

	skdev->state = SKD_DRVR_STATE_LOAD;
	skdev->pdev = pdev;
	skdev->devno = skd_next_devno++;
	skdev->major = blk_major;
	skdev->dev_max_queue_depth = 0;

	skdev->num_req_context = skd_max_queue_depth;
	skdev->num_fitmsg_context = skd_max_queue_depth;
	skdev->cur_max_queue_depth = 1;
	skdev->queue_low_water_mark = 1;
	skdev->proto_ver = 99;
	skdev->sgs_per_request = skd_sgs_per_request;
	skdev->dbg_level = skd_dbg_level;

	spin_lock_init(&skdev->lock);

	INIT_WORK(&skdev->start_queue, skd_start_queue);
	INIT_WORK(&skdev->completion_worker, skd_completion_worker);

	size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
	skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
						SLAB_HWCACHE_ALIGN, NULL);
	if (!skdev->msgbuf_cache)
		goto err_out;
	WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
		  "skd-msgbuf: %d < %zd\n",
		  kmem_cache_size(skdev->msgbuf_cache), size);
	size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
	skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
						SLAB_HWCACHE_ALIGN, NULL);
	if (!skdev->sglist_cache)
		goto err_out;
	WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
		  "skd-sglist: %d < %zd\n",
		  kmem_cache_size(skdev->sglist_cache), size);
	size = SKD_N_INTERNAL_BYTES;
	skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
						 SLAB_HWCACHE_ALIGN, NULL);
	if (!skdev->databuf_cache)
		goto err_out;
	WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
		  "skd-databuf: %d < %zd\n",
		  kmem_cache_size(skdev->databuf_cache), size);

	dev_dbg(&skdev->pdev->dev, "skcomp\n");
	rc = skd_cons_skcomp(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "skmsg\n");
	rc = skd_cons_skmsg(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "sksb\n");
	rc = skd_cons_sksb(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "disk\n");
	rc = skd_cons_disk(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "VICTORY\n");
	return skdev;

err_out:
	dev_dbg(&skdev->pdev->dev, "construct failed\n");
	skd_destruct(skdev);
	return NULL;
}
/*
 *****************************************************************************
 * DESTRUCT (FREE)
 *****************************************************************************
 */
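/*
 * Teardown runs in roughly the reverse order of skd_construct(). Since
 * skd_destruct() also serves as the error path for a partially built
 * device, each helper below tolerates fields that were never allocated.
 */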
static void skd_free_skcomp(struct skd_device *skdev)
{
	if (skdev->skcomp_table)
		pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
				    skdev->skcomp_table, skdev->cq_dma_address);

	skdev->skcomp_table = NULL;
	skdev->cq_dma_address = 0;
}
static void skd_free_skmsg(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skmsg_table == NULL)
		return;

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		if (skmsg->msg_buf != NULL) {
			pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
					    skmsg->msg_buf,
					    skmsg->mb_dma_address);
		}
		skmsg->msg_buf = NULL;
		skmsg->mb_dma_address = 0;
	}

	kfree(skdev->skmsg_table);
	skdev->skmsg_table = NULL;
}
static void skd_free_sksb(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
		     skspcl->db_dma_address, DMA_BIDIRECTIONAL);

	skspcl->data_buf = NULL;
	skspcl->db_dma_address = 0;

	skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
		     skspcl->mb_dma_address, DMA_TO_DEVICE);

	skspcl->msg_buf = NULL;
	skspcl->mb_dma_address = 0;

	skd_free_sg_list(skdev, skspcl->req.sksg_list,
			 skspcl->req.sksg_dma_address);

	skspcl->req.sksg_list = NULL;
	skspcl->req.sksg_dma_address = 0;
}
static void skd_free_disk(struct skd_device *skdev)
{
	struct gendisk *disk = skdev->disk;

	if (disk && (disk->flags & GENHD_FL_UP))
		del_gendisk(disk);

	if (skdev->queue) {
		blk_cleanup_queue(skdev->queue);
		skdev->queue = NULL;
		if (disk)
			disk->queue = NULL;
	}

	if (skdev->tag_set.tags)
		blk_mq_free_tag_set(&skdev->tag_set);

	put_disk(disk);
	skdev->disk = NULL;
}
static void skd_destruct(struct skd_device *skdev)
{
	if (skdev == NULL)
		return;

	cancel_work_sync(&skdev->start_queue);

	dev_dbg(&skdev->pdev->dev, "disk\n");
	skd_free_disk(skdev);

	dev_dbg(&skdev->pdev->dev, "sksb\n");
	skd_free_sksb(skdev);

	dev_dbg(&skdev->pdev->dev, "skmsg\n");
	skd_free_skmsg(skdev);

	dev_dbg(&skdev->pdev->dev, "skcomp\n");
	skd_free_skcomp(skdev);

	kmem_cache_destroy(skdev->databuf_cache);
	kmem_cache_destroy(skdev->sglist_cache);
	kmem_cache_destroy(skdev->msgbuf_cache);

	dev_dbg(&skdev->pdev->dev, "skdev\n");
	kfree(skdev);
}
/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */
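/*
 * The s1120 has no real cylinder/head/sector geometry; getgeo synthesizes
 * a conventional 255 sectors x 64 heads layout from the reported capacity,
 * presumably for the benefit of legacy partitioning tools.
 */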
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct skd_device *skdev;
	u64 capacity;

	skdev = bdev->bd_disk->private_data;

	dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
		bdev->bd_disk->disk_name, current->comm);

	if (skdev->read_cap_is_valid) {
		capacity = get_capacity(skdev->disk);
		geo->heads = 64;
		geo->sectors = 255;
		geo->cylinders = (capacity) / (255 * 64);

		return 0;
	}
	return -EIO;
}

static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
	dev_dbg(&skdev->pdev->dev, "add_disk\n");
	device_add_disk(parent, skdev->disk);
	return 0;
}

static const struct block_device_operations skd_blockdev_ops = {
	.owner		= THIS_MODULE,
	.getgeo		= skd_bdev_getgeo,
};
/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */
static const struct pci_device_id skd_pci_tbl[] = {
	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0 }                    /* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
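/*
 * Format a human-readable link description by reading the Link Status
 * register (offset 0x12 in the PCIe capability): bits 3:0 encode the link
 * speed (1 = 2.5GT/s, 2 = 5.0GT/s), bits 9:4 the negotiated width.
 */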
static char *skd_pci_info(struct skd_device *skdev, char *str)
{
	int pcie_reg;

	strcpy(str, "PCIe (");
	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;
		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (0xF);
		lwidth = (pcie_lstat & 0x3F0) >> 4;

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	} else
		strcat(str, "Legacy)");

	return str;
}
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
		pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			dev_err(&pdev->dev, "consistent DMA mask error %d\n",
				rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		BUG_ON(!rc);
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	dev_info(&pdev->dev, "%s 64bit\n", pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev,
				"Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(&pdev->dev, skdev);
		rc = 0;
	} else {
		/* we timed out, something is wrong with the device,
		   don't add the disk structure */
		dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
			rc);
		/* in case of no error; we timeout with ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (skdev == NULL) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}

	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (skdev == NULL) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	skd_stop_device(skdev);

	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (skdev == NULL) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			dev_err(&pdev->dev, "consistent DMA mask error %d\n",
				rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev, "Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	dev_err(&pdev->dev, "%s called\n", __func__);

	skdev = pci_get_drvdata(pdev);
	if (skdev == NULL) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}

	dev_err(&pdev->dev, "calling stop\n");
	skd_stop_device(skdev);
}
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};
/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	default:
		return "???";
	}
}
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
	dev_dbg(&skdev->pdev->dev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
	dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
		skd_in_flight(skdev), skdev->cur_max_queue_depth,
		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	dev_dbg(&skdev->pdev->dev, "  cycle=%d cycle_ix=%d\n",
		skdev->skcomp_cycle, skdev->skcomp_ix);
}
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	u32 lba = blk_rq_pos(req);
	u32 count = blk_rq_sectors(req);

	dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
	dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
		skreq->fitmsg_id);
	dev_dbg(&skdev->pdev->dev, "  sg_dir=%d n_sg=%d\n",
		skreq->data_dir, skreq->n_sg);

	dev_dbg(&skdev->pdev->dev,
		"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
		count, count, (int)rq_data_dir(req));
}
/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
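/*
 * The BUILD_BUG_ONs below pin the on-the-wire FIT structure sizes and
 * offsets at compile time; any change to skd_s1120.h that alters the
 * protocol layout fails the build instead of corrupting commands.
 */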
static int __init skd_init(void)
{
	BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
	BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
	BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
	BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
	BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
	BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 ||
	    skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
{
	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);