/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and
 * HGST was acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/stringify.h>
#include <linux/slab_def.h>
#include <scsi/scsi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;
#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define PFX DRV_NAME ": "
MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");

#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001

#define SKD_FUA_NV (1 << 1)
#define SKD_MINORS_PER_DEVICE 16

#define SKD_MAX_QUEUE_DEPTH 200u

#define SKD_PAUSE_TIMEOUT (5 * 1000)

#define SKD_N_FITMSG_BYTES (512u)
#define SKD_MAX_REQ_PER_MSG 14

#define SKD_N_SPECIAL_FITMSG_BYTES (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u

#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)

#define SKD_N_INTERNAL_BYTES (512u)

#define SKD_SKCOMP_SIZE \
	((sizeof(struct fit_completion_entry_v1) + \
	  sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
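/*
 * Example (illustrative, not from the original source): a request in slot 5
 * of the r/w table starts life as id = SKD_ID_RW_REQUEST | 0x05 = 0x0005.
 * Each time the slot is reused the id is bumped by SKD_ID_INCR (0x400), so
 * the bits above the slot/table fields act as a generation counter.
 * skd_isr_completion_posted() extracts the slot with
 * SKD_ID_SLOT_AND_TABLE_MASK and then compares the full id, so a stale
 * completion carrying an id from a previous generation is rejected.
 */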
#define SKD_N_MAX_SECTORS 2048u

#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36
enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};
#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u
enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
};
enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};
struct skd_msg_buf {
	struct fit_msg_hdr fmh;
	struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
};
struct skd_fitmsg_context {
	u32 id;

	u32 length;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};
struct skd_request_context {
	enum skd_req_state state;

	u16 id;
	u32 fitmsg_id;

	u8 flush_cmd;

	enum dma_data_direction data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;

	blk_status_t status;
};
struct skd_special_context {
	struct skd_request_context req;

	void *data_buf;
	dma_addr_t db_dma_address;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};
typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;
#define SKD_MAX_BARS 2

struct skd_device {
	void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct request_queue *queue;
	struct skd_fitmsg_context *skmsg;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	u32 devno;

	enum skd_drvr_state state;
	u32 drive_state;

	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	struct skd_fitmsg_context *skmsg_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13];	/* 12 chars plus null term */

	u32 skcomp_cycle;
	u32 skcomp_ix;
	struct kmem_cache *msgbuf_cache;
	struct kmem_cache *sglist_cache;
	struct kmem_cache *databuf_cache;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;

	int sgs_per_request;
	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;

	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	struct work_struct start_queue;
	struct work_struct completion_worker;
};
#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
			val);
}
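/*
 * Note that all three accessors above index mem_map[1]: the FIT register
 * file (FIT_STATUS, FIT_CONTROL, the command doorbell, etc.) is reached
 * through the device's second BAR. SKD_MAX_BARS is 2 because, presumably,
 * the first BAR is also mapped during probe; that path is outside this
 * excerpt.
 */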
#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==2)");
#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time. IGNORED");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);
/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
	int *count = data;

	(*count)++;
}

static int skd_in_flight(struct skd_device *skdev)
{
	int count = 0;

	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);

	return count;
}
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = READ_10;
	else
		scsi_req->cdb[0] = WRITE_10;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}
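/*
 * The shifts above are open-coded big-endian packing of the standard
 * READ_10/WRITE_10 CDB: a 32-bit LBA at bytes 2-5 and a 16-bit transfer
 * length at bytes 7-8. With <asm/unaligned.h> already included, an
 * equivalent formulation (a sketch, not the driver's actual code) is:
 *
 *	put_unaligned_be32(lba, &scsi_req->cdb[2]);
 *	put_unaligned_be16(count, &scsi_req->cdb[7]);
 */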
static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}
/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
	/* In case of starting, we haven't started the queue,
	 * so we can't get here... but requests are
	 * possibly hanging out waiting for us because we
	 * reported the /dev/skd0 already. They'll wait
	 * forever if connect doesn't complete.
	 * What to do??? delay /dev/skd0 ??
	 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		return false;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		return true;
	}
}
static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *mqd)
{
	struct request *const req = mqd->rq;
	struct request_queue *const q = req->q;
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg;
	struct fit_msg_hdr *fmh;
	const u32 tag = blk_mq_unique_tag(req);
	struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
	struct skd_scsi_request *scsi_req;
	unsigned long flags = 0;
	const u32 lba = blk_rq_pos(req);
	const u32 count = blk_rq_sectors(req);
	const int data_dir = rq_data_dir(req);

	if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
		return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;

	blk_mq_start_request(req);

	WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
		  tag, skd_max_queue_depth, q->nr_requests);

	SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);

	dev_dbg(&skdev->pdev->dev,
		"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
		lba, count, count, data_dir);

	skreq->id = tag + SKD_ID_RW_REQUEST;
	skreq->flush_cmd = 0;
	skreq->n_sg = 0;
	skreq->sg_byte_count = 0;

	skreq->fitmsg_id = 0;

	skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
		dev_dbg(&skdev->pdev->dev, "error Out\n");
		skd_end_request(skdev, blk_mq_rq_from_pdu(skreq),
				BLK_STS_RESOURCE);
		return BLK_STS_OK;
	}

	dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
				   skreq->n_sg *
				   sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);

	/* Either a FIT msg is in progress or we have to start one. */
	if (skd_max_req_per_msg == 1) {
		skmsg = NULL;
	} else {
		spin_lock_irqsave(&skdev->lock, flags);
		skmsg = skdev->skmsg;
	}
	if (!skmsg) {
		skmsg = &skdev->skmsg_table[tag];
		skdev->skmsg = skmsg;

		/* Initialize the FIT msg header */
		fmh = &skmsg->msg_buf->fmh;
		memset(fmh, 0, sizeof(*fmh));
		fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
		skmsg->length = sizeof(*fmh);
	} else {
		fmh = &skmsg->msg_buf->fmh;
	}

	skreq->fitmsg_id = skmsg->id;

	scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
	memset(scsi_req, 0, sizeof(*scsi_req));

	scsi_req->hdr.tag = skreq->id;
	scsi_req->hdr.sg_list_dma_address =
		cpu_to_be64(skreq->sksg_dma_address);

	if (req_op(req) == REQ_OP_FLUSH) {
		skd_prep_zerosize_flush_cdb(scsi_req, skreq);
		SKD_ASSERT(skreq->flush_cmd == 1);
	} else {
		skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
	}

	if (req->cmd_flags & REQ_FUA)
		scsi_req->cdb[1] |= SKD_FUA_NV;

	scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);

	/* Complete resource allocations. */
	skreq->state = SKD_REQ_STATE_BUSY;

	skmsg->length += sizeof(struct skd_scsi_request);
	fmh->num_protocol_cmds_coalesced++;

	dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
		skd_in_flight(skdev));

	/*
	 * If the FIT msg buffer is full send it.
	 */
	if (skd_max_req_per_msg == 1) {
		skd_send_fitmsg(skdev, skmsg);
	} else {
		if (mqd->last ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skdev->skmsg = NULL;
		}
		spin_unlock_irqrestore(&skdev->lock, flags);
	}

	return BLK_STS_OK;
}
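/*
 * The blk_mq_ops table that wires these callbacks up is elided from this
 * excerpt. Based on the handlers defined here, it presumably looks
 * something like the sketch below (assembled from this excerpt's handler
 * names; not confirmed against the original source):
 *
 *	static const struct blk_mq_ops skd_mq_ops = {
 *		.queue_rq	= skd_mq_queue_rq,
 *		.complete	= skd_softirq_done,
 *		.timeout	= skd_timed_out,
 *	};
 */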
static enum blk_eh_timer_return skd_timed_out(struct request *req,
					      bool reserved)
{
	struct skd_device *skdev = req->q->queuedata;

	dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
		blk_mq_unique_tag(req));

	return BLK_EH_RESET_TIMER;
}
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t error)
{
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

	if (unlikely(error)) {
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_err(&skdev->pdev->dev,
			"Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
			count, req->tag);
	} else
		dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", req->tag,
			error);

	skreq->status = error;
	blk_mq_complete_request(req);
}

static void skd_softirq_done(struct request *req)
{
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, skreq->status);
}
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	struct scatterlist *sgl = &skreq->sg[0], *sg;
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
		     skreq->data_dir != DMA_FROM_DEVICE);

	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for_each_sg(sgl, sg, n_sg, i) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(sg);
		uint64_t dma_addr = sg_dma_address(sg);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}
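/*
 * skd_postop_sg_list() can restore just the final next_desc_ptr because the
 * descriptor list is pre-linked once at allocation time: each element i
 * points at the DMA address of element i + 1. A sketch of that setup loop
 * (the allocation path itself is elided from this excerpt):
 *
 *	for (i = 0; i < n_sg; i++)
 *		sksg_list[i].next_desc_ptr =
 *			sksg_dma_address + (i + 1) * sizeof(*sksg_list);
 *
 * skd_preop_sg_list() then only has to cut the chain at the last element
 * actually used for a given request.
 */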
/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_start_queue(struct work_struct *work)
{
	struct skd_device *skdev = container_of(work, typeof(*skdev),
						start_queue);

	/*
	 * Although it is safe to call blk_start_queue() from interrupt
	 * context, blk_mq_start_hw_queues() must not be called from
	 * interrupt context.
	 */
	blk_mq_start_hw_queues(skdev->queue);
}
static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE)
		skd_timer_tick_not_online(skdev);

	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}
static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now.
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timedout=%d, restarting device.",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes.
			 */
			skd_recover_requests(skdev);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev);
		}

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}
static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}
/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */

static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = &skspcl->msg_buf->fmh;
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = &skspcl->msg_buf->scsi[0];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	skspcl->req.n_sg = 1;
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}
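/*
 * Internal commands ride on a prebuilt single-command FIT message with a
 * one-element SG list that always points at data_buf; only the CDB and the
 * byte counts change per opcode in skd_send_internal_skspcl() below, so the
 * same message is reused for the whole probe sequence without reallocation.
 */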
#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	scsi = &skspcl->msg_buf->scsi[0];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;
	}

	skd_send_special_fitmsg(skdev, skspcl);
}
static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}
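/*
 * The refresh kicked off above runs a chain of internal commands, each step
 * advanced by skd_complete_internal() below:
 *
 *   TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern verified by
 *   skd_chk_read_buf()) -> READ_CAPACITY -> INQUIRY (serial number) ->
 *   skd_unquiesce_dev() brings the device ONLINE.
 *
 * A W/R buffer mismatch restarts the chain via soft reset, up to
 * SKD_MAX_CONNECT_RETRIES times.
 */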
static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}

static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
	}
}
static void skd_complete_internal(struct skd_device *skdev,
				  struct fit_completion_entry_v1 *skcomp,
				  struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	dma_sync_single_for_cpu(&skdev->pdev->dev,
				skspcl->db_dma_address,
				skspcl->req.sksg_list[0].byte_count,
				DMA_BIDIRECTIONAL);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");
					return;
				}
			}
		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}
/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;

	dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
		skmsg->mb_dma_address, skd_in_flight(skdev));
	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;
		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;

	dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
				   skmsg->length, DMA_TO_DEVICE);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
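/*
 * Because the message buffer is aligned well past the low bits used here,
 * the bottom of its DMA address is free to carry side-band information: the
 * doorbell write above ORs the queue id and a message-size code into qcmd,
 * e.g. (illustrative):
 *
 *	qcmd = mb_dma_address | FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_256;
 *
 * so a single 64-bit register write both locates and describes the message.
 * The alignment guarantee itself comes from the allocation path, which is
 * elided from this excerpt.
 */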
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	WARN_ON_ONCE(skspcl->req.n_sg != 1);

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
				   SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->req.sksg_dma_address,
				   1 * sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->db_dma_address,
				   skspcl->req.sksg_list[0].byte_count,
				   DMA_BIDIRECTIONAL);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */

static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr);

struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good status */
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	/* Smart alerts */
	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};
/*
 * Look up status and sense data to decide how to handle the error.
 *
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */
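/*
 * Worked example: the first table entry above uses mask 0x1c, i.e.
 * 0x10 | 0x08 | 0x04, so skd_check_status() compares type, stat and key but
 * ignores asc and ascq; any RECOVERED_ERROR sense with type 0x70 and status
 * 0x02 is reported good regardless of asc/ascq.
 */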
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, struct fit_comp_error_info *skerr)
{
	int i;

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
		skerr->fruc);

	/* Does the info match an entry in the good category? */
	for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
}
static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq,
				      struct request *req)
{
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skd_end_request(skdev, req, BLK_STS_OK);
		break;

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
		blk_mq_requeue_request(req, true);
		dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
		skd_quiesce_dev(skdev);
		break;

	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
		if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
			skd_log_skreq(skdev, skreq, "retry");
			blk_mq_requeue_request(req, true);
			break;
		}
		/* fall through */

	case SKD_CHECK_STATUS_REPORT_ERROR:
	default:
		skd_end_request(skdev, req, BLK_STS_IOERR);
		break;
	}
}
static void skd_release_skreq(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	/*
	 * Reclaim the skd_request_context
	 */
	skreq->state = SKD_REQ_STATE_IDLE;
	skreq->id += SKD_ID_INCR;
}
static int skd_isr_completion_posted(struct skd_device *skdev,
				     int limit, int *enqueued)
{
	struct fit_completion_entry_v1 *skcmp;
	struct fit_comp_error_info *skerr;
	u16 req_id;
	u32 tag;
	u16 hwq = 0;
	struct request *rq;
	struct skd_request_context *skreq;
	u16 cmp_cntxt;
	u8 cmp_status;
	u8 cmp_cycle;
	u32 cmp_bytes;
	int rc = 0;
	int processed = 0;

	lockdep_assert_held(&skdev->lock);

	for (;;) {
		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		dev_dbg(&skdev->pdev->dev,
			"cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
			skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
			cmp_cntxt, cmp_status, skd_in_flight(skdev),
			cmp_bytes, skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			dev_dbg(&skdev->pdev->dev, "end of completions\n");
			break;
		}

		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count. 8-bit wrap-around.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++;
		}
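		/*
		 * The cycle byte is what lets the host scan the queue without
		 * a head/tail register read: the firmware stamps each entry
		 * with the current pass's cycle value, and the expected value
		 * (skcomp_cycle) is incremented after every wrap-around, so an
		 * entry left over from the previous pass fails the cmp_cycle
		 * check above and scanning stops.
		 */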
		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_mq_queue_rq() above) or a special
		 * request.
		 */
		req_id = cmp_cntxt;
		tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		/* Is this other than a r/w request? */
		if (tag >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
						      tag));
			skd_complete_other(skdev, skcmp, skerr);
			continue;
		}

		rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
		if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
			 tag))
			continue;
		skreq = blk_mq_rq_to_pdu(rq);

		/*
		 * Make sure the request ID for the slot matches.
		 */
		if (skreq->id != req_id) {
			dev_err(&skdev->pdev->dev,
				"Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
				req_id, skreq->id, cmp_cntxt);

			continue;
		}

		SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);

		skreq->completion = *skcmp;
		if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
			skreq->err_info = *skerr;
			skd_log_check_status(skdev, cmp_status, skerr->key,
					     skerr->code, skerr->qual,
					     skerr->fruc);
		}
		/* Release DMA resources for the request. */
		if (skreq->n_sg > 0)
			skd_postop_sg_list(skdev, skreq);

		skd_release_skreq(skdev, skreq);

		/*
		 * Capture the outcome and post it back to the native request.
		 */
		if (likely(cmp_status == SAM_STAT_GOOD))
			skd_end_request(skdev, rq, BLK_STS_OK);
		else
			skd_resolve_req_exception(skdev, skreq, rq);

		/* skd_isr_comp_limit equal to zero means no limit */
		if (limit) {
			if (++processed >= limit) {
				rc = 1;
				break;
			}
		}
	}

	if (skdev->state == SKD_DRVR_STATE_PAUSING &&
	    skd_in_flight(skdev) == 0) {
		skdev->state = SKD_DRVR_STATE_PAUSED;
		wake_up_interruptible(&skdev->waitq);
	}

	return rc;
}
static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr)
{
	u32 req_id = 0;
	u32 req_table;
	u32 req_slot;
	struct skd_special_context *skspcl;

	lockdep_assert_held(&skdev->lock);

	req_id = skcomp->tag;
	req_table = req_id & SKD_ID_TABLE_MASK;
	req_slot = req_id & SKD_ID_SLOT_MASK;

	dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
		req_id, req_slot);

	/*
	 * Based on the request id, determine how to dispatch this completion.
	 * This switch/case is finding the good cases and forwarding the
	 * completion entry. Errors are reported below the switch.
	 */
	switch (req_table) {
	case SKD_ID_RW_REQUEST:
		/*
		 * The caller, skd_isr_completion_posted() above,
		 * handles r/w requests. The only way we get here
		 * is if the req_slot is out of bounds.
		 */
		break;

	case SKD_ID_INTERNAL:
		if (req_slot == 0) {
			skspcl = &skdev->internal_skspcl;
			if (skspcl->req.id == req_id &&
			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
				skd_complete_internal(skdev,
						      skcomp, skerr, skspcl);
				return;
			}
		}
		break;

	case SKD_ID_FIT_MSG:
		/*
		 * These id's should never appear in a completion record.
		 */
		break;

	default:
		/*
		 * These id's should never appear anywhere;
		 */
		break;
	}

	/*
	 * If we get here it is a bad or stale id.
	 */
}
static void skd_reset_skcomp(struct skd_device *skdev)
{
	memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);

	skdev->skcomp_ix = 0;
	skdev->skcomp_cycle = 1;
}
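/*
 * Note the pairing with the cycle check in skd_isr_completion_posted(): the
 * table is zeroed here and the expected cycle starts at 1, so every freshly
 * reset entry (cycle == 0) compares unequal and is treated as "no completion
 * yet".
 */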
/*
 *****************************************************************************
 * INTERRUPTS
 *****************************************************************************
 */

static void skd_completion_worker(struct work_struct *work)
{
	struct skd_device *skdev =
		container_of(work, struct skd_device, completion_worker);
	unsigned long flags;
	int flush_enqueued = 0;

	spin_lock_irqsave(&skdev->lock, flags);

	/*
	 * pass in limit=0, which means no limit..
	 * process everything in compq
	 */
	skd_isr_completion_posted(skdev, 0, &flush_enqueued);
	schedule_work(&skdev->start_queue);

	spin_unlock_irqrestore(&skdev->lock, flags);
}
static void skd_isr_msg_from_dev(struct skd_device *skdev);

static irqreturn_t
skd_isr(int irq, void *ptr)
{
	struct skd_device *skdev = ptr;
	u32 intstat;
	u32 ack;
	int rc = 0;
	int deferred = 0;
	int flush_enqueued = 0;

	spin_lock(&skdev->lock);

	for (;;) {
		intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

		ack = FIT_INT_DEF_MASK;
		ack &= intstat;

		dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
			ack);

		/* As long as there is an int pending on device, keep
		 * running loop. When none, get out, but if we've never
		 * done any processing, call completion handler?
		 */
		if (ack == 0) {
			/* No interrupts on device, but run the completion
			 * processor anyway?
			 */
			if (rc == 0)
				if (likely(skdev->state
					   == SKD_DRVR_STATE_ONLINE))
					deferred = 1;
			break;
		}

		rc = IRQ_HANDLED;

		SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);

		if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
			   (skdev->state != SKD_DRVR_STATE_STOPPING))) {
			if (intstat & FIT_ISH_COMPLETION_POSTED) {
				/*
				 * If we have already deferred completion
				 * processing, don't bother running it again
				 */
				if (deferred == 0)
					deferred =
						skd_isr_completion_posted(skdev,
						skd_isr_comp_limit,
						&flush_enqueued);
			}

			if (intstat & FIT_ISH_FW_STATE_CHANGE) {
				skd_isr_fwstate(skdev);
				if (skdev->state == SKD_DRVR_STATE_FAULT ||
				    skdev->state ==
				    SKD_DRVR_STATE_DISAPPEARED) {
					spin_unlock(&skdev->lock);
					return rc;
				}
			}

			if (intstat & FIT_ISH_MSG_FROM_DEV)
				skd_isr_msg_from_dev(skdev);
		}
	}

	if (unlikely(flush_enqueued))
		schedule_work(&skdev->start_queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		schedule_work(&skdev->start_queue);

	spin_unlock(&skdev->lock);

	return rc;
}
static void skd_drive_fault(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_FAULT;
	dev_err(&skdev->pdev->dev, "Drive FAULT\n");
}

static void skd_drive_disappeared(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_DISAPPEARED;
	dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
}
static void skd_isr_fwstate(struct skd_device *skdev)
{
	u32 sense;
	u32 state;
	u32 mtd;
	int prev_driver_state = skdev->state;

	sense = SKD_READL(skdev, FIT_STATUS);
	state = sense & FIT_SR_DRIVE_STATE_MASK;

	dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_drive_state_to_str(state), state);

	skdev->drive_state = state;

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_INIT:
		if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
			skd_disable_interrupts(skdev);
			break;
		}
		if (skdev->state == SKD_DRVR_STATE_RESTARTING)
			skd_recover_requests(skdev);
		if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
			skdev->timer_countdown = SKD_STARTING_TIMO;
			skdev->state = SKD_DRVR_STATE_STARTING;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_SR_DRIVE_ONLINE:
		skdev->cur_max_queue_depth = skd_max_queue_depth;
		if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
			skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;

		skdev->queue_low_water_mark =
			skdev->cur_max_queue_depth * 2 / 3 + 1;
		if (skdev->queue_low_water_mark < 1)
			skdev->queue_low_water_mark = 1;

		dev_info(&skdev->pdev->dev,
			 "Queue depth limit=%d dev=%d lowat=%d\n",
			 skdev->cur_max_queue_depth,
			 skdev->dev_max_queue_depth,
			 skdev->queue_low_water_mark);

		skd_refresh_device_data(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		skd_quiesce_dev(skdev);
		break;
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		/* set timer for 3 seconds, we'll abort any unfinished
		 * commands after that expires
		 */
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_TIMER_SECONDS(3);
		schedule_work(&skdev->start_queue);
		break;
	case FIT_SR_DRIVE_BUSY_ERASE:
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		break;
	case FIT_SR_DRIVE_OFFLINE:
		skdev->state = SKD_DRVR_STATE_IDLE;
		break;
	case FIT_SR_DRIVE_SOFT_RESET:
		switch (skdev->state) {
		case SKD_DRVR_STATE_STARTING:
		case SKD_DRVR_STATE_RESTARTING:
			/* Expected by a caller of skd_soft_reset() */
			break;
		default:
			skdev->state = SKD_DRVR_STATE_RESTARTING;
			break;
		}
		break;
	case FIT_SR_DRIVE_FW_BOOTING:
		dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_DEGRADED:
	case FIT_SR_PCIE_LINK_DOWN:
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		break;

	case FIT_SR_DRIVE_FAULT:
		skd_drive_fault(skdev);
		skd_recover_requests(skdev);
		schedule_work(&skdev->start_queue);
		break;

	/* PCIe bus returned all Fs? */
	case 0xFF:
		dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
			 sense);
		skd_drive_disappeared(skdev);
		skd_recover_requests(skdev);
		schedule_work(&skdev->start_queue);
		break;
	default:
		/*
		 * Unknown FW State. Wait for a state we recognize.
		 */
		break;
	}
	dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
		skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
}
static void skd_recover_request(struct request *req, void *data, bool reserved)
{
	struct skd_device *const skdev = data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

	if (skreq->state != SKD_REQ_STATE_BUSY)
		return;

	skd_log_skreq(skdev, skreq, "recover");

	/* Release DMA resources for the request. */
	if (skreq->n_sg > 0)
		skd_postop_sg_list(skdev, skreq);

	skreq->state = SKD_REQ_STATE_IDLE;
	skd_end_request(skdev, req, BLK_STS_IOERR);
}

static void skd_recover_requests(struct skd_device *skdev)
{
	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
}
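/*
 * Message-to-device / message-from-device handshake. Each SKD_WRITEL of an
 * mtd below is acknowledged by the device through FIT_MSG_FROM_DEVICE and
 * advances the connect sequence one step:
 *
 *   FITFW_INIT (protocol version check) -> GET_CMDQ_DEPTH ->
 *   SET_COMPQ_DEPTH -> SET_COMPQ_ADDR -> CMD_LOG_HOST_ID ->
 *   CMD_LOG_TIME_STAMP_LO -> CMD_LOG_TIME_STAMP_HI -> ARM_QUEUE
 *
 * last_mtd records the outstanding step so that stray acks are ignored.
 */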
static void skd_isr_msg_from_dev(struct skd_device *skdev)
{
	u32 mfd;
	u32 mtd;
	u32 data;

	mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);

	dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
		skdev->last_mtd);

	/* ignore any mtd that is an ack for something we didn't send */
	if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
		return;

	switch (FIT_MXD_TYPE(mfd)) {
	case FIT_MTD_FITFW_INIT:
		skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);

		if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
			dev_err(&skdev->pdev->dev, "protocol mismatch\n");
			dev_err(&skdev->pdev->dev, "  got=%d support=%d\n",
				skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
			dev_err(&skdev->pdev->dev, "  please upgrade driver\n");
			skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_GET_CMDQ_DEPTH:
		skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
				   SKD_N_COMPLETION_ENTRY);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_DEPTH:
		SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_ADDR:
		skd_reset_skcomp(skdev);
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_HOST_ID:
		skdev->connect_time_stamp = get_seconds();
		data = skdev->connect_time_stamp & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
		skdev->drive_jiffies = FIT_MXD_DATA(mfd);
		data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
		skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
		mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;

		dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
			skdev->connect_time_stamp, skdev->drive_jiffies);
		break;

	case FIT_MTD_ARM_QUEUE:
		skdev->last_mtd = 0;
		/*
		 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
		 */
		break;

	default:
		break;
	}
}
static void skd_disable_interrupts(struct skd_device *skdev)
{
	u32 sense;

	sense = SKD_READL(skdev, FIT_CONTROL);
	sense &= ~FIT_CR_ENABLE_INTERRUPTS;
	SKD_WRITEL(skdev, sense, FIT_CONTROL);
	dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);

	/* Note that the 1s is written. A 1-bit means
	 * disable, a 0 means enable.
	 */
	SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
}

static void skd_enable_interrupts(struct skd_device *skdev)
{
	u32 val;

	/* unmask interrupts first */
	val = FIT_ISH_FW_STATE_CHANGE +
	      FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;

	/* Note that the complement of mask is written. A 1-bit means
	 * disable, a 0 means enable. */
	SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
	dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= FIT_CR_ENABLE_INTERRUPTS;
	dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}
/*
 *****************************************************************************
 * START, STOP, RESTART, QUIESCE, UNQUIESCE
 *****************************************************************************
 */
static void skd_soft_reset(struct skd_device *skdev)
{
	u32 val;

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= (FIT_CR_SOFT_RESET);
	dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}
static void skd_start_device(struct skd_device *skdev)
{
	unsigned long flags;
	u32 sense;
	u32 state;

	spin_lock_irqsave(&skdev->lock, flags);

	/* ack all ghost interrupts */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

	sense = SKD_READL(skdev, FIT_STATUS);

	dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);

	state = sense & FIT_SR_DRIVE_STATE_MASK;
	skdev->drive_state = state;
	skdev->last_mtd = 0;

	skdev->state = SKD_DRVR_STATE_STARTING;
	skdev->timer_countdown = SKD_STARTING_TIMO;

	skd_enable_interrupts(skdev);

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_OFFLINE:
		dev_err(&skdev->pdev->dev, "Drive offline...\n");
		break;

	case FIT_SR_DRIVE_FW_BOOTING:
		dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_BUSY_SANITIZE:
		dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_BUSY_ERASE:
		dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_INIT:
	case FIT_SR_DRIVE_ONLINE:
		skd_soft_reset(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		dev_err(&skdev->pdev->dev, "Drive Busy...\n");
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_SOFT_RESET:
		dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
		break;

	case FIT_SR_DRIVE_FAULT:
		/* Fault state is bad...soft reset won't do it...
		 * Hard reset, maybe, but does it work on device?
		 * For now, just fault so the system doesn't hang.
		 */
		skd_drive_fault(skdev);
		/* start the queue so we can respond with error to requests */
		dev_dbg(&skdev->pdev->dev, "starting queue\n");
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case 0xFF:
		/* Most likely the device isn't there or isn't responding
		 * to the BAR1 addresses.
		 */
		skd_drive_disappeared(skdev);
		/* start the queue so we can respond with error to requests */
		dev_dbg(&skdev->pdev->dev,
			"starting queue to error-out reqs\n");
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
			skdev->drive_state);
		break;
	}

	state = SKD_READL(skdev, FIT_CONTROL);
	dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);

	state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
	dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);

	state = SKD_READL(skdev, FIT_INT_MASK_HOST);
	dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);

	state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
	dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);

	state = SKD_READL(skdev, FIT_HW_VERSION);
	dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);

	spin_unlock_irqrestore(&skdev->lock, flags);
}
static void skd_stop_device(struct skd_device *skdev)
{
	unsigned long flags;
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	u32 dev_state;
	int i;

	spin_lock_irqsave(&skdev->lock, flags);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
		goto stop_out;
	}

	if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
		dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
		goto stop_out;
	}

	skdev->state = SKD_DRVR_STATE_SYNCING;
	skdev->sync_done = 0;

	skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);

	spin_unlock_irqrestore(&skdev->lock, flags);

	wait_event_interruptible_timeout(skdev->waitq,
					 (skdev->sync_done), (10 * HZ));

	spin_lock_irqsave(&skdev->lock, flags);

	switch (skdev->sync_done) {
	case 0:
		dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
		break;
	case 1:
		dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
		break;
	default:
		dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
	}

stop_out:
	skdev->state = SKD_DRVR_STATE_STOPPING;
	spin_unlock_irqrestore(&skdev->lock, flags);

	skd_kill_timer(skdev);

	spin_lock_irqsave(&skdev->lock, flags);
	skd_disable_interrupts(skdev);

	/* ensure all ints on device are cleared */
	/* soft reset the device to unload with a clean slate */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
	SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);

	spin_unlock_irqrestore(&skdev->lock, flags);

	/* poll every 100ms, 1 second timeout */
	for (i = 0; i < 10; i++) {
		dev_state =
			SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
		if (dev_state == FIT_SR_DRIVE_INIT)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(100));
	}

	if (dev_state != FIT_SR_DRIVE_INIT)
		dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
			dev_state);
}
/* assume spinlock is held */
static void skd_restart_device(struct skd_device *skdev)
{
	u32 state;

	/* ack all ghost interrupts */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

	state = SKD_READL(skdev, FIT_STATUS);

	dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);

	state &= FIT_SR_DRIVE_STATE_MASK;
	skdev->drive_state = state;
	skdev->last_mtd = 0;

	skdev->state = SKD_DRVR_STATE_RESTARTING;
	skdev->timer_countdown = SKD_RESTARTING_TIMO;

	skd_soft_reset(skdev);
}

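/*
 * skd_quiesce_dev() and skd_unquiesce_dev() form a pair: the former stops
 * the blk-mq hardware queues while the drive is busy, the latter restarts
 * them once the drive reports ONLINE again. Both expect the caller to
 * hold skdev->lock.
 */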
/* assume spinlock is held */
static int skd_quiesce_dev(struct skd_device *skdev)
{
	int rc = 0;

	switch (skdev->state) {
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		dev_dbg(&skdev->pdev->dev, "stopping queue\n");
		blk_mq_stop_hw_queues(skdev->queue);
		break;
	case SKD_DRVR_STATE_ONLINE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_RESUMING:
	default:
		rc = -EINVAL;
		dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
			skdev->state);
	}
	return rc;
}

/* assume spinlock is held */
static int skd_unquiesce_dev(struct skd_device *skdev)
{
	int prev_driver_state = skdev->state;

	skd_log_skdev(skdev, "unquiesce");
	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
		dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
		return 0;
	}
	if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
		/*
		 * If there has been a state change to other than
		 * ONLINE, we will rely on controller state change
		 * to come back online and restart the queue.
		 * The BUSY state means that the driver is ready to
		 * continue normal processing but waiting for the
		 * controller to become available.
		 */
		skdev->state = SKD_DRVR_STATE_BUSY;
		dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
		return 0;
	}

	/*
	 * Drive has just come online, driver is either in startup,
	 * paused performing a task, or busy waiting for hardware.
	 */
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		skdev->state = SKD_DRVR_STATE_ONLINE;
		dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
			skd_skdev_state_to_str(prev_driver_state),
			prev_driver_state, skd_skdev_state_to_str(skdev->state),
			skdev->state);
		dev_dbg(&skdev->pdev->dev,
			"**** device ONLINE...starting block queue\n");
		dev_dbg(&skdev->pdev->dev, "starting queue\n");
		dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = 1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		dev_dbg(&skdev->pdev->dev,
			"**** driver state %d, not implemented\n",
			skdev->state);
		return -EBUSY;
	}
	return 0;
}

/*
 *****************************************************************************
 * PCIe MSI/MSI-X INTERRUPT HANDLERS
 *****************************************************************************
 */

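/*
 * One handler per MSI-X vector. Each one acknowledges its interrupt by
 * writing the corresponding source bit back to FIT_INT_STATUS_HOST while
 * holding skdev->lock, then does any vector-specific work.
 */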
static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
	skd_isr_fwstate(skdev);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;
	int flush_enqueued = 0;
	int deferred;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
	deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
					     &flush_enqueued);
	if (flush_enqueued)
		schedule_work(&skdev->start_queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		schedule_work(&skdev->start_queue);

	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
	skd_isr_msg_from_dev(skdev);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
		SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}

/*
 *****************************************************************************
 * PCIe MSI/MSI-X SETUP
 *****************************************************************************
 */

struct skd_msix_entry {
	char isr_name[30];
};

struct skd_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

#define SKD_MAX_MSIX_COUNT	13
#define SKD_MIN_MSIX_COUNT	7
#define SKD_BASE_MSIX_IRQ	4

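/*
 * One entry per MSI-X vector, in vector order: four DMA vectors (serviced
 * by the reserved handler), firmware state change, completion queue,
 * message-from-device, two reserved slots, and four queue-full vectors.
 */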
static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
	{ "(DMA 0)", skd_reserved_isr },
	{ "(DMA 1)", skd_reserved_isr },
	{ "(DMA 2)", skd_reserved_isr },
	{ "(DMA 3)", skd_reserved_isr },
	{ "(State Change)", skd_statec_isr },
	{ "(COMPL_Q)", skd_comp_q },
	{ "(MSG)", skd_msg_isr },
	{ "(Reserved)", skd_reserved_isr },
	{ "(Reserved)", skd_reserved_isr },
	{ "(Queue Full 0)", skd_qfull_isr },
	{ "(Queue Full 1)", skd_qfull_isr },
	{ "(Queue Full 2)", skd_qfull_isr },
	{ "(Queue Full 3)", skd_qfull_isr },
};

static int skd_acquire_msix(struct skd_device *skdev)
{
	int i, rc;
	struct pci_dev *pdev = skdev->pdev;

	rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
				   PCI_IRQ_MSIX);
	if (rc < 0) {
		dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
		goto out;
	}

	skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
				      sizeof(struct skd_msix_entry), GFP_KERNEL);
	if (!skdev->msix_entries) {
		rc = -ENOMEM;
		dev_err(&skdev->pdev->dev, "msix table allocation error\n");
		goto out;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
		struct skd_msix_entry *qentry = &skdev->msix_entries[i];

		snprintf(qentry->isr_name, sizeof(qentry->isr_name),
			 "%s%d-msix %s", DRV_NAME, skdev->devno,
			 msix_entries[i].name);

		rc = devm_request_irq(&skdev->pdev->dev,
				      pci_irq_vector(skdev->pdev, i),
				      msix_entries[i].handler, 0,
				      qentry->isr_name, skdev);
		if (rc) {
			dev_err(&skdev->pdev->dev,
				"Unable to register(%d) MSI-X handler %d: %s\n",
				rc, i, qentry->isr_name);
			goto msix_out;
		}
	}

	dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
		SKD_MAX_MSIX_COUNT);
	return 0;

msix_out:
	while (--i >= 0)
		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
out:
	kfree(skdev->msix_entries);
	skdev->msix_entries = NULL;
	return rc;
}

static int skd_acquire_irq(struct skd_device *skdev)
{
	struct pci_dev *pdev = skdev->pdev;
	unsigned int irq_flag = PCI_IRQ_LEGACY;
	int rc;

	if (skd_isr_type == SKD_IRQ_MSIX) {
		rc = skd_acquire_msix(skdev);
		if (!rc)
			return 0;

		dev_err(&skdev->pdev->dev,
			"failed to enable MSI-X, re-trying with MSI %d\n", rc);
	}

	snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
		 skdev->devno);

	if (skd_isr_type != SKD_IRQ_LEGACY)
		irq_flag |= PCI_IRQ_MSI;
	rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
	if (rc < 0) {
		dev_err(&skdev->pdev->dev,
			"failed to allocate the MSI interrupt %d\n", rc);
		return rc;
	}

	rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
			      pdev->msi_enabled ? 0 : IRQF_SHARED,
			      skdev->isr_name, skdev);
	if (rc) {
		pci_free_irq_vectors(pdev);
		dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
			rc);
		return rc;
	}

	return 0;
}

static void skd_release_irq(struct skd_device *skdev)
{
	struct pci_dev *pdev = skdev->pdev;

	if (skdev->msix_entries) {
		int i;

		for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
			devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
				      skdev);
		}

		kfree(skdev->msix_entries);
		skdev->msix_entries = NULL;
	} else {
		devm_free_irq(&pdev->dev, pdev->irq, skdev);
	}

	pci_free_irq_vectors(pdev);
}

/*
 *****************************************************************************
 * CONSTRUCT
 *****************************************************************************
 */

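/*
 * Small fixed-size DMA buffers are carved from dedicated kmem caches and
 * streaming-mapped with dma_map_single() rather than allocated with
 * dma_alloc_coherent(). Callers pass the owning cache, so the mapping
 * size can always be taken from s->size.
 */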
static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
			   dma_addr_t *dma_handle, gfp_t gfp,
			   enum dma_data_direction dir)
{
	struct device *dev = &skdev->pdev->dev;
	void *buf;

	buf = kmem_cache_alloc(s, gfp);
	if (!buf)
		return NULL;
	*dma_handle = dma_map_single(dev, buf, s->size, dir);
	if (dma_mapping_error(dev, *dma_handle)) {
		kmem_cache_free(s, buf);
		buf = NULL;
	}
	return buf;
}

static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
			 void *vaddr, dma_addr_t dma_handle,
			 enum dma_data_direction dir)
{
	if (!vaddr)
		return;

	dma_unmap_single(&skdev->pdev->dev, dma_handle, s->size, dir);
	kmem_cache_free(s, vaddr);
}

static int skd_cons_skcomp(struct skd_device *skdev)
{
	int rc = 0;
	struct fit_completion_entry_v1 *skcomp;

	dev_dbg(&skdev->pdev->dev,
		"comp pci_alloc, total bytes %zd entries %d\n",
		SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);

	skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
				       &skdev->cq_dma_address);
	if (skcomp == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* the error-info entries follow the completion ring in the same
	 * allocation */
	skdev->skcomp_table = skcomp;
	skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
							    sizeof(*skcomp) *
							    SKD_N_COMPLETION_ENTRY);

err_out:
	return rc;
}

static int skd_cons_skmsg(struct skd_device *skdev)
{
	int rc = 0;
	u32 i;

	dev_dbg(&skdev->pdev->dev,
		"skmsg_table kcalloc, struct %lu, count %u total %lu\n",
		sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
		sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);

	skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
				     sizeof(struct skd_fitmsg_context),
				     GFP_KERNEL);
	if (skdev->skmsg_table == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		skmsg->id = i + SKD_ID_FIT_MSG;

		skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
						      SKD_N_FITMSG_BYTES,
						      &skmsg->mb_dma_address);
		if (skmsg->msg_buf == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
		     (FIT_QCMD_ALIGN - 1),
		     "not aligned: msg_buf %p mb_dma_address %#llx\n",
		     skmsg->msg_buf, skmsg->mb_dma_address);
		memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
	}

err_out:
	return rc;
}

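/*
 * Allocate a firmware-visible scatter-gather list: n_sg fit_sg_descriptor
 * entries in a single DMA buffer, with each entry's next_desc_ptr holding
 * the bus address of the next entry and the final link zeroed. For a list
 * based at bus address B, entry i points at B + (i + 1) * sizeof(entry).
 */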
static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
						  u32 n_sg,
						  dma_addr_t *ret_dma_addr)
{
	struct fit_sg_descriptor *sg_list;

	sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
				GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);

	if (sg_list != NULL) {
		uint64_t dma_address = *ret_dma_addr;
		u32 i;

		for (i = 0; i < n_sg - 1; i++) {
			uint64_t ndp_off;

			ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
			sg_list[i].next_desc_ptr = dma_address + ndp_off;
		}
		sg_list[i].next_desc_ptr = 0LL;
	}

	return sg_list;
}

static void skd_free_sg_list(struct skd_device *skdev,
			     struct fit_sg_descriptor *sg_list,
			     dma_addr_t dma_addr)
{
	if (WARN_ON_ONCE(!sg_list))
		return;

	skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
		     DMA_TO_DEVICE);
}

static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct skd_device *skdev = set->driver_data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);

	skreq->state = SKD_REQ_STATE_IDLE;
	skreq->sg = (void *)(skreq + 1);
	sg_init_table(skreq->sg, skd_sgs_per_request);
	skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
					    &skreq->sksg_dma_address);

	return skreq->sksg_list ? 0 : -ENOMEM;
}

static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			     unsigned int hctx_idx)
{
	struct skd_device *skdev = set->driver_data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);

	skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
}

static int skd_cons_sksb(struct skd_device *skdev)
{
	int rc = 0;
	struct skd_special_context *skspcl;

	skspcl = &skdev->internal_skspcl;

	skspcl->req.id = 0 + SKD_ID_INTERNAL;
	skspcl->req.state = SKD_REQ_STATE_IDLE;

	skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
					 &skspcl->db_dma_address,
					 GFP_DMA | __GFP_ZERO,
					 DMA_BIDIRECTIONAL);
	if (skspcl->data_buf == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
					&skspcl->mb_dma_address,
					GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
	if (skspcl->msg_buf == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
						 &skspcl->req.sksg_dma_address);
	if (skspcl->req.sksg_list == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	if (!skd_format_internal_skspcl(skdev)) {
		rc = -EINVAL;
		goto err_out;
	}

err_out:
	return rc;
}

static const struct blk_mq_ops skd_mq_ops = {
	.queue_rq	= skd_mq_queue_rq,
	.complete	= skd_softirq_done,
	.timeout	= skd_timed_out,
	.init_request	= skd_init_request,
	.exit_request	= skd_exit_request,
};

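/*
 * The per-request PDU declared via tag_set.cmd_size below is a
 * struct skd_request_context followed immediately by an array of
 * sgs_per_request struct scatterlist entries; skd_init_request() points
 * skreq->sg at that trailing array.
 */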
static int skd_cons_disk(struct skd_device *skdev)
{
	int rc = 0;
	struct gendisk *disk;
	struct request_queue *q;
	unsigned long flags;

	disk = alloc_disk(SKD_MINORS_PER_DEVICE);
	if (!disk) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->disk = disk;
	sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);

	disk->major = skdev->major;
	disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
	disk->fops = &skd_blockdev_ops;
	disk->private_data = skdev;

	memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
	skdev->tag_set.ops = &skd_mq_ops;
	skdev->tag_set.nr_hw_queues = 1;
	skdev->tag_set.queue_depth = skd_max_queue_depth;
	skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
		skdev->sgs_per_request * sizeof(struct scatterlist);
	skdev->tag_set.numa_node = NUMA_NO_NODE;
	skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE |
		BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
	skdev->tag_set.driver_data = skdev;
	rc = blk_mq_alloc_tag_set(&skdev->tag_set);
	if (rc)
		goto err_out;
	q = blk_mq_init_queue(&skdev->tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&skdev->tag_set);
		rc = PTR_ERR(q);
		goto err_out;
	}
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	q->queuedata = skdev;
	q->nr_requests = skd_max_queue_depth / 2;

	skdev->queue = q;
	disk->queue = q;

	blk_queue_write_cache(q, true, true);
	blk_queue_max_segments(q, skdev->sgs_per_request);
	blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);

	/* set optimal I/O size to 8KB */
	blk_queue_io_opt(q, 8192);

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

	blk_queue_rq_timeout(q, 8 * HZ);

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "stopping queue\n");
	blk_mq_stop_hw_queues(skdev->queue);
	spin_unlock_irqrestore(&skdev->lock, flags);

err_out:
	return rc;
}

#define SKD_N_DEV_TABLE		16u
static u32 skd_next_devno;

static struct skd_device *skd_construct(struct pci_dev *pdev)
{
	struct skd_device *skdev;
	int blk_major = skd_major;
	size_t size;
	int rc;

	skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
	if (!skdev) {
		dev_err(&pdev->dev, "memory alloc failure\n");
		return NULL;
	}

	skdev->state = SKD_DRVR_STATE_LOAD;
	skdev->pdev = pdev;
	skdev->devno = skd_next_devno++;
	skdev->major = blk_major;
	skdev->dev_max_queue_depth = 0;

	skdev->num_req_context = skd_max_queue_depth;
	skdev->num_fitmsg_context = skd_max_queue_depth;
	skdev->cur_max_queue_depth = 1;
	skdev->queue_low_water_mark = 1;
	skdev->proto_ver = 99;	/* initialized to invalid value */
	skdev->sgs_per_request = skd_sgs_per_request;
	skdev->dbg_level = skd_dbg_level;

	spin_lock_init(&skdev->lock);

	INIT_WORK(&skdev->start_queue, skd_start_queue);
	INIT_WORK(&skdev->completion_worker, skd_completion_worker);

	size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
	skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
						SLAB_HWCACHE_ALIGN, NULL);
	if (!skdev->msgbuf_cache)
		goto err_out;
	WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
		  "skd-msgbuf: %d < %zd\n",
		  kmem_cache_size(skdev->msgbuf_cache), size);
	size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
	skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
						SLAB_HWCACHE_ALIGN, NULL);
	if (!skdev->sglist_cache)
		goto err_out;
	WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
		  "skd-sglist: %d < %zd\n",
		  kmem_cache_size(skdev->sglist_cache), size);
	size = SKD_N_INTERNAL_BYTES;
	skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
						 SLAB_HWCACHE_ALIGN, NULL);
	if (!skdev->databuf_cache)
		goto err_out;
	WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
		  "skd-databuf: %d < %zd\n",
		  kmem_cache_size(skdev->databuf_cache), size);

	dev_dbg(&skdev->pdev->dev, "skcomp\n");
	rc = skd_cons_skcomp(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "skmsg\n");
	rc = skd_cons_skmsg(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "sksb\n");
	rc = skd_cons_sksb(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "disk\n");
	rc = skd_cons_disk(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "VICTORY\n");
	return skdev;

err_out:
	dev_dbg(&skdev->pdev->dev, "construct failed\n");
	skd_destruct(skdev);
	return NULL;
}

/*
 *****************************************************************************
 * DESTRUCT (FREE)
 *****************************************************************************
 */

static void skd_free_skcomp(struct skd_device *skdev)
{
	if (skdev->skcomp_table)
		pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
				    skdev->skcomp_table, skdev->cq_dma_address);

	skdev->skcomp_table = NULL;
	skdev->cq_dma_address = 0;
}

static void skd_free_skmsg(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skmsg_table == NULL)
		return;

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		if (skmsg->msg_buf != NULL) {
			pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
					    skmsg->msg_buf,
					    skmsg->mb_dma_address);
		}
		skmsg->msg_buf = NULL;
		skmsg->mb_dma_address = 0;
	}

	kfree(skdev->skmsg_table);
	skdev->skmsg_table = NULL;
}

static void skd_free_sksb(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
		     skspcl->db_dma_address, DMA_BIDIRECTIONAL);

	skspcl->data_buf = NULL;
	skspcl->db_dma_address = 0;

	skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
		     skspcl->mb_dma_address, DMA_TO_DEVICE);

	skspcl->msg_buf = NULL;
	skspcl->mb_dma_address = 0;

	skd_free_sg_list(skdev, skspcl->req.sksg_list,
			 skspcl->req.sksg_dma_address);

	skspcl->req.sksg_list = NULL;
	skspcl->req.sksg_dma_address = 0;
}

static void skd_free_disk(struct skd_device *skdev)
{
	struct gendisk *disk = skdev->disk;

	if (disk && (disk->flags & GENHD_FL_UP))
		del_gendisk(disk);

	if (skdev->queue) {
		blk_cleanup_queue(skdev->queue);
		skdev->queue = NULL;
		if (disk)
			disk->queue = NULL;
	}

	if (skdev->tag_set.tags)
		blk_mq_free_tag_set(&skdev->tag_set);

	put_disk(disk);
	skdev->disk = NULL;
}

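/*
 * Teardown runs in the reverse order of skd_construct(): disk, internal
 * special context, FIT messages, completion ring, then the kmem caches.
 * Each helper tolerates NULL, so this is also the error-path cleanup for
 * a partially constructed device.
 */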
static void skd_destruct(struct skd_device *skdev)
{
	if (skdev == NULL)
		return;

	cancel_work_sync(&skdev->start_queue);

	dev_dbg(&skdev->pdev->dev, "disk\n");
	skd_free_disk(skdev);

	dev_dbg(&skdev->pdev->dev, "sksb\n");
	skd_free_sksb(skdev);

	dev_dbg(&skdev->pdev->dev, "skmsg\n");
	skd_free_skmsg(skdev);

	dev_dbg(&skdev->pdev->dev, "skcomp\n");
	skd_free_skcomp(skdev);

	kmem_cache_destroy(skdev->databuf_cache);
	kmem_cache_destroy(skdev->sglist_cache);
	kmem_cache_destroy(skdev->msgbuf_cache);

	dev_dbg(&skdev->pdev->dev, "skdev\n");
	kfree(skdev);
}

/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */

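/*
 * SSDs have no physical CHS geometry; report a fixed 64-head, 255-sector
 * translation and derive the cylinder count from the capacity:
 * cylinders = capacity_in_sectors / (255 * 64).
 */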
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct skd_device *skdev;
	u64 capacity;

	skdev = bdev->bd_disk->private_data;

	dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
		bdev->bd_disk->disk_name, current->comm);

	if (skdev->read_cap_is_valid) {
		capacity = get_capacity(skdev->disk);
		geo->heads = 64;
		geo->sectors = 255;
		geo->cylinders = (capacity) / (255 * 64);
		return 0;
	}
	return -EIO;
}

static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
	dev_dbg(&skdev->pdev->dev, "add_disk\n");
	device_add_disk(parent, skdev->disk);
	return 0;
}

static const struct block_device_operations skd_blockdev_ops = {
	.owner		= THIS_MODULE,
	.getgeo		= skd_bdev_getgeo,
};

/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */

static const struct pci_device_id skd_pci_tbl[] = {
	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0 }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);

static char *skd_pci_info(struct skd_device *skdev, char *str)
{
	int pcie_reg;

	strcpy(str, "PCIe (");
	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;	/* Link Status register */
		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (0xF);
		lwidth = (pcie_lstat & 0x3F0) >> 4;

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	} else
		strcat(str, "Legacy)");

	return str;
}

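/*
 * Probe sequence: enable the PCI device, claim its regions, set a 64-bit
 * DMA mask (falling back to 32-bit), construct the skd_device, map the
 * BARs, acquire interrupts, start the state-machine timer, start the
 * device, and wait up to SKD_START_WAIT_SECONDS for gendisk_on before
 * attaching the block device. The error labels unwind in reverse order.
 */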
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
		pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			dev_err(&pdev->dev, "consistent DMA mask error %d\n",
				rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		BUG_ON(!rc);
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	dev_info(&pdev->dev, "%s 64bit\n", pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev,
				"Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(&pdev->dev, skdev);
		rc = 0;
	} else {
		/*
		 * We timed out; something is wrong with the device,
		 * so don't add the disk structure.
		 */
		dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
			rc);
		/* in case of no error; we timeout with ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	skd_stop_device(skdev);

	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			dev_err(&pdev->dev, "consistent DMA mask error %d\n",
				rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev, "Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}

static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	dev_err(&pdev->dev, "%s called\n", __func__);

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}

	dev_err(&pdev->dev, "calling stop\n");
	skd_stop_device(skdev);
}

static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */

const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}

static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	default:
		return "???";
	}
}

static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
	dev_dbg(&skdev->pdev->dev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
	dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
		skd_in_flight(skdev), skdev->cur_max_queue_depth,
		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	dev_dbg(&skdev->pdev->dev, "  cycle=%d cycle_ix=%d\n",
		skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	u32 lba = blk_rq_pos(req);
	u32 count = blk_rq_sectors(req);

	dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
	dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
		skreq->fitmsg_id);
	dev_dbg(&skdev->pdev->dev, "  sg_dir=%d n_sg=%d\n",
		skreq->data_dir, skreq->n_sg);

	dev_dbg(&skdev->pdev->dev,
		"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
		count, count, (int)rq_data_dir(req));
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */

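/*
 * The BUILD_BUG_ON() checks below pin the sizes and offsets of the
 * structures shared with device firmware at compile time; an accidental
 * layout change would break the FIT protocol, so it breaks the build
 * instead.
 */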
static int __init skd_init(void)
{
	BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
	BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
	BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
	BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
	BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
	BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 ||
	    skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	return pci_register_driver(&skd_driver);
}

static void __exit skd_exit(void)
{
	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);