// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.8-026"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		8
#define DRIVER_REVISION		26

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first,
	"Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep,
	"Hide the virtual SEP for direct attached drives.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
	u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}
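
/*
 * Example of the lock-free slot search above: with max_io_slots == 4 and
 * next_io_request_slot == 2, a caller probes slot 2 first; if that slot's
 * refcount was already nonzero the increment is undone and the search wraps
 * 3 -> 0 -> 1 until some refcount transitions 0 -> 1.  A stale or racy
 * next_io_request_slot only costs extra probes, never correctness.
 */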

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info,
	unsigned long timeout_msecs)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

/* Helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION,
		sense_info, sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, SCSI_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
		bufsize = pages + SCSI_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == vpd_page)
			goto exit_supported;

exit_unsupported:
	kfree(buf);
	return false;

exit_supported:
	kfree(buf);
	return true;
}
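
/*
 * Page layout assumed above (SPC "Supported VPD Pages" page): byte 3 holds
 * the page-list length and the list itself starts at byte 4, so the
 * supported page codes live at buf[3 + 1] .. buf[3 + pages].
 */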

static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u8 *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
		return 1; /* function not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_DEVICE_ID,
		buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
	}

	kfree(buf);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA	(1UL << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
		diag, sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);
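
	/*
	 * Example of the BCD encoding above: 14:35:09 on 2019-07-22 becomes
	 * time[] = { 0x14, 0x35, 0x09, 0x00, 0x07, 0x22, 0x20, 0x19 }, i.e.
	 * hour, minute, second, (reserved), month, day, century, year.
	 */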

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}
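
/*
 * Example of the external-RAID decode above: a little-endian lunid of
 * 0x00020005 yields target = (lunid >> 16) & 0x3fff = 2 and
 * lun = lunid & 0xff = 5; internal logical volumes instead keep target 0
 * and take the low 14 bits as the LUN.
 */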

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %d bytes, received %d bytes",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS	4
#define RAID_BYPASS_CONFIGURED	0x1
#define RAID_BYPASS_ENABLED	0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_INQUIRY_PAGE0_RETRIES	3

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	unsigned int retries;

	if (device->is_expander_smp_device)
		return 0;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	for (retries = 0;;) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
			buffer, 64);
		if (rc == 0)
			break;
		if (pqi_is_logical_device(device) ||
			rc != PQI_CMD_STATUS_ABORTED ||
			++retries > PQI_INQUIRY_PAGE0_RETRIES)
			goto out;
	}

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

	if (pqi_get_device_id(ctrl_info, device->scsi3addr,
		device->unique_id, sizeof(device->unique_id)) < 0)
		dev_warn(&ctrl_info->pci_dev->dev,
			"Can't get device id for scsi %d:%d:%d:%d\n",
			ctrl_info->scsi_host->host_no,
			device->bus, device->target,
			device->lun);

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type =
		new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}

static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (pqi_ctrl_in_ofa(ctrl_info))
		pqi_ctrl_ofa_done(ctrl_info);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			pqi_dev_info(ctrl_info, "added", device);
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}

static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported;

	if (device->is_expander_smp_device)
		return true;

	is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, for
		 * example), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}

static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
	if (!device->is_physical_device)
		return false;

	if (device->is_expander_smp_device)
		return true;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_ENCLOSURE:
		return true;
	}

	return false;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device ||
		!pqi_skip_device(device->scsi3addr);
}

static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	unsigned int physical_index;
	unsigned int logical_index;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}

		if (pqi_hide_vsep) {
			int i;

			for (i = num_physicals - 1; i >= 0; i--) {
				phys_lun_ext_entry =
					&physdev_list->lun_entries[i];
				if (CISS_GET_DRIVE_NUMBER(
					phys_lun_ext_entry->lunid) ==
						PQI_VSEP_CISS_BTL) {
					pqi_mask_device(
						phys_lun_ext_entry->lunid);
					break;
				}
			}
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc_array(num_new_devices,
					sizeof(*new_device_list),
					GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;
	physical_index = 0;
	logical_index = 0;

	for (i = 0; i < num_new_devices; i++) {

		if ((!pqi_expose_ld_first && i < num_physicals) ||
			(pqi_expose_ld_first && i >= num_logicals)) {
			is_physical_device = true;
			phys_lun_ext_entry =
				&physdev_list->lun_entries[physical_index++];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[logical_index++];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device && pqi_skip_device(scsi3addr))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		if (is_physical_device) {
			if (phys_lun_ext_entry->device_type ==
				SA_EXPANDER_SMP_DEVICE)
				device->is_expander_smp_device = true;
		} else {
			device->is_external_raid_device =
				pqi_is_external_raid_addr(scsi3addr);
		}

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			if (device->is_physical_device)
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping physical device %016llx\n",
					get_unaligned_be64(
						&phys_lun_ext_entry->wwid));
			else
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping logical device %08x%08x\n",
					*((u32 *)&device->scsi3addr),
					*((u32 *)&device->scsi3addr[4]));
			rc = 0;
			continue;
		}

		if (!pqi_is_supported_device(device))
			continue;

		pqi_assign_bus_target_lun(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle) {
				device->aio_enabled = true;
				device->aio_handle =
					phys_lun_ext_entry->aio_handle;
			}

			pqi_get_physical_disk_info(ctrl_info,
				device, id_phys);

		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		if (pqi_is_device_with_sas_address(device))
			device->sas_address = get_unaligned_be64(&device->wwid);

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}

static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;

	while (1) {
		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

		device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
			struct pqi_scsi_dev, scsi_device_list_entry);
		if (device)
			list_del(&device->scsi_device_list_entry);

		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);

		if (!device)
			break;

		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		pqi_free_device(device);
	}
}

static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	if (!mutex_trylock(&ctrl_info->scan_mutex)) {
		pqi_schedule_rescan_worker_delayed(ctrl_info);
		rc = -EINPROGRESS;
	} else {
		rc = pqi_update_scsi_devices(ctrl_info);
		if (rc)
			pqi_schedule_rescan_worker_delayed(ctrl_info);
		mutex_unlock(&ctrl_info->scan_mutex);
	}

	return rc;
}

static void pqi_scan_start(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(shost);
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	pqi_scan_scsi_devices(ctrl_info);
}

/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}
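
/*
 * The three wait helpers below use the lock-then-unlock idiom: acquiring the
 * mutex cannot succeed until the current holder (a scan, a LUN reset, or an
 * OFA operation) finishes, so the lock/unlock pair acts purely as a
 * completion barrier.
 */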

static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->scan_mutex);
	mutex_unlock(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
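
/*
 * Worked example of the tweak computation above: on a volume with 4096-byte
 * blocks, LBA 100 yields a tweak of (100 * 4096) / 512 = 800, i.e. the LBA
 * is rescaled to 512-byte units before being split into the lower and upper
 * 32-bit tweak halves.
 */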
2320 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2323 #define PQI_RAID_BYPASS_INELIGIBLE 1
2325 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2326 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2327 struct pqi_queue_group *queue_group)
2329 struct raid_map *raid_map;
2330 bool is_write = false;
2338 u32 first_row_offset;
2339 u32 last_row_offset;
2344 u32 r5or6_blocks_per_row;
2345 u64 r5or6_first_row;
2347 u32 r5or6_first_row_offset;
2348 u32 r5or6_last_row_offset;
2349 u32 r5or6_first_column;
2350 u32 r5or6_last_column;
2351 u16 data_disks_per_row;
2352 u32 total_disks_per_row;
2353 u16 layout_map_count;
2365 int offload_to_mirror;
2366 struct pqi_encryption_info *encryption_info_ptr;
2367 struct pqi_encryption_info encryption_info;
2368 #if BITS_PER_LONG == 32
2372 /* Check for valid opcode, get LBA and block count. */
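/*
 * CDB recap: 6-byte CDBs carry a 21-bit LBA and an 8-bit transfer
 * length, 10-byte CDBs a 32-bit LBA and a 16-bit length, 12-byte CDBs
 * a 32-bit LBA and a 32-bit length, and 16-byte CDBs a 64-bit LBA and
 * a 32-bit length, which is what the extraction below implements.
 */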
2373 switch (scmd->cmnd[0]) {
2378 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2379 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2380 block_cnt = (u32)scmd->cmnd[4];
2388 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2389 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2395 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2396 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2402 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2403 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2406 /* Process via normal I/O path. */
2407 return PQI_RAID_BYPASS_INELIGIBLE;
2410 /* Check for write to non-RAID-0. */
2411 if (is_write && device->raid_level != SA_RAID_0)
2412 return PQI_RAID_BYPASS_INELIGIBLE;
2414 if (unlikely(block_cnt == 0))
2415 return PQI_RAID_BYPASS_INELIGIBLE;
2417 last_block = first_block + block_cnt - 1;
2418 raid_map = device->raid_map;
2420 /* Check for invalid block or wraparound. */
2421 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2422 last_block < first_block)
2423 return PQI_RAID_BYPASS_INELIGIBLE;
2425 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2426 strip_size = get_unaligned_le16(&raid_map->strip_size);
2427 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2429 /* Calculate stripe information for the request. */
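/*
 * Worked example: with strip_size = 128 and data_disks_per_row = 4,
 * blocks_per_row = 512, so first_block = 600 lands in row 1 (600 / 512)
 * at row offset 88 (600 - 512) in column 0 (88 / 128).
 */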
2430 blocks_per_row = data_disks_per_row * strip_size;
2431 #if BITS_PER_LONG == 32
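/*
 * 32-bit kernels lack native 64-bit division, so do_div() is used:
 * it divides the u64 dividend in place and returns the remainder.
 */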
2432 tmpdiv = first_block;
2433 do_div(tmpdiv, blocks_per_row);
2435 tmpdiv = last_block;
2436 do_div(tmpdiv, blocks_per_row);
2438 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2439 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2440 tmpdiv = first_row_offset;
2441 do_div(tmpdiv, strip_size);
2442 first_column = tmpdiv;
2443 tmpdiv = last_row_offset;
2444 do_div(tmpdiv, strip_size);
2445 last_column = tmpdiv;
2447 first_row = first_block / blocks_per_row;
2448 last_row = last_block / blocks_per_row;
2449 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2450 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2451 first_column = first_row_offset / strip_size;
2452 last_column = last_row_offset / strip_size;
2455 /* If this isn't a single row/column then give to the controller. */
2456 if (first_row != last_row || first_column != last_column)
2457 return PQI_RAID_BYPASS_INELIGIBLE;
2459 /* Proceeding with driver mapping. */
2460 total_disks_per_row = data_disks_per_row +
2461 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2462 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2463 get_unaligned_le16(&raid_map->row_cnt);
2464 map_index = (map_row * total_disks_per_row) + first_column;
2467 if (device->raid_level == SA_RAID_1) {
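/*
 * RAID 1: alternate between the two mirror halves by toggling
 * offload_to_mirror on each bypassed request, spreading reads
 * across both copies.
 */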
2468 if (device->offload_to_mirror)
2469 map_index += data_disks_per_row;
2470 device->offload_to_mirror = !device->offload_to_mirror;
2471 } else if (device->raid_level == SA_RAID_ADM) {
* Handles N-way mirrors (R1-ADM) and R10 with # of drives
* divisible by 3.
2477 offload_to_mirror = device->offload_to_mirror;
2478 if (offload_to_mirror == 0) {
2479 /* use physical disk in the first mirrored group. */
2480 map_index %= data_disks_per_row;
* Determine mirror group that map_index
* indicates.
2487 current_group = map_index / data_disks_per_row;
2489 if (offload_to_mirror != current_group) {
2491 layout_map_count - 1) {
* Select raid index from
* next group.
2496 map_index += data_disks_per_row;
* Select raid index from first
* group.
2503 map_index %= data_disks_per_row;
2507 } while (offload_to_mirror != current_group);
2510 /* Set mirror group to use next time. */
2512 (offload_to_mirror >= layout_map_count - 1) ?
2513 0 : offload_to_mirror + 1;
2514 WARN_ON(offload_to_mirror >= layout_map_count);
2515 device->offload_to_mirror = offload_to_mirror;
2517 * Avoid direct use of device->offload_to_mirror within this
2518 * function since multiple threads might simultaneously
* increment it beyond the range of device->layout_map_count - 1.
2521 } else if ((device->raid_level == SA_RAID_5 ||
2522 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2524 /* Verify first and last block are in same RAID group */
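/*
 * When layout_map_count > 1 (e.g. RAID 50/60), the volume is striped
 * across multiple RAID groups, so a bypass request must be wholly
 * contained within one group.
 */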
2525 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2526 stripesize = r5or6_blocks_per_row * layout_map_count;
2527 #if BITS_PER_LONG == 32
2528 tmpdiv = first_block;
2529 first_group = do_div(tmpdiv, stripesize);
2530 tmpdiv = first_group;
2531 do_div(tmpdiv, r5or6_blocks_per_row);
2532 first_group = tmpdiv;
2533 tmpdiv = last_block;
2534 last_group = do_div(tmpdiv, stripesize);
2535 tmpdiv = last_group;
2536 do_div(tmpdiv, r5or6_blocks_per_row);
2537 last_group = tmpdiv;
2539 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2540 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2542 if (first_group != last_group)
2543 return PQI_RAID_BYPASS_INELIGIBLE;
2545 /* Verify request is in a single row of RAID 5/6 */
2546 #if BITS_PER_LONG == 32
2547 tmpdiv = first_block;
2548 do_div(tmpdiv, stripesize);
2549 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2550 tmpdiv = last_block;
2551 do_div(tmpdiv, stripesize);
2552 r5or6_last_row = r0_last_row = tmpdiv;
2554 first_row = r5or6_first_row = r0_first_row =
2555 first_block / stripesize;
2556 r5or6_last_row = r0_last_row = last_block / stripesize;
2558 if (r5or6_first_row != r5or6_last_row)
2559 return PQI_RAID_BYPASS_INELIGIBLE;
2561 /* Verify request is in a single column */
2562 #if BITS_PER_LONG == 32
2563 tmpdiv = first_block;
2564 first_row_offset = do_div(tmpdiv, stripesize);
2565 tmpdiv = first_row_offset;
2566 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2567 r5or6_first_row_offset = first_row_offset;
2568 tmpdiv = last_block;
2569 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2570 tmpdiv = r5or6_last_row_offset;
2571 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2572 tmpdiv = r5or6_first_row_offset;
2573 do_div(tmpdiv, strip_size);
2574 first_column = r5or6_first_column = tmpdiv;
2575 tmpdiv = r5or6_last_row_offset;
2576 do_div(tmpdiv, strip_size);
2577 r5or6_last_column = tmpdiv;
2579 first_row_offset = r5or6_first_row_offset =
2580 (u32)((first_block % stripesize) %
2581 r5or6_blocks_per_row);
2583 r5or6_last_row_offset =
2584 (u32)((last_block % stripesize) %
2585 r5or6_blocks_per_row);
2587 first_column = r5or6_first_row_offset / strip_size;
2588 r5or6_first_column = first_column;
2589 r5or6_last_column = r5or6_last_row_offset / strip_size;
2591 if (r5or6_first_column != r5or6_last_column)
2592 return PQI_RAID_BYPASS_INELIGIBLE;
2594 /* Request is eligible */
2596 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2597 get_unaligned_le16(&raid_map->row_cnt);
2599 map_index = (first_group *
2600 (get_unaligned_le16(&raid_map->row_cnt) *
2601 total_disks_per_row)) +
2602 (map_row * total_disks_per_row) + first_column;
2605 aio_handle = raid_map->disk_data[map_index].aio_handle;
2606 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2607 first_row * strip_size +
2608 (first_row_offset - first_column * strip_size);
2609 disk_block_cnt = block_cnt;
2611 /* Handle differing logical/physical block sizes. */
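/*
 * phys_blk_shift converts volume blocks into the smaller physical
 * disk blocks, e.g. a shift of 3 maps 4096-byte volume blocks onto
 * 512-byte disk sectors.
 */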
2612 if (raid_map->phys_blk_shift) {
2613 disk_block <<= raid_map->phys_blk_shift;
2614 disk_block_cnt <<= raid_map->phys_blk_shift;
2617 if (unlikely(disk_block_cnt > 0xffff))
2618 return PQI_RAID_BYPASS_INELIGIBLE;
2620 /* Build the new CDB for the physical disk I/O. */
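/*
 * A 16-byte CDB is required once the starting block no longer fits in
 * 32 bits; the 10-byte CDB has only a 4-byte LBA field.
 */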
2621 if (disk_block > 0xffffffff) {
2622 cdb[0] = is_write ? WRITE_16 : READ_16;
2624 put_unaligned_be64(disk_block, &cdb[2]);
2625 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2630 cdb[0] = is_write ? WRITE_10 : READ_10;
2632 put_unaligned_be32((u32)disk_block, &cdb[2]);
2634 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2639 if (get_unaligned_le16(&raid_map->flags) &
2640 RAID_MAP_ENCRYPTION_ENABLED) {
2641 pqi_set_encryption_info(&encryption_info, raid_map,
2643 encryption_info_ptr = &encryption_info;
2645 encryption_info_ptr = NULL;
2648 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2649 cdb, cdb_length, queue_group, encryption_info_ptr, true);
2652 #define PQI_STATUS_IDLE 0x0
2654 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2655 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2657 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2658 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2659 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2660 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2661 #define PQI_DEVICE_STATE_ERROR 0x4
2663 #define PQI_MODE_READY_TIMEOUT_SECS 30
2664 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
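/*
 * Wait for the controller to enter PQI mode: poll for the PQI device
 * signature, then for the IDLE function and status code, then for the
 * "all registers ready" device state, all against a single deadline.
 */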
2666 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2668 struct pqi_device_registers __iomem *pqi_registers;
2669 unsigned long timeout;
2673 pqi_registers = ctrl_info->pqi_registers;
2674 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2677 signature = readq(&pqi_registers->signature);
2678 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2679 sizeof(signature)) == 0)
2681 if (time_after(jiffies, timeout)) {
2682 dev_err(&ctrl_info->pci_dev->dev,
2683 "timed out waiting for PQI signature\n");
2686 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2690 status = readb(&pqi_registers->function_and_status_code);
2691 if (status == PQI_STATUS_IDLE)
2693 if (time_after(jiffies, timeout)) {
2694 dev_err(&ctrl_info->pci_dev->dev,
2695 "timed out waiting for PQI IDLE\n");
2698 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2702 if (readl(&pqi_registers->device_status) ==
2703 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2705 if (time_after(jiffies, timeout)) {
2706 dev_err(&ctrl_info->pci_dev->dev,
2707 "timed out waiting for PQI all registers ready\n");
2710 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2716 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2718 struct pqi_scsi_dev *device;
2720 device = io_request->scmd->device->hostdata;
2721 device->raid_bypass_enabled = false;
2722 device->aio_enabled = false;
2725 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2727 struct pqi_ctrl_info *ctrl_info;
2728 struct pqi_scsi_dev *device;
2730 device = sdev->hostdata;
2731 if (device->device_offline)
2734 device->device_offline = true;
2735 ctrl_info = shost_to_hba(sdev->host);
2736 pqi_schedule_rescan_worker(ctrl_info);
2737 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2738 path, ctrl_info->scsi_host->host_no, device->bus,
2739 device->target, device->lun);
2742 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2746 struct scsi_cmnd *scmd;
2747 struct pqi_raid_error_info *error_info;
2748 size_t sense_data_length;
2751 struct scsi_sense_hdr sshdr;
2753 scmd = io_request->scmd;
2757 error_info = io_request->error_info;
2758 scsi_status = error_info->status;
2761 switch (error_info->data_out_result) {
2762 case PQI_DATA_IN_OUT_GOOD:
2764 case PQI_DATA_IN_OUT_UNDERFLOW:
2766 get_unaligned_le32(&error_info->data_out_transferred);
2767 residual_count = scsi_bufflen(scmd) - xfer_count;
2768 scsi_set_resid(scmd, residual_count);
2769 if (xfer_count < scmd->underflow)
2770 host_byte = DID_SOFT_ERROR;
2772 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2773 case PQI_DATA_IN_OUT_ABORTED:
2774 host_byte = DID_ABORT;
2776 case PQI_DATA_IN_OUT_TIMEOUT:
2777 host_byte = DID_TIME_OUT;
2779 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2780 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2781 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2782 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2783 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2784 case PQI_DATA_IN_OUT_ERROR:
2785 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2786 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2787 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2788 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2789 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2790 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2791 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2792 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2793 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2794 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2796 host_byte = DID_ERROR;
2800 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2801 if (sense_data_length == 0)
2803 get_unaligned_le16(&error_info->response_data_length);
2804 if (sense_data_length) {
2805 if (sense_data_length > sizeof(error_info->data))
2806 sense_data_length = sizeof(error_info->data);
2808 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2809 scsi_normalize_sense(error_info->data,
2810 sense_data_length, &sshdr) &&
2811 sshdr.sense_key == HARDWARE_ERROR &&
2812 sshdr.asc == 0x3e) {
2813 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2814 struct pqi_scsi_dev *device = scmd->device->hostdata;
2816 switch (sshdr.ascq) {
2817 case 0x1: /* LOGICAL UNIT FAILURE */
2818 if (printk_ratelimit())
2819 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2820 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2821 pqi_take_device_offline(scmd->device, "RAID");
2822 host_byte = DID_NO_CONNECT;
2825 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
2826 if (printk_ratelimit())
2827 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
2828 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2833 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2834 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2835 memcpy(scmd->sense_buffer, error_info->data,
2839 scmd->result = scsi_status;
2840 set_host_byte(scmd, host_byte);
2843 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2847 struct scsi_cmnd *scmd;
2848 struct pqi_aio_error_info *error_info;
2849 size_t sense_data_length;
2852 bool device_offline;
2854 scmd = io_request->scmd;
2855 error_info = io_request->error_info;
2857 sense_data_length = 0;
2858 device_offline = false;
2860 switch (error_info->service_response) {
2861 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2862 scsi_status = error_info->status;
2864 case PQI_AIO_SERV_RESPONSE_FAILURE:
2865 switch (error_info->status) {
2866 case PQI_AIO_STATUS_IO_ABORTED:
2867 scsi_status = SAM_STAT_TASK_ABORTED;
2869 case PQI_AIO_STATUS_UNDERRUN:
2870 scsi_status = SAM_STAT_GOOD;
2871 residual_count = get_unaligned_le32(
2872 &error_info->residual_count);
2873 scsi_set_resid(scmd, residual_count);
2874 xfer_count = scsi_bufflen(scmd) - residual_count;
2875 if (xfer_count < scmd->underflow)
2876 host_byte = DID_SOFT_ERROR;
2878 case PQI_AIO_STATUS_OVERRUN:
2879 scsi_status = SAM_STAT_GOOD;
2881 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2882 pqi_aio_path_disabled(io_request);
2883 scsi_status = SAM_STAT_GOOD;
2884 io_request->status = -EAGAIN;
2886 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2887 case PQI_AIO_STATUS_INVALID_DEVICE:
2888 if (!io_request->raid_bypass) {
2889 device_offline = true;
2890 pqi_take_device_offline(scmd->device, "AIO");
2891 host_byte = DID_NO_CONNECT;
2893 scsi_status = SAM_STAT_CHECK_CONDITION;
2895 case PQI_AIO_STATUS_IO_ERROR:
2897 scsi_status = SAM_STAT_CHECK_CONDITION;
2901 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2902 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2903 scsi_status = SAM_STAT_GOOD;
2905 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2906 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2908 scsi_status = SAM_STAT_CHECK_CONDITION;
2912 if (error_info->data_present) {
2914 get_unaligned_le16(&error_info->data_length);
2915 if (sense_data_length) {
2916 if (sense_data_length > sizeof(error_info->data))
2917 sense_data_length = sizeof(error_info->data);
2918 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2919 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2920 memcpy(scmd->sense_buffer, error_info->data,
2925 if (device_offline && sense_data_length == 0)
2926 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2929 scmd->result = scsi_status;
2930 set_host_byte(scmd, host_byte);
2933 static void pqi_process_io_error(unsigned int iu_type,
2934 struct pqi_io_request *io_request)
2937 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2938 pqi_process_raid_io_error(io_request);
2940 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2941 pqi_process_aio_io_error(io_request);
2946 static int pqi_interpret_task_management_response(
2947 struct pqi_task_management_response *response)
2951 switch (response->response_code) {
2952 case SOP_TMF_COMPLETE:
2953 case SOP_TMF_FUNCTION_SUCCEEDED:
2956 case SOP_TMF_REJECTED:
2967 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2968 struct pqi_queue_group *queue_group)
2970 unsigned int num_responses;
2973 struct pqi_io_request *io_request;
2974 struct pqi_io_response *response;
2978 oq_ci = queue_group->oq_ci_copy;
2981 oq_pi = readl(queue_group->oq_pi);
2986 response = queue_group->oq_element_array +
2987 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2989 request_id = get_unaligned_le16(&response->request_id);
2990 WARN_ON(request_id >= ctrl_info->max_io_slots);
2992 io_request = &ctrl_info->io_request_pool[request_id];
2993 WARN_ON(atomic_read(&io_request->refcount) == 0);
2995 switch (response->header.iu_type) {
2996 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2997 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2998 if (io_request->scmd)
2999 io_request->scmd->result = 0;
3001 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3003 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3004 io_request->status =
3006 &((struct pqi_vendor_general_response *)
3009 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3010 io_request->status =
3011 pqi_interpret_task_management_response(
3014 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3015 pqi_aio_path_disabled(io_request);
3016 io_request->status = -EAGAIN;
3018 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3019 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3020 io_request->error_info = ctrl_info->error_buffer +
3021 (get_unaligned_le16(&response->error_index) *
3022 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3023 pqi_process_io_error(response->header.iu_type,
3027 dev_err(&ctrl_info->pci_dev->dev,
3028 "unexpected IU type: 0x%x\n",
3029 response->header.iu_type);
3033 io_request->io_complete_callback(io_request,
3034 io_request->context);
3037 * Note that the I/O request structure CANNOT BE TOUCHED after
3038 * returning from the I/O completion callback!
3041 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3044 if (num_responses) {
3045 queue_group->oq_ci_copy = oq_ci;
3046 writel(oq_ci, queue_group->oq_ci);
3049 return num_responses;
3052 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3053 unsigned int ci, unsigned int elements_in_queue)
3055 unsigned int num_elements_used;
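/*
 * Ring accounting: one element is always left unused so that pi == ci
 * unambiguously means "empty" rather than "full".  Example: with an
 * 8-element queue, pi = 2 and ci = 6 give used = 8 - 6 + 2 = 4 and
 * free = 8 - 4 - 1 = 3.
 */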
3058 num_elements_used = pi - ci;
3060 num_elements_used = elements_in_queue - ci + pi;
3062 return elements_in_queue - num_elements_used - 1;
3065 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3066 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3070 unsigned long flags;
3072 struct pqi_queue_group *queue_group;
3074 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3075 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3078 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3080 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3081 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3083 if (pqi_num_elements_free(iq_pi, iq_ci,
3084 ctrl_info->num_elements_per_iq))
3087 spin_unlock_irqrestore(
3088 &queue_group->submit_lock[RAID_PATH], flags);
3090 if (pqi_ctrl_offline(ctrl_info))
3094 next_element = queue_group->iq_element_array[RAID_PATH] +
3095 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3097 memcpy(next_element, iu, iu_length);
3099 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3100 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
* This write notifies the controller that an IU is available to be
* processed.
3106 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3108 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3111 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3112 struct pqi_event *event)
3114 struct pqi_event_acknowledge_request request;
3116 memset(&request, 0, sizeof(request));
3118 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3119 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3120 &request.header.iu_length);
3121 request.event_type = event->event_type;
3122 request.event_id = event->event_id;
3123 request.additional_event_id = event->additional_event_id;
3125 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3128 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3129 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
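/*
 * During Online Firmware Activation, the soft reset status register is
 * polled to learn whether the driver or the firmware should initiate
 * the reset, or whether the activation was aborted.
 */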
3131 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3132 struct pqi_ctrl_info *ctrl_info)
3134 unsigned long timeout;
3137 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3140 status = pqi_read_soft_reset_status(ctrl_info);
3141 if (status & PQI_SOFT_RESET_INITIATE)
3142 return RESET_INITIATE_DRIVER;
3144 if (status & PQI_SOFT_RESET_ABORT)
3147 if (time_after(jiffies, timeout)) {
3148 dev_err(&ctrl_info->pci_dev->dev,
3149 "timed out waiting for soft reset status\n");
3150 return RESET_TIMEDOUT;
3153 if (!sis_is_firmware_running(ctrl_info))
3154 return RESET_NORESPONSE;
3156 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3160 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3161 enum pqi_soft_reset_status reset_status)
3165 switch (reset_status) {
3166 case RESET_INITIATE_DRIVER:
3168 case RESET_TIMEDOUT:
3169 dev_info(&ctrl_info->pci_dev->dev,
3170 "resetting controller %u\n", ctrl_info->ctrl_id);
3171 sis_soft_reset(ctrl_info);
3173 case RESET_INITIATE_FIRMWARE:
3174 rc = pqi_ofa_ctrl_restart(ctrl_info);
3175 pqi_ofa_free_host_buffer(ctrl_info);
3176 dev_info(&ctrl_info->pci_dev->dev,
3177 "Online Firmware Activation for controller %u: %s\n",
3178 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3181 pqi_ofa_ctrl_unquiesce(ctrl_info);
3182 dev_info(&ctrl_info->pci_dev->dev,
3183 "Online Firmware Activation for controller %u: %s\n",
3184 ctrl_info->ctrl_id, "ABORTED");
3186 case RESET_NORESPONSE:
3187 pqi_ofa_free_host_buffer(ctrl_info);
3188 pqi_take_ctrl_offline(ctrl_info);
3193 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3194 struct pqi_event *event)
3197 enum pqi_soft_reset_status status;
3199 event_id = get_unaligned_le16(&event->event_id);
3201 mutex_lock(&ctrl_info->ofa_mutex);
3203 if (event_id == PQI_EVENT_OFA_QUIESCE) {
3204 dev_info(&ctrl_info->pci_dev->dev,
3205 "Received Online Firmware Activation quiesce event for controller %u\n",
3206 ctrl_info->ctrl_id);
3207 pqi_ofa_ctrl_quiesce(ctrl_info);
3208 pqi_acknowledge_event(ctrl_info, event);
3209 if (ctrl_info->soft_reset_handshake_supported) {
3210 status = pqi_poll_for_soft_reset_status(ctrl_info);
3211 pqi_process_soft_reset(ctrl_info, status);
3213 pqi_process_soft_reset(ctrl_info,
3214 RESET_INITIATE_FIRMWARE);
3217 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3218 pqi_acknowledge_event(ctrl_info, event);
3219 pqi_ofa_setup_host_buffer(ctrl_info,
3220 le32_to_cpu(event->ofa_bytes_requested));
3221 pqi_ofa_host_memory_update(ctrl_info);
3222 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3223 pqi_ofa_free_host_buffer(ctrl_info);
3224 pqi_acknowledge_event(ctrl_info, event);
3225 dev_info(&ctrl_info->pci_dev->dev,
3226 "Online Firmware Activation(%u) cancel reason : %u\n",
3227 ctrl_info->ctrl_id, event->ofa_cancel_reason);
3230 mutex_unlock(&ctrl_info->ofa_mutex);
3233 static void pqi_event_worker(struct work_struct *work)
3236 struct pqi_ctrl_info *ctrl_info;
3237 struct pqi_event *event;
3239 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3241 pqi_ctrl_busy(ctrl_info);
3242 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
3243 if (pqi_ctrl_offline(ctrl_info))
3246 pqi_schedule_rescan_worker_delayed(ctrl_info);
3248 event = ctrl_info->events;
3249 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3250 if (event->pending) {
3251 event->pending = false;
3252 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3253 pqi_ctrl_unbusy(ctrl_info);
3254 pqi_ofa_process_event(ctrl_info, event);
3257 pqi_acknowledge_event(ctrl_info, event);
3263 pqi_ctrl_unbusy(ctrl_info);
3266 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
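/*
 * Heartbeat watchdog: on each timer tick, if neither the interrupt
 * count nor the firmware heartbeat counter has advanced since the
 * previous tick, the controller is assumed locked up and taken offline.
 */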
3268 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3271 u32 heartbeat_count;
3272 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
3275 pqi_check_ctrl_health(ctrl_info);
3276 if (pqi_ctrl_offline(ctrl_info))
3279 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3280 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3282 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3283 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3284 dev_err(&ctrl_info->pci_dev->dev,
3285 "no heartbeat detected - last heartbeat count: %u\n",
3287 pqi_take_ctrl_offline(ctrl_info);
3291 ctrl_info->previous_num_interrupts = num_interrupts;
3294 ctrl_info->previous_heartbeat_count = heartbeat_count;
3295 mod_timer(&ctrl_info->heartbeat_timer,
3296 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3299 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3301 if (!ctrl_info->heartbeat_counter)
3304 ctrl_info->previous_num_interrupts =
3305 atomic_read(&ctrl_info->num_interrupts);
3306 ctrl_info->previous_heartbeat_count =
3307 pqi_read_heartbeat_counter(ctrl_info);
3309 ctrl_info->heartbeat_timer.expires =
3310 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3311 add_timer(&ctrl_info->heartbeat_timer);
3314 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3316 del_timer_sync(&ctrl_info->heartbeat_timer);
3319 static inline int pqi_event_type_to_event_index(unsigned int event_type)
3323 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3324 if (event_type == pqi_supported_event_types[index])
3330 static inline bool pqi_is_supported_event(unsigned int event_type)
3332 return pqi_event_type_to_event_index(event_type) != -1;
3335 static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3336 struct pqi_event_response *response)
3340 event_id = get_unaligned_le16(&event->event_id);
3342 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3343 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3344 event->ofa_bytes_requested =
3345 response->data.ofa_memory_allocation.bytes_requested;
3346 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3347 event->ofa_cancel_reason =
3348 response->data.ofa_cancelled.reason;
3353 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3355 unsigned int num_events;
3358 struct pqi_event_queue *event_queue;
3359 struct pqi_event_response *response;
3360 struct pqi_event *event;
3363 event_queue = &ctrl_info->event_queue;
3365 oq_ci = event_queue->oq_ci_copy;
3368 oq_pi = readl(event_queue->oq_pi);
3373 response = event_queue->oq_element_array +
3374 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3377 pqi_event_type_to_event_index(response->event_type);
3379 if (event_index >= 0) {
3380 if (response->request_acknowlege) {
3381 event = &ctrl_info->events[event_index];
3382 event->pending = true;
3383 event->event_type = response->event_type;
3384 event->event_id = response->event_id;
3385 event->additional_event_id =
3386 response->additional_event_id;
3387 pqi_ofa_capture_event_payload(event, response);
3391 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3395 event_queue->oq_ci_copy = oq_ci;
3396 writel(oq_ci, event_queue->oq_ci);
3397 schedule_work(&ctrl_info->event_work);
3403 #define PQI_LEGACY_INTX_MASK 0x1
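/*
 * Legacy INTx is enabled by writing the mask bit to the mask-clear
 * register and disabled by writing it to the mask-set register.
 */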
3405 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3409 struct pqi_device_registers __iomem *pqi_registers;
3410 volatile void __iomem *register_addr;
3412 pqi_registers = ctrl_info->pqi_registers;
3415 register_addr = &pqi_registers->legacy_intx_mask_clear;
3417 register_addr = &pqi_registers->legacy_intx_mask_set;
3419 intx_mask = readl(register_addr);
3420 intx_mask |= PQI_LEGACY_INTX_MASK;
3421 writel(intx_mask, register_addr);
3424 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3425 enum pqi_irq_mode new_mode)
3427 switch (ctrl_info->irq_mode) {
3433 pqi_configure_legacy_intx(ctrl_info, true);
3434 sis_enable_intx(ctrl_info);
3443 pqi_configure_legacy_intx(ctrl_info, false);
3444 sis_enable_msix(ctrl_info);
3449 pqi_configure_legacy_intx(ctrl_info, false);
3456 sis_enable_msix(ctrl_info);
3459 pqi_configure_legacy_intx(ctrl_info, true);
3460 sis_enable_intx(ctrl_info);
3468 ctrl_info->irq_mode = new_mode;
3471 #define PQI_LEGACY_INTX_PENDING 0x1
3473 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3478 switch (ctrl_info->irq_mode) {
3484 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3485 if (intx_status & PQI_LEGACY_INTX_PENDING)
3499 static irqreturn_t pqi_irq_handler(int irq, void *data)
3501 struct pqi_ctrl_info *ctrl_info;
3502 struct pqi_queue_group *queue_group;
3503 unsigned int num_responses_handled;
3506 ctrl_info = queue_group->ctrl_info;
3508 if (!pqi_is_valid_irq(ctrl_info))
3511 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3513 if (irq == ctrl_info->event_irq)
3514 num_responses_handled += pqi_process_event_intr(ctrl_info);
3516 if (num_responses_handled)
3517 atomic_inc(&ctrl_info->num_interrupts);
3519 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3520 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3525 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3527 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3531 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3533 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3534 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3535 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3537 dev_err(&pci_dev->dev,
3538 "irq %u init failed with error %d\n",
3539 pci_irq_vector(pci_dev, i), rc);
3542 ctrl_info->num_msix_vectors_initialized++;
3548 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3552 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3553 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3554 &ctrl_info->queue_groups[i]);
3556 ctrl_info->num_msix_vectors_initialized = 0;
3559 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3561 int num_vectors_enabled;
3563 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3564 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3565 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3566 if (num_vectors_enabled < 0) {
3567 dev_err(&ctrl_info->pci_dev->dev,
3568 "MSI-X init failed with error %d\n",
3569 num_vectors_enabled);
3570 return num_vectors_enabled;
3573 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3574 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3578 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3580 if (ctrl_info->num_msix_vectors_enabled) {
3581 pci_free_irq_vectors(ctrl_info->pci_dev);
3582 ctrl_info->num_msix_vectors_enabled = 0;
3586 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3589 size_t alloc_length;
3590 size_t element_array_length_per_iq;
3591 size_t element_array_length_per_oq;
3592 void *element_array;
3593 void __iomem *next_queue_index;
3594 void *aligned_pointer;
3595 unsigned int num_inbound_queues;
3596 unsigned int num_outbound_queues;
3597 unsigned int num_queue_indexes;
3598 struct pqi_queue_group *queue_group;
3600 element_array_length_per_iq =
3601 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3602 ctrl_info->num_elements_per_iq;
3603 element_array_length_per_oq =
3604 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3605 ctrl_info->num_elements_per_oq;
3606 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3607 num_outbound_queues = ctrl_info->num_queue_groups;
3608 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3610 aligned_pointer = NULL;
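/*
 * First pass: walk a NULL-based pointer through the same alignment
 * steps as the real layout to compute the total allocation length.
 * Each queue group needs two inbound element arrays (RAID and AIO
 * paths), one outbound array, and three queue indexes (two IQ CIs and
 * one OQ PI); the event queue adds one more array and one more index.
 */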
3612 for (i = 0; i < num_inbound_queues; i++) {
3613 aligned_pointer = PTR_ALIGN(aligned_pointer,
3614 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3615 aligned_pointer += element_array_length_per_iq;
3618 for (i = 0; i < num_outbound_queues; i++) {
3619 aligned_pointer = PTR_ALIGN(aligned_pointer,
3620 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3621 aligned_pointer += element_array_length_per_oq;
3624 aligned_pointer = PTR_ALIGN(aligned_pointer,
3625 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3626 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3627 PQI_EVENT_OQ_ELEMENT_LENGTH;
3629 for (i = 0; i < num_queue_indexes; i++) {
3630 aligned_pointer = PTR_ALIGN(aligned_pointer,
3631 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3632 aligned_pointer += sizeof(pqi_index_t);
3635 alloc_length = (size_t)aligned_pointer +
3636 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3638 alloc_length += PQI_EXTRA_SGL_MEMORY;
3640 ctrl_info->queue_memory_base =
3641 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3642 &ctrl_info->queue_memory_base_dma_handle,
3645 if (!ctrl_info->queue_memory_base)
3648 ctrl_info->queue_memory_length = alloc_length;
3650 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3651 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3653 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3654 queue_group = &ctrl_info->queue_groups[i];
3655 queue_group->iq_element_array[RAID_PATH] = element_array;
3656 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3657 ctrl_info->queue_memory_base_dma_handle +
3658 (element_array - ctrl_info->queue_memory_base);
3659 element_array += element_array_length_per_iq;
3660 element_array = PTR_ALIGN(element_array,
3661 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3662 queue_group->iq_element_array[AIO_PATH] = element_array;
3663 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3664 ctrl_info->queue_memory_base_dma_handle +
3665 (element_array - ctrl_info->queue_memory_base);
3666 element_array += element_array_length_per_iq;
3667 element_array = PTR_ALIGN(element_array,
3668 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3671 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3672 queue_group = &ctrl_info->queue_groups[i];
3673 queue_group->oq_element_array = element_array;
3674 queue_group->oq_element_array_bus_addr =
3675 ctrl_info->queue_memory_base_dma_handle +
3676 (element_array - ctrl_info->queue_memory_base);
3677 element_array += element_array_length_per_oq;
3678 element_array = PTR_ALIGN(element_array,
3679 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3682 ctrl_info->event_queue.oq_element_array = element_array;
3683 ctrl_info->event_queue.oq_element_array_bus_addr =
3684 ctrl_info->queue_memory_base_dma_handle +
3685 (element_array - ctrl_info->queue_memory_base);
3686 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3687 PQI_EVENT_OQ_ELEMENT_LENGTH;
3689 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
3690 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3692 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3693 queue_group = &ctrl_info->queue_groups[i];
3694 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3695 queue_group->iq_ci_bus_addr[RAID_PATH] =
3696 ctrl_info->queue_memory_base_dma_handle +
3698 (void __iomem *)ctrl_info->queue_memory_base);
3699 next_queue_index += sizeof(pqi_index_t);
3700 next_queue_index = PTR_ALIGN(next_queue_index,
3701 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3702 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3703 queue_group->iq_ci_bus_addr[AIO_PATH] =
3704 ctrl_info->queue_memory_base_dma_handle +
3706 (void __iomem *)ctrl_info->queue_memory_base);
3707 next_queue_index += sizeof(pqi_index_t);
3708 next_queue_index = PTR_ALIGN(next_queue_index,
3709 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3710 queue_group->oq_pi = next_queue_index;
3711 queue_group->oq_pi_bus_addr =
3712 ctrl_info->queue_memory_base_dma_handle +
3714 (void __iomem *)ctrl_info->queue_memory_base);
3715 next_queue_index += sizeof(pqi_index_t);
3716 next_queue_index = PTR_ALIGN(next_queue_index,
3717 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3720 ctrl_info->event_queue.oq_pi = next_queue_index;
3721 ctrl_info->event_queue.oq_pi_bus_addr =
3722 ctrl_info->queue_memory_base_dma_handle +
3724 (void __iomem *)ctrl_info->queue_memory_base);
3729 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3732 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3733 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3736 * Initialize the backpointers to the controller structure in
3737 * each operational queue group structure.
3739 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3740 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3743 * Assign IDs to all operational queues. Note that the IDs
3744 * assigned to operational IQs are independent of the IDs
3745 * assigned to operational OQs.
3747 ctrl_info->event_queue.oq_id = next_oq_id++;
3748 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3749 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3750 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3751 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3755 * Assign MSI-X table entry indexes to all queues. Note that the
3756 * interrupt for the event queue is shared with the first queue group.
3758 ctrl_info->event_queue.int_msg_num = 0;
3759 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3760 ctrl_info->queue_groups[i].int_msg_num = i;
3762 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3763 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3764 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3765 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3766 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3770 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3772 size_t alloc_length;
3773 struct pqi_admin_queues_aligned *admin_queues_aligned;
3774 struct pqi_admin_queues *admin_queues;
3776 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3777 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3779 ctrl_info->admin_queue_memory_base =
3780 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3781 &ctrl_info->admin_queue_memory_base_dma_handle,
3784 if (!ctrl_info->admin_queue_memory_base)
3787 ctrl_info->admin_queue_memory_length = alloc_length;
3789 admin_queues = &ctrl_info->admin_queues;
3790 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3791 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3792 admin_queues->iq_element_array =
3793 &admin_queues_aligned->iq_element_array;
3794 admin_queues->oq_element_array =
3795 &admin_queues_aligned->oq_element_array;
3796 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3797 admin_queues->oq_pi =
3798 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
3800 admin_queues->iq_element_array_bus_addr =
3801 ctrl_info->admin_queue_memory_base_dma_handle +
3802 (admin_queues->iq_element_array -
3803 ctrl_info->admin_queue_memory_base);
3804 admin_queues->oq_element_array_bus_addr =
3805 ctrl_info->admin_queue_memory_base_dma_handle +
3806 (admin_queues->oq_element_array -
3807 ctrl_info->admin_queue_memory_base);
3808 admin_queues->iq_ci_bus_addr =
3809 ctrl_info->admin_queue_memory_base_dma_handle +
3810 ((void *)admin_queues->iq_ci -
3811 ctrl_info->admin_queue_memory_base);
3812 admin_queues->oq_pi_bus_addr =
3813 ctrl_info->admin_queue_memory_base_dma_handle +
3814 ((void __iomem *)admin_queues->oq_pi -
3815 (void __iomem *)ctrl_info->admin_queue_memory_base);
3820 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
3821 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3823 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3825 struct pqi_device_registers __iomem *pqi_registers;
3826 struct pqi_admin_queues *admin_queues;
3827 unsigned long timeout;
3831 pqi_registers = ctrl_info->pqi_registers;
3832 admin_queues = &ctrl_info->admin_queues;
3834 writeq((u64)admin_queues->iq_element_array_bus_addr,
3835 &pqi_registers->admin_iq_element_array_addr);
3836 writeq((u64)admin_queues->oq_element_array_bus_addr,
3837 &pqi_registers->admin_oq_element_array_addr);
3838 writeq((u64)admin_queues->iq_ci_bus_addr,
3839 &pqi_registers->admin_iq_ci_addr);
3840 writeq((u64)admin_queues->oq_pi_bus_addr,
3841 &pqi_registers->admin_oq_pi_addr);
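/*
 * Pack the admin queue parameters: IQ element count in bits 0-7, OQ
 * element count in bits 8-15, and the interrupt message number for
 * the OQ in bits 16 and up.
 */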
3843 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
(PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
3845 (admin_queues->int_msg_num << 16);
3846 writel(reg, &pqi_registers->admin_iq_num_elements);
3847 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3848 &pqi_registers->function_and_status_code);
3850 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3852 status = readb(&pqi_registers->function_and_status_code);
3853 if (status == PQI_STATUS_IDLE)
3855 if (time_after(jiffies, timeout))
3857 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3861 * The offset registers are not initialized to the correct
3862 * offsets until *after* the create admin queue pair command
3863 * completes successfully.
3865 admin_queues->iq_pi = ctrl_info->iomem_base +
3866 PQI_DEVICE_REGISTERS_OFFSET +
3867 readq(&pqi_registers->admin_iq_pi_offset);
3868 admin_queues->oq_ci = ctrl_info->iomem_base +
3869 PQI_DEVICE_REGISTERS_OFFSET +
3870 readq(&pqi_registers->admin_oq_ci_offset);
3875 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3876 struct pqi_general_admin_request *request)
3878 struct pqi_admin_queues *admin_queues;
3882 admin_queues = &ctrl_info->admin_queues;
3883 iq_pi = admin_queues->iq_pi_copy;
3885 next_element = admin_queues->iq_element_array +
3886 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3888 memcpy(next_element, request, sizeof(*request));
3890 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3891 admin_queues->iq_pi_copy = iq_pi;
* This write notifies the controller that an IU is available to be
* processed.
3897 writel(iq_pi, admin_queues->iq_pi);
3900 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3902 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3903 struct pqi_general_admin_response *response)
3905 struct pqi_admin_queues *admin_queues;
3908 unsigned long timeout;
3910 admin_queues = &ctrl_info->admin_queues;
3911 oq_ci = admin_queues->oq_ci_copy;
3913 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
3916 oq_pi = readl(admin_queues->oq_pi);
3919 if (time_after(jiffies, timeout)) {
3920 dev_err(&ctrl_info->pci_dev->dev,
3921 "timed out waiting for admin response\n");
3924 if (!sis_is_firmware_running(ctrl_info))
3926 usleep_range(1000, 2000);
3929 memcpy(response, admin_queues->oq_element_array +
3930 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3932 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3933 admin_queues->oq_ci_copy = oq_ci;
3934 writel(oq_ci, admin_queues->oq_ci);
3939 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3940 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3941 struct pqi_io_request *io_request)
3943 struct pqi_io_request *next;
3948 unsigned long flags;
3949 unsigned int num_elements_needed;
3950 unsigned int num_elements_to_end_of_queue;
3952 struct pqi_iu_header *request;
3954 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3957 io_request->queue_group = queue_group;
3958 list_add_tail(&io_request->request_list_entry,
3959 &queue_group->request_list[path]);
3962 iq_pi = queue_group->iq_pi_copy[path];
3964 list_for_each_entry_safe(io_request, next,
3965 &queue_group->request_list[path], request_list_entry) {
3967 request = io_request->iu;
3969 iu_length = get_unaligned_le16(&request->iu_length) +
3970 PQI_REQUEST_HEADER_LENGTH;
3971 num_elements_needed =
3972 DIV_ROUND_UP(iu_length,
3973 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3975 iq_ci = readl(queue_group->iq_ci[path]);
3977 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3978 ctrl_info->num_elements_per_iq))
3981 put_unaligned_le16(queue_group->oq_id,
3982 &request->response_queue_id);
3984 next_element = queue_group->iq_element_array[path] +
3985 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3987 num_elements_to_end_of_queue =
3988 ctrl_info->num_elements_per_iq - iq_pi;
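/*
 * If the IU does not fit before the end of the element array, split
 * the copy at the wrap point and place the remainder at the start of
 * the array (inbound spanning).
 */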
3990 if (num_elements_needed <= num_elements_to_end_of_queue) {
3991 memcpy(next_element, request, iu_length);
3993 copy_count = num_elements_to_end_of_queue *
3994 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3995 memcpy(next_element, request, copy_count);
3996 memcpy(queue_group->iq_element_array[path],
3997 (u8 *)request + copy_count,
3998 iu_length - copy_count);
4001 iq_pi = (iq_pi + num_elements_needed) %
4002 ctrl_info->num_elements_per_iq;
4004 list_del(&io_request->request_list_entry);
4007 if (iq_pi != queue_group->iq_pi_copy[path]) {
4008 queue_group->iq_pi_copy[path] = iq_pi;
4010 * This write notifies the controller that one or more IUs are
4011 * available to be processed.
4013 writel(iq_pi, queue_group->iq_pi[path]);
4016 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4019 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4021 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4022 struct completion *wait)
4027 if (wait_for_completion_io_timeout(wait,
4028 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
4033 pqi_check_ctrl_health(ctrl_info);
4034 if (pqi_ctrl_offline(ctrl_info)) {
4043 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4046 struct completion *waiting = context;
4051 static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
4056 switch (error_info->data_out_result) {
4057 case PQI_DATA_IN_OUT_GOOD:
4058 if (error_info->status == SAM_STAT_GOOD)
4061 case PQI_DATA_IN_OUT_UNDERFLOW:
4062 if (error_info->status == SAM_STAT_GOOD ||
4063 error_info->status == SAM_STAT_CHECK_CONDITION)
4066 case PQI_DATA_IN_OUT_ABORTED:
4067 rc = PQI_CMD_STATUS_ABORTED;
4074 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4075 struct pqi_iu_header *request, unsigned int flags,
4076 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
4079 struct pqi_io_request *io_request;
4080 unsigned long start_jiffies;
4081 unsigned long msecs_blocked;
4083 DECLARE_COMPLETION_ONSTACK(wait);
4086 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
4087 * are mutually exclusive.
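* When a finite timeout is supplied, any time spent blocked on the
* sync request semaphore is deducted from the remaining budget.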
4090 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4091 if (down_interruptible(&ctrl_info->sync_request_sem))
4092 return -ERESTARTSYS;
4094 if (timeout_msecs == NO_TIMEOUT) {
4095 down(&ctrl_info->sync_request_sem);
4097 start_jiffies = jiffies;
4098 if (down_timeout(&ctrl_info->sync_request_sem,
4099 msecs_to_jiffies(timeout_msecs)))
4102 jiffies_to_msecs(jiffies - start_jiffies);
4103 if (msecs_blocked >= timeout_msecs) {
4107 timeout_msecs -= msecs_blocked;
4111 pqi_ctrl_busy(ctrl_info);
4112 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
4113 if (timeout_msecs == 0) {
4114 pqi_ctrl_unbusy(ctrl_info);
4119 if (pqi_ctrl_offline(ctrl_info)) {
4120 pqi_ctrl_unbusy(ctrl_info);
4125 io_request = pqi_alloc_io_request(ctrl_info);
4127 put_unaligned_le16(io_request->index,
4128 &(((struct pqi_raid_path_request *)request)->request_id));
4130 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4131 ((struct pqi_raid_path_request *)request)->error_index =
4132 ((struct pqi_raid_path_request *)request)->request_id;
4134 iu_length = get_unaligned_le16(&request->iu_length) +
4135 PQI_REQUEST_HEADER_LENGTH;
4136 memcpy(io_request->iu, request, iu_length);
4138 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4139 io_request->context = &wait;
4141 pqi_start_io(ctrl_info,
4142 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4145 pqi_ctrl_unbusy(ctrl_info);
4147 if (timeout_msecs == NO_TIMEOUT) {
4148 pqi_wait_for_completion_io(ctrl_info, &wait);
4150 if (!wait_for_completion_io_timeout(&wait,
4151 msecs_to_jiffies(timeout_msecs))) {
4152 dev_warn(&ctrl_info->pci_dev->dev,
4153 "command timed out\n");
4159 if (io_request->error_info)
4160 memcpy(error_info, io_request->error_info,
4161 sizeof(*error_info));
4163 memset(error_info, 0, sizeof(*error_info));
4164 } else if (rc == 0 && io_request->error_info) {
4165 rc = pqi_process_raid_io_error_synchronous(
4166 io_request->error_info);
4169 pqi_free_io_request(io_request);
4172 up(&ctrl_info->sync_request_sem);
4177 static int pqi_validate_admin_response(
4178 struct pqi_general_admin_response *response, u8 expected_function_code)
4180 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4183 if (get_unaligned_le16(&response->header.iu_length) !=
4184 PQI_GENERAL_ADMIN_IU_LENGTH)
4187 if (response->function_code != expected_function_code)
4190 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4196 static int pqi_submit_admin_request_synchronous(
4197 struct pqi_ctrl_info *ctrl_info,
4198 struct pqi_general_admin_request *request,
4199 struct pqi_general_admin_response *response)
4203 pqi_submit_admin_request(ctrl_info, request);
4205 rc = pqi_poll_for_admin_response(ctrl_info, response);
4208 rc = pqi_validate_admin_response(response,
4209 request->function_code);
4214 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4217 struct pqi_general_admin_request request;
4218 struct pqi_general_admin_response response;
4219 struct pqi_device_capability *capability;
4220 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4222 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4226 memset(&request, 0, sizeof(request));
4228 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4229 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4230 &request.header.iu_length);
4231 request.function_code =
4232 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4233 put_unaligned_le32(sizeof(*capability),
4234 &request.data.report_device_capability.buffer_length);
4236 rc = pqi_map_single(ctrl_info->pci_dev,
4237 &request.data.report_device_capability.sg_descriptor,
4238 capability, sizeof(*capability),
4243 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4246 pqi_pci_unmap(ctrl_info->pci_dev,
4247 &request.data.report_device_capability.sg_descriptor, 1,
4253 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4258 ctrl_info->max_inbound_queues =
4259 get_unaligned_le16(&capability->max_inbound_queues);
4260 ctrl_info->max_elements_per_iq =
4261 get_unaligned_le16(&capability->max_elements_per_iq);
4262 ctrl_info->max_iq_element_length =
4263 get_unaligned_le16(&capability->max_iq_element_length)
4265 ctrl_info->max_outbound_queues =
4266 get_unaligned_le16(&capability->max_outbound_queues);
4267 ctrl_info->max_elements_per_oq =
4268 get_unaligned_le16(&capability->max_elements_per_oq);
4269 ctrl_info->max_oq_element_length =
4270 get_unaligned_le16(&capability->max_oq_element_length)
4273 sop_iu_layer_descriptor =
4274 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4276 ctrl_info->max_inbound_iu_length_per_firmware =
4278 &sop_iu_layer_descriptor->max_inbound_iu_length);
4279 ctrl_info->inbound_spanning_supported =
4280 sop_iu_layer_descriptor->inbound_spanning_supported;
4281 ctrl_info->outbound_spanning_supported =
4282 sop_iu_layer_descriptor->outbound_spanning_supported;
4290 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4292 if (ctrl_info->max_iq_element_length <
4293 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4294 dev_err(&ctrl_info->pci_dev->dev,
4295 "max. inbound queue element length of %d is less than the required length of %d\n",
4296 ctrl_info->max_iq_element_length,
4297 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4301 if (ctrl_info->max_oq_element_length <
4302 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4303 dev_err(&ctrl_info->pci_dev->dev,
4304 "max. outbound queue element length of %d is less than the required length of %d\n",
4305 ctrl_info->max_oq_element_length,
4306 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4310 if (ctrl_info->max_inbound_iu_length_per_firmware <
4311 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4312 dev_err(&ctrl_info->pci_dev->dev,
4313 "max. inbound IU length of %u is less than the min. required length of %d\n",
4314 ctrl_info->max_inbound_iu_length_per_firmware,
4315 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4319 if (!ctrl_info->inbound_spanning_supported) {
4320 dev_err(&ctrl_info->pci_dev->dev,
4321 "the controller does not support inbound spanning\n");
4325 if (ctrl_info->outbound_spanning_supported) {
4326 dev_err(&ctrl_info->pci_dev->dev,
4327 "the controller supports outbound spanning but this driver does not\n");
4334 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4337 struct pqi_event_queue *event_queue;
4338 struct pqi_general_admin_request request;
4339 struct pqi_general_admin_response response;
4341 event_queue = &ctrl_info->event_queue;
* Create OQ (Outbound Queue - device to host queue) to dedicate
* to events.
4347 memset(&request, 0, sizeof(request));
4348 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4349 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4350 &request.header.iu_length);
4351 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4352 put_unaligned_le16(event_queue->oq_id,
4353 &request.data.create_operational_oq.queue_id);
4354 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4355 &request.data.create_operational_oq.element_array_addr);
4356 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4357 &request.data.create_operational_oq.pi_addr);
4358 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4359 &request.data.create_operational_oq.num_elements);
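/* Queue element lengths are specified in units of 16 bytes. */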
4360 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4361 &request.data.create_operational_oq.element_length);
4362 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4363 put_unaligned_le16(event_queue->int_msg_num,
4364 &request.data.create_operational_oq.int_msg_num);
4366 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4371 event_queue->oq_ci = ctrl_info->iomem_base +
4372 PQI_DEVICE_REGISTERS_OFFSET +
4374 &response.data.create_operational_oq.oq_ci_offset);
4379 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4380 unsigned int group_number)
4383 struct pqi_queue_group *queue_group;
4384 struct pqi_general_admin_request request;
4385 struct pqi_general_admin_response response;
4387 queue_group = &ctrl_info->queue_groups[group_number];
* Create IQ (Inbound Queue - host to device queue) for
* RAID path.
4393 memset(&request, 0, sizeof(request));
4394 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4395 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4396 &request.header.iu_length);
4397 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4398 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4399 &request.data.create_operational_iq.queue_id);
4401 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4402 &request.data.create_operational_iq.element_array_addr);
4403 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4404 &request.data.create_operational_iq.ci_addr);
4405 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4406 &request.data.create_operational_iq.num_elements);
4407 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4408 &request.data.create_operational_iq.element_length);
4409 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4411 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4414 dev_err(&ctrl_info->pci_dev->dev,
4415 "error creating inbound RAID queue\n");
4419 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4420 PQI_DEVICE_REGISTERS_OFFSET +
4422 &response.data.create_operational_iq.iq_pi_offset);
4425 * Create IQ (Inbound Queue - host to device queue) for
4426 * Advanced I/O (AIO) path.
4428 memset(&request, 0, sizeof(request));
4429 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4430 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4431 &request.header.iu_length);
4432 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4433 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4434 &request.data.create_operational_iq.queue_id);
4435 put_unaligned_le64((u64)queue_group->
4436 iq_element_array_bus_addr[AIO_PATH],
4437 &request.data.create_operational_iq.element_array_addr);
4438 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4439 &request.data.create_operational_iq.ci_addr);
4440 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4441 &request.data.create_operational_iq.num_elements);
4442 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4443 &request.data.create_operational_iq.element_length);
4444 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4446 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4447 &response);
4448 if (rc) {
4449 dev_err(&ctrl_info->pci_dev->dev,
4450 "error creating inbound AIO queue\n");
4451 return rc;
4452 }
4454 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4455 PQI_DEVICE_REGISTERS_OFFSET +
4456 get_unaligned_le64(
4457 &response.data.create_operational_iq.iq_pi_offset);
4459 /*
4460 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4461 * assumed to be for RAID path I/O unless we change the queue's
4462 * property.
4463 */
4464 memset(&request, 0, sizeof(request));
4465 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4466 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4467 &request.header.iu_length);
4468 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4469 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4470 &request.data.change_operational_iq_properties.queue_id);
4471 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4472 &request.data.change_operational_iq_properties.vendor_specific);
4474 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4475 &response);
4476 if (rc) {
4477 dev_err(&ctrl_info->pci_dev->dev,
4478 "error changing queue property\n");
4479 return rc;
4480 }
4482 /*
4483 * Create OQ (Outbound Queue - device to host queue).
4484 */
4485 memset(&request, 0, sizeof(request));
4486 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4487 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4488 &request.header.iu_length);
4489 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4490 put_unaligned_le16(queue_group->oq_id,
4491 &request.data.create_operational_oq.queue_id);
4492 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4493 &request.data.create_operational_oq.element_array_addr);
4494 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4495 &request.data.create_operational_oq.pi_addr);
4496 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4497 &request.data.create_operational_oq.num_elements);
4498 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4499 &request.data.create_operational_oq.element_length);
4500 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4501 put_unaligned_le16(queue_group->int_msg_num,
4502 &request.data.create_operational_oq.int_msg_num);
4504 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4505 &response);
4506 if (rc) {
4507 dev_err(&ctrl_info->pci_dev->dev,
4508 "error creating outbound queue\n");
4509 return rc;
4510 }
4512 queue_group->oq_ci = ctrl_info->iomem_base +
4513 PQI_DEVICE_REGISTERS_OFFSET +
4514 get_unaligned_le64(
4515 &response.data.create_operational_oq.oq_ci_offset);
4517 return 0;
4518 }
4520 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4521 {
4522 int rc;
4523 unsigned int i;
4525 rc = pqi_create_event_queue(ctrl_info);
4526 if (rc) {
4527 dev_err(&ctrl_info->pci_dev->dev,
4528 "error creating event queue\n");
4529 return rc;
4530 }
4532 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4533 rc = pqi_create_queue_group(ctrl_info, i);
4534 if (rc) {
4535 dev_err(&ctrl_info->pci_dev->dev,
4536 "error creating queue group number %u/%u\n",
4537 i, ctrl_info->num_queue_groups);
4538 return rc;
4539 }
4540 }
4542 return 0;
4543 }
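/*
 * Editor's note: the iq_pi/oq_ci doorbell pointers created above are built
 * from offsets returned by firmware. A hedged sketch (not driver code) of
 * bounds-checking such an offset against the mapped BAR length before
 * dereferencing it; the helper name and parameters are assumptions.
 */
static inline bool pqi_example_register_offset_is_valid(u64 offset,
	resource_size_t mapped_bar_len)
{
	/* Reject offsets that would point past the mapped region. */
	return offset < mapped_bar_len;
}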
4545 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4546 (offsetof(struct pqi_event_config, descriptors) + \
4547 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4549 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4550 bool enable_events)
4551 {
4552 int rc;
4553 unsigned int i;
4554 struct pqi_event_config *event_config;
4555 struct pqi_event_descriptor *event_descriptor;
4556 struct pqi_general_management_request request;
4558 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4559 GFP_KERNEL);
4561 if (!event_config)
4562 return -ENOMEM;
4563 memset(&request, 0, sizeof(request));
4565 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4566 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4567 data.report_event_configuration.sg_descriptors[1]) -
4568 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4569 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4570 &request.data.report_event_configuration.buffer_length);
4572 rc = pqi_map_single(ctrl_info->pci_dev,
4573 request.data.report_event_configuration.sg_descriptors,
4574 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4575 DMA_FROM_DEVICE);
4577 if (rc)
4578 goto out;
4579 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4580 0, NULL, NO_TIMEOUT);
4582 pqi_pci_unmap(ctrl_info->pci_dev,
4583 request.data.report_event_configuration.sg_descriptors, 1,
4584 DMA_FROM_DEVICE);
4586 if (rc)
4587 goto out;
4589 for (i = 0; i < event_config->num_event_descriptors; i++) {
4590 event_descriptor = &event_config->descriptors[i];
4591 if (enable_events &&
4592 pqi_is_supported_event(event_descriptor->event_type))
4593 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4594 &event_descriptor->oq_id);
4595 else
4596 put_unaligned_le16(0, &event_descriptor->oq_id);
4597 }
4599 memset(&request, 0, sizeof(request));
4601 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4602 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4603 data.report_event_configuration.sg_descriptors[1]) -
4604 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4605 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4606 &request.data.report_event_configuration.buffer_length);
4608 rc = pqi_map_single(ctrl_info->pci_dev,
4609 request.data.report_event_configuration.sg_descriptors,
4610 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4611 DMA_TO_DEVICE);
4613 if (rc)
4614 goto out;
4615 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4616 NULL, NO_TIMEOUT);
4618 pqi_pci_unmap(ctrl_info->pci_dev,
4619 request.data.report_event_configuration.sg_descriptors, 1,
4620 DMA_TO_DEVICE);
4622 out:
4623 kfree(event_config);
4625 return rc;
4626 }
4628 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4629 {
4630 return pqi_configure_events(ctrl_info, true);
4631 }
4633 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4634 {
4635 return pqi_configure_events(ctrl_info, false);
4636 }
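/*
 * Editor's note: a hedged usage sketch. During operations that quiesce the
 * controller (e.g. Online Firmware Activation), event notifications can be
 * bracketed with the wrappers above; error handling is elided and the
 * function name is hypothetical.
 */
static void pqi_example_event_bracket(struct pqi_ctrl_info *ctrl_info)
{
	pqi_disable_events(ctrl_info);	/* stop event notifications */
	/* ... quiesced work would happen here ... */
	pqi_enable_events(ctrl_info);	/* re-arm event notifications */
}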
4638 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4639 {
4640 unsigned int i;
4641 struct device *dev;
4642 size_t sg_chain_buffer_length;
4643 struct pqi_io_request *io_request;
4645 if (!ctrl_info->io_request_pool)
4646 return;
4648 dev = &ctrl_info->pci_dev->dev;
4649 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4650 io_request = ctrl_info->io_request_pool;
4652 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4653 kfree(io_request->iu);
4654 if (!io_request->sg_chain_buffer)
4655 break;
4656 dma_free_coherent(dev, sg_chain_buffer_length,
4657 io_request->sg_chain_buffer,
4658 io_request->sg_chain_buffer_dma_handle);
4659 io_request++;
4660 }
4662 kfree(ctrl_info->io_request_pool);
4663 ctrl_info->io_request_pool = NULL;
4664 }
4666 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4667 {
4668 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4669 ctrl_info->error_buffer_length,
4670 &ctrl_info->error_buffer_dma_handle,
4671 GFP_KERNEL);
4673 if (!ctrl_info->error_buffer)
4674 return -ENOMEM;
4676 return 0;
4677 }
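/*
 * Editor's note: a sketch of the matching teardown for the allocation above.
 * The real driver frees this buffer elsewhere in the file, so this helper
 * name is hypothetical; it only mirrors the dma_alloc_coherent() arguments.
 */
static inline void pqi_example_free_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
}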
4679 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4680 {
4681 unsigned int i;
4682 void *sg_chain_buffer;
4683 size_t sg_chain_buffer_length;
4684 dma_addr_t sg_chain_buffer_dma_handle;
4685 struct device *dev;
4686 struct pqi_io_request *io_request;
4688 ctrl_info->io_request_pool =
4689 kcalloc(ctrl_info->max_io_slots,
4690 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4692 if (!ctrl_info->io_request_pool) {
4693 dev_err(&ctrl_info->pci_dev->dev,
4694 "failed to allocate I/O request pool\n");
4695 goto error;
4696 }
4698 dev = &ctrl_info->pci_dev->dev;
4699 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4700 io_request = ctrl_info->io_request_pool;
4702 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4703 io_request->iu =
4704 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4706 if (!io_request->iu) {
4707 dev_err(&ctrl_info->pci_dev->dev,
4708 "failed to allocate IU buffers\n");
4709 goto error;
4710 }
4712 sg_chain_buffer = dma_alloc_coherent(dev,
4713 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4714 GFP_KERNEL);
4716 if (!sg_chain_buffer) {
4717 dev_err(&ctrl_info->pci_dev->dev,
4718 "failed to allocate PQI scatter-gather chain buffers\n");
4719 goto error;
4720 }
4722 io_request->index = i;
4723 io_request->sg_chain_buffer = sg_chain_buffer;
4724 io_request->sg_chain_buffer_dma_handle =
4725 sg_chain_buffer_dma_handle;
4726 io_request++;
4727 }
4729 return 0;
4731 error:
4732 pqi_free_all_io_requests(ctrl_info);
4734 return -ENOMEM;
4735 }
4737 /*
4738 * Calculate required resources that are sized based on max. outstanding
4739 * requests and max. transfer size.
4740 */
4742 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4743 {
4744 u32 max_transfer_size;
4745 u32 max_sg_entries;
4747 ctrl_info->scsi_ml_can_queue =
4748 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4749 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4751 ctrl_info->error_buffer_length =
4752 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4754 if (reset_devices)
4755 max_transfer_size = min(ctrl_info->max_transfer_size,
4756 PQI_MAX_TRANSFER_SIZE_KDUMP);
4757 else
4758 max_transfer_size = min(ctrl_info->max_transfer_size,
4759 PQI_MAX_TRANSFER_SIZE);
4761 max_sg_entries = max_transfer_size / PAGE_SIZE;
4763 /* +1 to cover when the buffer is not page-aligned. */
4764 max_sg_entries++;
4766 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4768 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4770 ctrl_info->sg_chain_buffer_length =
4771 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4772 PQI_EXTRA_SGL_MEMORY;
4773 ctrl_info->sg_tablesize = max_sg_entries;
4774 ctrl_info->max_sectors = max_transfer_size / 512;
4775 }
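/*
 * Editor's note: a worked example of the sizing above, assuming 4 KiB pages
 * and a 1 MiB max_transfer_size: 1 MiB / 4 KiB = 256 SG entries, +1 for a
 * possibly unaligned buffer = 257; the usable transfer size is then
 * (257 - 1) * 4 KiB = 1 MiB, i.e. max_sectors = 2048 512-byte sectors.
 */
static u32 pqi_example_max_sectors(u32 max_transfer_size)
{
	u32 max_sg_entries = max_transfer_size / PAGE_SIZE;

	max_sg_entries++;	/* buffer may not be page-aligned */
	return ((max_sg_entries - 1) * PAGE_SIZE) / 512;
}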
4777 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4779 int num_queue_groups;
4780 u16 num_elements_per_iq;
4781 u16 num_elements_per_oq;
4783 if (reset_devices) {
4784 num_queue_groups = 1;
4785 } else {
4786 int num_cpus;
4787 int max_queue_groups;
4789 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4790 ctrl_info->max_outbound_queues - 1);
4791 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4793 num_cpus = num_online_cpus();
4794 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4795 num_queue_groups = min(num_queue_groups, max_queue_groups);
4796 }
4798 ctrl_info->num_queue_groups = num_queue_groups;
4799 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4801 /*
4802 * Make sure that the max. inbound IU length is an even multiple
4803 * of our inbound element length.
4804 */
4805 ctrl_info->max_inbound_iu_length =
4806 (ctrl_info->max_inbound_iu_length_per_firmware /
4807 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4808 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4810 num_elements_per_iq =
4811 (ctrl_info->max_inbound_iu_length /
4812 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4814 /* Add one because one element in each queue is unusable. */
4815 num_elements_per_iq++;
4817 num_elements_per_iq = min(num_elements_per_iq,
4818 ctrl_info->max_elements_per_iq);
4820 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4821 num_elements_per_oq = min(num_elements_per_oq,
4822 ctrl_info->max_elements_per_oq);
4824 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4825 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4827 ctrl_info->max_sg_per_iu =
4828 ((ctrl_info->max_inbound_iu_length -
4829 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4830 sizeof(struct pqi_sg_descriptor)) +
4831 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4832 }
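/*
 * Editor's note: illustrative numbers for the element math above, with
 * assumed values. If firmware reports a 1088-byte max inbound IU and the
 * operational IQ element length is 64 bytes, 1088 / 64 = 17 elements per
 * IU, +1 unusable element = 18 per IQ, and (18 - 1) * 2 + 1 = 35 per OQ.
 */
static u16 pqi_example_element_counts(void)
{
	u32 max_inbound_iu_length = 1088;	/* assumed firmware value */
	u32 iq_element_length = 64;		/* assumed element length */
	u16 elements_per_iq;
	u16 elements_per_oq;

	elements_per_iq = (max_inbound_iu_length / iq_element_length) + 1; /* 18 */
	elements_per_oq = ((elements_per_iq - 1) * 2) + 1;		   /* 35 */

	return elements_per_oq;	/* both are clamped to firmware limits above */
}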
4834 static inline void pqi_set_sg_descriptor(
4835 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4837 u64 address = (u64)sg_dma_address(sg);
4838 unsigned int length = sg_dma_len(sg);
4840 put_unaligned_le64(address, &sg_descriptor->address);
4841 put_unaligned_le32(length, &sg_descriptor->length);
4842 put_unaligned_le32(0, &sg_descriptor->flags);
4845 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4846 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4847 struct pqi_io_request *io_request)
4848 {
4849 int i;
4850 u16 iu_length;
4851 int sg_count;
4852 bool chained;
4853 unsigned int num_sg_in_iu;
4854 unsigned int max_sg_per_iu;
4855 struct scatterlist *sg;
4856 struct pqi_sg_descriptor *sg_descriptor;
4858 sg_count = scsi_dma_map(scmd);
4859 if (sg_count < 0)
4860 return sg_count;
4862 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4863 PQI_REQUEST_HEADER_LENGTH;
4865 if (sg_count == 0)
4866 goto out;
4868 sg = scsi_sglist(scmd);
4869 sg_descriptor = request->sg_descriptors;
4870 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4871 chained = false;
4872 num_sg_in_iu = 0;
4873 i = 0;
4875 while (1) {
4876 pqi_set_sg_descriptor(sg_descriptor, sg);
4877 if (!chained)
4878 num_sg_in_iu++;
4879 i++;
4880 if (i == sg_count)
4881 break;
4882 sg_descriptor++;
4883 if (i == max_sg_per_iu) {
4884 put_unaligned_le64(
4885 (u64)io_request->sg_chain_buffer_dma_handle,
4886 &sg_descriptor->address);
4887 put_unaligned_le32((sg_count - num_sg_in_iu)
4888 * sizeof(*sg_descriptor),
4889 &sg_descriptor->length);
4890 put_unaligned_le32(CISS_SG_CHAIN,
4891 &sg_descriptor->flags);
4892 chained = true;
4893 num_sg_in_iu++;
4894 sg_descriptor = io_request->sg_chain_buffer;
4895 }
4896 sg = sg_next(sg);
4897 }
4899 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4900 request->partial = chained;
4901 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4903 out:
4904 put_unaligned_le16(iu_length, &request->header.iu_length);
4906 return 0;
4907 }
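/*
 * Editor's note: a compact restatement of the chaining rule implemented
 * above, as a hypothetical predicate. Up to max_sg_per_iu - 1 descriptors
 * are embedded in the IU; one embedded slot is reserved for the
 * CISS_SG_CHAIN descriptor that points at the chain buffer holding the
 * remainder.
 */
static inline bool pqi_example_needs_sg_chain(int sg_count,
	unsigned int embedded_sg_slots)
{
	/* embedded_sg_slots corresponds to max_sg_per_iu - 1 above */
	return sg_count > (int)embedded_sg_slots;
}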
4909 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4910 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4911 struct pqi_io_request *io_request)
4912 {
4913 int i;
4914 u16 iu_length;
4915 int sg_count;
4916 bool chained;
4917 unsigned int num_sg_in_iu;
4918 unsigned int max_sg_per_iu;
4919 struct scatterlist *sg;
4920 struct pqi_sg_descriptor *sg_descriptor;
4922 sg_count = scsi_dma_map(scmd);
4923 if (sg_count < 0)
4924 return sg_count;
4926 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4927 PQI_REQUEST_HEADER_LENGTH;
4928 num_sg_in_iu = 0;
4930 if (sg_count == 0)
4931 goto out;
4933 sg = scsi_sglist(scmd);
4934 sg_descriptor = request->sg_descriptors;
4935 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4936 chained = false;
4937 i = 0;
4939 while (1) {
4940 pqi_set_sg_descriptor(sg_descriptor, sg);
4941 if (!chained)
4942 num_sg_in_iu++;
4943 i++;
4944 if (i == sg_count)
4945 break;
4946 sg_descriptor++;
4947 if (i == max_sg_per_iu) {
4948 put_unaligned_le64(
4949 (u64)io_request->sg_chain_buffer_dma_handle,
4950 &sg_descriptor->address);
4951 put_unaligned_le32((sg_count - num_sg_in_iu)
4952 * sizeof(*sg_descriptor),
4953 &sg_descriptor->length);
4954 put_unaligned_le32(CISS_SG_CHAIN,
4955 &sg_descriptor->flags);
4956 chained = true;
4957 num_sg_in_iu++;
4958 sg_descriptor = io_request->sg_chain_buffer;
4959 }
4960 sg = sg_next(sg);
4961 }
4963 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4964 request->partial = chained;
4965 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4967 out:
4968 put_unaligned_le16(iu_length, &request->header.iu_length);
4969 request->num_sg_descriptors = num_sg_in_iu;
4971 return 0;
4972 }
4974 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4975 void *context)
4976 {
4977 struct scsi_cmnd *scmd;
4979 scmd = io_request->scmd;
4980 pqi_free_io_request(io_request);
4981 scsi_dma_unmap(scmd);
4982 pqi_scsi_done(scmd);
4985 static int pqi_raid_submit_scsi_cmd_with_io_request(
4986 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
4987 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4988 struct pqi_queue_group *queue_group)
4989 {
4990 int rc;
4991 size_t cdb_length;
4992 struct pqi_raid_path_request *request;
4994 io_request->io_complete_callback = pqi_raid_io_complete;
4995 io_request->scmd = scmd;
4997 request = io_request->iu;
4998 memset(request, 0,
4999 offsetof(struct pqi_raid_path_request, sg_descriptors));
5001 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5002 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5003 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5004 put_unaligned_le16(io_request->index, &request->request_id);
5005 request->error_index = request->request_id;
5006 memcpy(request->lun_number, device->scsi3addr,
5007 sizeof(request->lun_number));
5009 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5010 memcpy(request->cdb, scmd->cmnd, cdb_length);
5012 switch (cdb_length) {
5013 case 6:
5014 case 10:
5015 case 12:
5016 case 16:
5017 /* No bytes in the Additional CDB bytes field */
5018 request->additional_cdb_bytes_usage =
5019 SOP_ADDITIONAL_CDB_BYTES_0;
5020 break;
5021 case 20:
5022 /* 4 bytes in the Additional cdb field */
5023 request->additional_cdb_bytes_usage =
5024 SOP_ADDITIONAL_CDB_BYTES_4;
5025 break;
5026 case 24:
5027 /* 8 bytes in the Additional cdb field */
5028 request->additional_cdb_bytes_usage =
5029 SOP_ADDITIONAL_CDB_BYTES_8;
5030 break;
5031 case 28:
5032 /* 12 bytes in the Additional cdb field */
5033 request->additional_cdb_bytes_usage =
5034 SOP_ADDITIONAL_CDB_BYTES_12;
5035 break;
5036 case 32:
5037 default:
5038 /* 16 bytes in the Additional cdb field */
5039 request->additional_cdb_bytes_usage =
5040 SOP_ADDITIONAL_CDB_BYTES_16;
5041 break;
5042 }
5044 switch (scmd->sc_data_direction) {
5045 case DMA_TO_DEVICE:
5046 request->data_direction = SOP_READ_FLAG;
5047 break;
5048 case DMA_FROM_DEVICE:
5049 request->data_direction = SOP_WRITE_FLAG;
5050 break;
5051 case DMA_NONE:
5052 request->data_direction = SOP_NO_DIRECTION_FLAG;
5053 break;
5054 case DMA_BIDIRECTIONAL:
5055 request->data_direction = SOP_BIDIRECTIONAL;
5056 break;
5057 default:
5058 dev_err(&ctrl_info->pci_dev->dev,
5059 "unknown data direction: %d\n",
5060 scmd->sc_data_direction);
5061 break;
5062 }
5064 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5065 if (rc) {
5066 pqi_free_io_request(io_request);
5067 return SCSI_MLQUEUE_HOST_BUSY;
5068 }
5070 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5072 return 0;
5073 }
5075 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5076 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5077 struct pqi_queue_group *queue_group)
5079 struct pqi_io_request *io_request;
5081 io_request = pqi_alloc_io_request(ctrl_info);
5083 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5084 device, scmd, queue_group);
5087 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
5088 {
5089 if (!pqi_ctrl_blocked(ctrl_info))
5090 schedule_work(&ctrl_info->raid_bypass_retry_work);
5091 }
5093 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5094 {
5095 struct scsi_cmnd *scmd;
5096 struct pqi_scsi_dev *device;
5097 struct pqi_ctrl_info *ctrl_info;
5099 if (!io_request->raid_bypass)
5100 return false;
5102 scmd = io_request->scmd;
5103 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5104 return false;
5105 if (host_byte(scmd->result) == DID_NO_CONNECT)
5106 return false;
5108 device = scmd->device->hostdata;
5109 if (pqi_device_offline(device))
5110 return false;
5112 ctrl_info = shost_to_hba(scmd->device->host);
5113 if (pqi_ctrl_offline(ctrl_info))
5114 return false;
5116 return true;
5117 }
5119 static inline void pqi_add_to_raid_bypass_retry_list(
5120 struct pqi_ctrl_info *ctrl_info,
5121 struct pqi_io_request *io_request, bool at_head)
5122 {
5123 unsigned long flags;
5125 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5126 if (at_head)
5127 list_add(&io_request->request_list_entry,
5128 &ctrl_info->raid_bypass_retry_list);
5129 else
5130 list_add_tail(&io_request->request_list_entry,
5131 &ctrl_info->raid_bypass_retry_list);
5132 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5133 }
5135 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
5136 void *context)
5137 {
5138 struct scsi_cmnd *scmd;
5140 scmd = io_request->scmd;
5141 pqi_free_io_request(io_request);
5142 pqi_scsi_done(scmd);
5143 }
5145 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
5147 struct scsi_cmnd *scmd;
5148 struct pqi_ctrl_info *ctrl_info;
5150 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
5151 scmd = io_request->scmd;
5153 ctrl_info = shost_to_hba(scmd->device->host);
5155 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
5156 pqi_schedule_bypass_retry(ctrl_info);
5159 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
5161 struct scsi_cmnd *scmd;
5162 struct pqi_scsi_dev *device;
5163 struct pqi_ctrl_info *ctrl_info;
5164 struct pqi_queue_group *queue_group;
5166 scmd = io_request->scmd;
5167 device = scmd->device->hostdata;
5168 if (pqi_device_in_reset(device)) {
5169 pqi_free_io_request(io_request);
5170 set_host_byte(scmd, DID_RESET);
5171 pqi_scsi_done(scmd);
5172 return 0;
5173 }
5175 ctrl_info = shost_to_hba(scmd->device->host);
5176 queue_group = io_request->queue_group;
5178 pqi_reinit_io_request(io_request);
5180 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5181 device, scmd, queue_group);
5184 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
5185 struct pqi_ctrl_info *ctrl_info)
5187 unsigned long flags;
5188 struct pqi_io_request *io_request;
5190 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5191 io_request = list_first_entry_or_null(
5192 &ctrl_info->raid_bypass_retry_list,
5193 struct pqi_io_request, request_list_entry);
5194 if (io_request)
5195 list_del(&io_request->request_list_entry);
5196 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5198 return io_request;
5199 }
5201 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
5202 {
5203 int rc;
5204 struct pqi_io_request *io_request;
5206 pqi_ctrl_busy(ctrl_info);
5208 for (;;) {
5209 if (pqi_ctrl_blocked(ctrl_info))
5210 break;
5211 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
5212 if (!io_request)
5213 break;
5214 rc = pqi_retry_raid_bypass(io_request);
5215 if (rc) {
5216 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
5217 true);
5218 pqi_schedule_bypass_retry(ctrl_info);
5219 break;
5220 }
5221 }
5223 pqi_ctrl_unbusy(ctrl_info);
5224 }
5226 static void pqi_raid_bypass_retry_worker(struct work_struct *work)
5228 struct pqi_ctrl_info *ctrl_info;
5230 ctrl_info = container_of(work, struct pqi_ctrl_info,
5231 raid_bypass_retry_work);
5232 pqi_retry_raid_bypass_requests(ctrl_info);
5235 static void pqi_clear_all_queued_raid_bypass_retries(
5236 struct pqi_ctrl_info *ctrl_info)
5238 unsigned long flags;
5240 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5241 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
5242 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5245 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5246 void *context)
5247 {
5248 struct scsi_cmnd *scmd;
5250 scmd = io_request->scmd;
5251 scsi_dma_unmap(scmd);
5252 if (io_request->status == -EAGAIN)
5253 set_host_byte(scmd, DID_IMM_RETRY);
5254 else if (pqi_raid_bypass_retry_needed(io_request)) {
5255 pqi_queue_raid_bypass_retry(io_request);
5256 return;
5257 }
5258 pqi_free_io_request(io_request);
5259 pqi_scsi_done(scmd);
5260 }
5262 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5263 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5264 struct pqi_queue_group *queue_group)
5265 {
5266 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5267 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
5268 }
5270 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5271 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5272 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5273 struct pqi_encryption_info *encryption_info, bool raid_bypass)
5274 {
5275 int rc;
5276 struct pqi_io_request *io_request;
5277 struct pqi_aio_path_request *request;
5279 io_request = pqi_alloc_io_request(ctrl_info);
5280 io_request->io_complete_callback = pqi_aio_io_complete;
5281 io_request->scmd = scmd;
5282 io_request->raid_bypass = raid_bypass;
5284 request = io_request->iu;
5285 memset(request, 0,
5286 offsetof(struct pqi_raid_path_request, sg_descriptors));
5288 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5289 put_unaligned_le32(aio_handle, &request->nexus_id);
5290 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5291 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5292 put_unaligned_le16(io_request->index, &request->request_id);
5293 request->error_index = request->request_id;
5294 if (cdb_length > sizeof(request->cdb))
5295 cdb_length = sizeof(request->cdb);
5296 request->cdb_length = cdb_length;
5297 memcpy(request->cdb, cdb, cdb_length);
5299 switch (scmd->sc_data_direction) {
5300 case DMA_TO_DEVICE:
5301 request->data_direction = SOP_READ_FLAG;
5302 break;
5303 case DMA_FROM_DEVICE:
5304 request->data_direction = SOP_WRITE_FLAG;
5305 break;
5306 case DMA_NONE:
5307 request->data_direction = SOP_NO_DIRECTION_FLAG;
5308 break;
5309 case DMA_BIDIRECTIONAL:
5310 request->data_direction = SOP_BIDIRECTIONAL;
5311 break;
5312 default:
5313 dev_err(&ctrl_info->pci_dev->dev,
5314 "unknown data direction: %d\n",
5315 scmd->sc_data_direction);
5316 break;
5317 }
5319 if (encryption_info) {
5320 request->encryption_enable = true;
5321 put_unaligned_le16(encryption_info->data_encryption_key_index,
5322 &request->data_encryption_key_index);
5323 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5324 &request->encrypt_tweak_lower);
5325 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5326 &request->encrypt_tweak_upper);
5329 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5330 if (rc) {
5331 pqi_free_io_request(io_request);
5332 return SCSI_MLQUEUE_HOST_BUSY;
5333 }
5335 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5337 return 0;
5338 }
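/*
 * Editor's note: a sketch, with assumed values, of filling the
 * pqi_encryption_info consumed above; it splits a 64-bit XTS-style tweak
 * into the lower/upper halves the request expects. The helper name is
 * hypothetical.
 */
static void pqi_example_fill_encryption_info(
	struct pqi_encryption_info *encryption_info, u16 key_index, u64 tweak)
{
	encryption_info->data_encryption_key_index = key_index;
	encryption_info->encrypt_tweak_lower = lower_32_bits(tweak);
	encryption_info->encrypt_tweak_upper = upper_32_bits(tweak);
}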
5340 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5341 struct scsi_cmnd *scmd)
5342 {
5343 u16 hw_queue;
5345 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5346 if (hw_queue > ctrl_info->max_hw_queue_index)
5347 hw_queue = 0;
5349 return hw_queue;
5350 }
5352 /*
5353 * This function gets called just before we hand the completed SCSI request
5354 * back to the SML.
5355 */
5357 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5358 {
5359 struct pqi_scsi_dev *device;
5361 if (!scmd->device) {
5362 set_host_byte(scmd, DID_NO_CONNECT);
5363 return;
5364 }
5366 device = scmd->device->hostdata;
5367 if (!device) {
5368 set_host_byte(scmd, DID_NO_CONNECT);
5369 return;
5370 }
5372 atomic_dec(&device->scsi_cmds_outstanding);
5373 }
5375 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
5376 struct scsi_cmnd *scmd)
5377 {
5378 int rc;
5379 struct pqi_ctrl_info *ctrl_info;
5380 struct pqi_scsi_dev *device;
5381 u16 hw_queue;
5382 struct pqi_queue_group *queue_group;
5383 bool raid_bypassed;
5385 device = scmd->device->hostdata;
5386 ctrl_info = shost_to_hba(shost);
5388 if (!device) {
5389 set_host_byte(scmd, DID_NO_CONNECT);
5390 pqi_scsi_done(scmd);
5391 return 0;
5392 }
5394 atomic_inc(&device->scsi_cmds_outstanding);
5396 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5397 device)) {
5398 set_host_byte(scmd, DID_NO_CONNECT);
5399 pqi_scsi_done(scmd);
5400 rc = 0;
5401 goto out;
5402 }
5403 pqi_ctrl_busy(ctrl_info);
5404 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5405 pqi_ctrl_in_ofa(ctrl_info)) {
5406 rc = SCSI_MLQUEUE_HOST_BUSY;
5407 goto out;
5408 }
5410 /*
5411 * This is necessary because the SML doesn't zero out this field during
5412 * error recovery.
5413 */
5414 scmd->result = 0;
5416 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5417 queue_group = &ctrl_info->queue_groups[hw_queue];
5419 if (pqi_is_logical_device(device)) {
5420 raid_bypassed = false;
5421 if (device->raid_bypass_enabled &&
5422 !blk_rq_is_passthrough(scmd->request)) {
5423 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5424 scmd, queue_group);
5425 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
5426 raid_bypassed = true;
5427 }
5428 if (!raid_bypassed)
5429 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5430 queue_group);
5431 } else {
5432 if (device->aio_enabled)
5433 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5434 queue_group);
5435 else
5436 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5437 queue_group);
5438 }
5440 out:
5441 pqi_ctrl_unbusy(ctrl_info);
5442 if (rc)
5443 atomic_dec(&device->scsi_cmds_outstanding);
5445 return rc;
5446 }
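/*
 * Editor's note: the submission policy above, restated as a hypothetical
 * standalone predicate: logical volumes try the RAID bypass (AIO) path only
 * when bypass is enabled and the request is not a passthrough; physical
 * devices use AIO whenever the device advertises it.
 */
static bool pqi_example_can_try_bypass(struct pqi_scsi_dev *device,
	struct scsi_cmnd *scmd)
{
	return pqi_is_logical_device(device) &&
		device->raid_bypass_enabled &&
		!blk_rq_is_passthrough(scmd->request);
}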
5448 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5449 struct pqi_queue_group *queue_group)
5450 {
5451 unsigned int path;
5452 unsigned long flags;
5453 bool list_is_empty;
5455 for (path = 0; path < 2; path++) {
5456 while (1) {
5457 spin_lock_irqsave(
5458 &queue_group->submit_lock[path], flags);
5459 list_is_empty =
5460 list_empty(&queue_group->request_list[path]);
5461 spin_unlock_irqrestore(
5462 &queue_group->submit_lock[path], flags);
5463 if (list_is_empty)
5464 break;
5465 pqi_check_ctrl_health(ctrl_info);
5466 if (pqi_ctrl_offline(ctrl_info))
5467 return -ENXIO;
5468 usleep_range(1000, 2000);
5469 }
5470 }
5472 return 0;
5473 }
5475 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5476 {
5477 int rc;
5478 unsigned int i;
5479 unsigned int path;
5480 struct pqi_queue_group *queue_group;
5481 pqi_index_t iq_pi;
5482 pqi_index_t iq_ci;
5484 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5485 queue_group = &ctrl_info->queue_groups[i];
5487 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5488 if (rc)
5489 return rc;
5491 for (path = 0; path < 2; path++) {
5492 iq_pi = queue_group->iq_pi_copy[path];
5494 while (1) {
5495 iq_ci = readl(queue_group->iq_ci[path]);
5496 if (iq_ci == iq_pi)
5497 break;
5498 pqi_check_ctrl_health(ctrl_info);
5499 if (pqi_ctrl_offline(ctrl_info))
5500 return -ENXIO;
5501 usleep_range(1000, 2000);
5502 }
5503 }
5504 }
5506 return 0;
5507 }
5509 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5510 struct pqi_scsi_dev *device)
5511 {
5512 unsigned int i;
5513 unsigned int path;
5514 struct pqi_queue_group *queue_group;
5515 unsigned long flags;
5516 struct pqi_io_request *io_request;
5517 struct pqi_io_request *next;
5518 struct scsi_cmnd *scmd;
5519 struct pqi_scsi_dev *scsi_device;
5521 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5522 queue_group = &ctrl_info->queue_groups[i];
5524 for (path = 0; path < 2; path++) {
5525 spin_lock_irqsave(
5526 &queue_group->submit_lock[path], flags);
5528 list_for_each_entry_safe(io_request, next,
5529 &queue_group->request_list[path],
5530 request_list_entry) {
5531 scmd = io_request->scmd;
5532 if (!scmd)
5533 continue;
5535 scsi_device = scmd->device->hostdata;
5536 if (scsi_device != device)
5537 continue;
5539 list_del(&io_request->request_list_entry);
5540 set_host_byte(scmd, DID_RESET);
5541 pqi_scsi_done(scmd);
5542 }
5544 spin_unlock_irqrestore(
5545 &queue_group->submit_lock[path], flags);
5546 }
5547 }
5548 }
5550 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5551 {
5552 unsigned int i;
5553 unsigned int path;
5554 struct pqi_queue_group *queue_group;
5555 unsigned long flags;
5556 struct pqi_io_request *io_request;
5557 struct pqi_io_request *next;
5558 struct scsi_cmnd *scmd;
5560 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5561 queue_group = &ctrl_info->queue_groups[i];
5563 for (path = 0; path < 2; path++) {
5564 spin_lock_irqsave(&queue_group->submit_lock[path],
5565 flags);
5567 list_for_each_entry_safe(io_request, next,
5568 &queue_group->request_list[path],
5569 request_list_entry) {
5571 scmd = io_request->scmd;
5572 if (!scmd)
5573 continue;
5575 list_del(&io_request->request_list_entry);
5576 set_host_byte(scmd, DID_RESET);
5577 pqi_scsi_done(scmd);
5578 }
5580 spin_unlock_irqrestore(
5581 &queue_group->submit_lock[path], flags);
5582 }
5583 }
5584 }
5586 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5587 struct pqi_scsi_dev *device, unsigned long timeout_secs)
5588 {
5589 unsigned long timeout;
5591 timeout = (timeout_secs * PQI_HZ) + jiffies;
5593 while (atomic_read(&device->scsi_cmds_outstanding)) {
5594 pqi_check_ctrl_health(ctrl_info);
5595 if (pqi_ctrl_offline(ctrl_info))
5596 return -ENXIO;
5597 if (timeout_secs != NO_TIMEOUT) {
5598 if (time_after(jiffies, timeout)) {
5599 dev_err(&ctrl_info->pci_dev->dev,
5600 "timed out waiting for pending IO\n");
5601 return -ETIMEDOUT;
5602 }
5603 }
5604 usleep_range(1000, 2000);
5605 }
5607 return 0;
5608 }
5610 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5611 unsigned long timeout_secs)
5612 {
5613 bool io_pending;
5614 unsigned long flags;
5615 unsigned long timeout;
5616 struct pqi_scsi_dev *device;
5618 timeout = (timeout_secs * PQI_HZ) + jiffies;
5620 while (1) {
5621 io_pending = false;
5622 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5623 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5624 scsi_device_list_entry) {
5625 if (atomic_read(&device->scsi_cmds_outstanding)) {
5626 io_pending = true;
5627 break;
5628 }
5629 }
5630 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5631 flags);
5633 if (!io_pending)
5634 break;
5636 pqi_check_ctrl_health(ctrl_info);
5637 if (pqi_ctrl_offline(ctrl_info))
5638 return -ENXIO;
5640 if (timeout_secs != NO_TIMEOUT) {
5641 if (time_after(jiffies, timeout)) {
5642 dev_err(&ctrl_info->pci_dev->dev,
5643 "timed out waiting for pending IO\n");
5644 return -ETIMEDOUT;
5645 }
5646 }
5647 usleep_range(1000, 2000);
5648 }
5650 return 0;
5651 }
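/*
 * Editor's note: the wait loops above share one pattern: poll a condition,
 * check controller health, honor a timeout, sleep 1-2 ms, repeat. A hedged,
 * generic restatement (not driver code; the callback-based factoring is an
 * assumption):
 */
static int pqi_example_poll(struct pqi_ctrl_info *ctrl_info,
	bool (*done)(struct pqi_ctrl_info *ctrl_info), unsigned long timeout)
{
	while (!done(ctrl_info)) {
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(1000, 2000);
	}

	return 0;
}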
5653 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5654 void *context)
5655 {
5656 struct completion *waiting = context;
5658 complete(waiting);
5659 }
5661 #define PQI_LUN_RESET_TIMEOUT_SECS 10
5663 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5664 struct pqi_scsi_dev *device, struct completion *wait)
5665 {
5666 int rc;
5668 while (1) {
5669 if (wait_for_completion_io_timeout(wait,
5670 PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
5671 rc = 0;
5672 break;
5673 }
5675 pqi_check_ctrl_health(ctrl_info);
5676 if (pqi_ctrl_offline(ctrl_info)) {
5677 rc = -ENXIO;
5678 break;
5679 }
5680 }
5682 return rc;
5683 }
5685 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5686 struct pqi_scsi_dev *device)
5687 {
5688 int rc;
5689 struct pqi_io_request *io_request;
5690 DECLARE_COMPLETION_ONSTACK(wait);
5691 struct pqi_task_management_request *request;
5693 io_request = pqi_alloc_io_request(ctrl_info);
5694 io_request->io_complete_callback = pqi_lun_reset_complete;
5695 io_request->context = &wait;
5697 request = io_request->iu;
5698 memset(request, 0, sizeof(*request));
5700 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5701 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5702 &request->header.iu_length);
5703 put_unaligned_le16(io_request->index, &request->request_id);
5704 memcpy(request->lun_number, device->scsi3addr,
5705 sizeof(request->lun_number));
5706 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5708 pqi_start_io(ctrl_info,
5709 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5710 io_request);
5712 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5713 if (rc == 0)
5714 rc = io_request->status;
5716 pqi_free_io_request(io_request);
5718 return rc;
5719 }
5721 /* Performs a reset at the LUN level. */
5723 #define PQI_LUN_RESET_RETRIES 3
5724 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
5725 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
5727 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5728 struct pqi_scsi_dev *device)
5729 {
5730 int rc;
5731 unsigned int retries;
5732 unsigned long timeout_secs;
5734 for (retries = 0;;) {
5735 rc = pqi_lun_reset(ctrl_info, device);
5736 if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
5737 break;
5738 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5739 }
5741 timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
5743 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
5745 return rc == 0 ? SUCCESS : FAILED;
5746 }
5748 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5749 struct pqi_scsi_dev *device)
5750 {
5751 int rc;
5753 mutex_lock(&ctrl_info->lun_reset_mutex);
5755 pqi_ctrl_block_requests(ctrl_info);
5756 pqi_ctrl_wait_until_quiesced(ctrl_info);
5757 pqi_fail_io_queued_for_device(ctrl_info, device);
5758 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5759 pqi_device_reset_start(device);
5760 pqi_ctrl_unblock_requests(ctrl_info);
5762 if (rc)
5763 rc = FAILED;
5764 else
5765 rc = _pqi_device_reset(ctrl_info, device);
5767 pqi_device_reset_done(device);
5769 mutex_unlock(&ctrl_info->lun_reset_mutex);
5771 return rc;
5772 }
5774 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5775 {
5776 int rc;
5777 struct Scsi_Host *shost;
5778 struct pqi_ctrl_info *ctrl_info;
5779 struct pqi_scsi_dev *device;
5781 shost = scmd->device->host;
5782 ctrl_info = shost_to_hba(shost);
5783 device = scmd->device->hostdata;
5785 dev_err(&ctrl_info->pci_dev->dev,
5786 "resetting scsi %d:%d:%d:%d\n",
5787 shost->host_no, device->bus, device->target, device->lun);
5789 pqi_check_ctrl_health(ctrl_info);
5790 if (pqi_ctrl_offline(ctrl_info)) {
5791 dev_err(&ctrl_info->pci_dev->dev,
5792 "controller %u offlined - cannot send device reset\n",
5793 ctrl_info->ctrl_id);
5794 rc = FAILED;
5795 goto out;
5796 }
5798 pqi_wait_until_ofa_finished(ctrl_info);
5800 rc = pqi_device_reset(ctrl_info, device);
5802 out:
5803 dev_err(&ctrl_info->pci_dev->dev,
5804 "reset of scsi %d:%d:%d:%d: %s\n",
5805 shost->host_no, device->bus, device->target, device->lun,
5806 rc == SUCCESS ? "SUCCESS" : "FAILED");
5808 return rc;
5809 }
5811 static int pqi_slave_alloc(struct scsi_device *sdev)
5813 struct pqi_scsi_dev *device;
5814 unsigned long flags;
5815 struct pqi_ctrl_info *ctrl_info;
5816 struct scsi_target *starget;
5817 struct sas_rphy *rphy;
5819 ctrl_info = shost_to_hba(sdev->host);
5821 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5823 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5824 starget = scsi_target(sdev);
5825 rphy = target_to_rphy(starget);
5826 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5827 if (device) {
5828 device->target = sdev_id(sdev);
5829 device->lun = sdev->lun;
5830 device->target_lun_valid = true;
5831 }
5832 } else {
5833 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5834 sdev_id(sdev), sdev->lun);
5835 }
5837 if (device) {
5838 sdev->hostdata = device;
5839 device->sdev = sdev;
5840 if (device->queue_depth) {
5841 device->advertised_queue_depth = device->queue_depth;
5842 scsi_change_queue_depth(sdev,
5843 device->advertised_queue_depth);
5844 }
5845 if (pqi_is_logical_device(device))
5846 pqi_disable_write_same(sdev);
5847 else
5848 sdev->allow_restart = 1;
5849 }
5851 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5853 return 0;
5854 }
5856 static int pqi_map_queues(struct Scsi_Host *shost)
5857 {
5858 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5860 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
5861 ctrl_info->pci_dev, 0);
5862 }
5864 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5865 void __user *arg)
5866 {
5867 struct pci_dev *pci_dev;
5868 u32 subsystem_vendor;
5869 u32 subsystem_device;
5870 cciss_pci_info_struct pciinfo;
5872 if (!arg)
5873 return -EINVAL;
5875 pci_dev = ctrl_info->pci_dev;
5877 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5878 pciinfo.bus = pci_dev->bus->number;
5879 pciinfo.dev_fn = pci_dev->devfn;
5880 subsystem_vendor = pci_dev->subsystem_vendor;
5881 subsystem_device = pci_dev->subsystem_device;
5882 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5883 subsystem_vendor;
5885 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5886 return -EFAULT;
5888 return 0;
5889 }
5891 static int pqi_getdrivver_ioctl(void __user *arg)
5892 {
5893 u32 version;
5895 if (!arg)
5896 return -EINVAL;
5898 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5899 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5901 if (copy_to_user(arg, &version, sizeof(version)))
5902 return -EFAULT;
5904 return 0;
5905 }
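/*
 * Editor's note: the packed version word built above can be decoded as
 * follows (illustration only): bits 31-28 major, 27-24 minor, 23-16
 * release, 15-0 revision, so 1.2.8-026 packs to 0x1208001a.
 */
static void pqi_example_decode_driver_version(u32 version,
	unsigned int *major, unsigned int *minor,
	unsigned int *release, unsigned int *revision)
{
	*major = (version >> 28) & 0xf;
	*minor = (version >> 24) & 0xf;
	*release = (version >> 16) & 0xff;
	*revision = version & 0xffff;
}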
5907 struct ciss_error_info {
5908 u8 scsi_status;
5909 int command_status;
5910 size_t sense_data_length;
5911 };
5913 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5914 struct ciss_error_info *ciss_error_info)
5915 {
5916 int ciss_cmd_status;
5917 size_t sense_data_length;
5919 switch (pqi_error_info->data_out_result) {
5920 case PQI_DATA_IN_OUT_GOOD:
5921 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5922 break;
5923 case PQI_DATA_IN_OUT_UNDERFLOW:
5924 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5925 break;
5926 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5927 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5928 break;
5929 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5930 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5931 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5932 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5933 case PQI_DATA_IN_OUT_ERROR:
5934 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5935 break;
5936 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5937 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5938 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5939 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5940 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5941 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5942 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5943 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5944 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5945 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5946 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5947 break;
5948 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5949 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5950 break;
5951 case PQI_DATA_IN_OUT_ABORTED:
5952 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5953 break;
5954 case PQI_DATA_IN_OUT_TIMEOUT:
5955 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5956 break;
5957 default:
5958 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5959 break;
5960 }
5962 sense_data_length =
5963 get_unaligned_le16(&pqi_error_info->sense_data_length);
5964 if (sense_data_length == 0)
5965 sense_data_length =
5966 get_unaligned_le16(&pqi_error_info->response_data_length);
5967 if (sense_data_length)
5968 if (sense_data_length > sizeof(pqi_error_info->data))
5969 sense_data_length = sizeof(pqi_error_info->data);
5971 ciss_error_info->scsi_status = pqi_error_info->status;
5972 ciss_error_info->command_status = ciss_cmd_status;
5973 ciss_error_info->sense_data_length = sense_data_length;
5974 }
5976 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5977 {
5978 int rc;
5979 char *kernel_buffer = NULL;
5980 u16 iu_length;
5981 size_t sense_data_length;
5982 IOCTL_Command_struct iocommand;
5983 struct pqi_raid_path_request request;
5984 struct pqi_raid_error_info pqi_error_info;
5985 struct ciss_error_info ciss_error_info;
5987 if (pqi_ctrl_offline(ctrl_info))
5988 return -ENXIO;
5989 if (!arg)
5990 return -EINVAL;
5991 if (!capable(CAP_SYS_RAWIO))
5992 return -EPERM;
5993 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5994 return -EFAULT;
5995 if (iocommand.buf_size < 1 &&
5996 iocommand.Request.Type.Direction != XFER_NONE)
5997 return -EINVAL;
5998 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5999 return -EINVAL;
6000 if (iocommand.Request.Type.Type != TYPE_CMD)
6001 return -EINVAL;
6003 switch (iocommand.Request.Type.Direction) {
6004 case XFER_NONE:
6005 case XFER_WRITE:
6006 case XFER_READ:
6007 case XFER_READ | XFER_WRITE:
6008 break;
6009 default:
6010 return -EINVAL;
6011 }
6013 if (iocommand.buf_size > 0) {
6014 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6015 if (!kernel_buffer)
6016 return -ENOMEM;
6017 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6018 if (copy_from_user(kernel_buffer, iocommand.buf,
6019 iocommand.buf_size)) {
6020 rc = -EFAULT;
6021 goto out;
6022 }
6023 } else {
6024 memset(kernel_buffer, 0, iocommand.buf_size);
6025 }
6026 }
6028 memset(&request, 0, sizeof(request));
6030 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6031 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6032 PQI_REQUEST_HEADER_LENGTH;
6033 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6034 sizeof(request.lun_number));
6035 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6036 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6038 switch (iocommand.Request.Type.Direction) {
6039 case XFER_NONE:
6040 request.data_direction = SOP_NO_DIRECTION_FLAG;
6041 break;
6042 case XFER_WRITE:
6043 request.data_direction = SOP_WRITE_FLAG;
6044 break;
6045 case XFER_READ:
6046 request.data_direction = SOP_READ_FLAG;
6047 break;
6048 case XFER_READ | XFER_WRITE:
6049 request.data_direction = SOP_BIDIRECTIONAL;
6050 break;
6051 }
6053 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6055 if (iocommand.buf_size > 0) {
6056 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6058 rc = pqi_map_single(ctrl_info->pci_dev,
6059 &request.sg_descriptors[0], kernel_buffer,
6060 iocommand.buf_size, DMA_BIDIRECTIONAL);
6061 if (rc)
6062 goto out;
6064 iu_length += sizeof(request.sg_descriptors[0]);
6065 }
6067 put_unaligned_le16(iu_length, &request.header.iu_length);
6069 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6070 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
6072 if (iocommand.buf_size > 0)
6073 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6074 DMA_BIDIRECTIONAL);
6076 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6078 if (rc == 0) {
6079 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6080 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6081 iocommand.error_info.CommandStatus =
6082 ciss_error_info.command_status;
6083 sense_data_length = ciss_error_info.sense_data_length;
6084 if (sense_data_length) {
6085 if (sense_data_length >
6086 sizeof(iocommand.error_info.SenseInfo))
6087 sense_data_length =
6088 sizeof(iocommand.error_info.SenseInfo);
6089 memcpy(iocommand.error_info.SenseInfo,
6090 pqi_error_info.data, sense_data_length);
6091 iocommand.error_info.SenseLen = sense_data_length;
6092 }
6093 }
6095 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6096 rc = -EFAULT;
6097 goto out;
6098 }
6100 if (rc == 0 && iocommand.buf_size > 0 &&
6101 (iocommand.Request.Type.Direction & XFER_READ)) {
6102 if (copy_to_user(iocommand.buf, kernel_buffer,
6103 iocommand.buf_size)) {
6104 rc = -EFAULT;
6105 }
6106 }
6108 out:
6109 kfree(kernel_buffer);
6111 return rc;
6112 }
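/*
 * Editor's note: a hedged userspace sketch of driving the CCISS_PASSTHRU
 * path handled above. Error handling and close() are trimmed, and the
 * device node path is only an example; kept inside #if 0 because it is
 * userspace code, not part of this kernel file.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/cciss_ioctl.h>

int example_send_passthru(void)
{
	IOCTL_Command_struct cmd;
	int fd = open("/dev/sg0", O_RDWR);	/* example node */

	memset(&cmd, 0, sizeof(cmd));
	cmd.Request.Type.Type = TYPE_CMD;
	cmd.Request.Type.Direction = XFER_NONE;
	cmd.Request.CDBLen = 6;
	cmd.Request.CDB[0] = 0x00;	/* TEST UNIT READY */

	return ioctl(fd, CCISS_PASSTHRU, &cmd);
}
#endif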
6114 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6115 void __user *arg)
6116 {
6117 int rc;
6118 struct pqi_ctrl_info *ctrl_info;
6120 ctrl_info = shost_to_hba(sdev->host);
6122 if (pqi_ctrl_in_ofa(ctrl_info))
6123 return -EBUSY;
6125 switch (cmd) {
6126 case CCISS_DEREGDISK:
6127 case CCISS_REGNEWDISK:
6128 case CCISS_REGNEWD:
6129 rc = pqi_scan_scsi_devices(ctrl_info);
6130 break;
6131 case CCISS_GETPCIINFO:
6132 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6133 break;
6134 case CCISS_GETDRIVVER:
6135 rc = pqi_getdrivver_ioctl(arg);
6136 break;
6137 case CCISS_PASSTHRU:
6138 rc = pqi_passthru_ioctl(ctrl_info, arg);
6139 break;
6140 default:
6141 rc = -EINVAL;
6142 break;
6143 }
6145 return rc;
6146 }
6148 static ssize_t pqi_firmware_version_show(struct device *dev,
6149 struct device_attribute *attr, char *buffer)
6151 struct Scsi_Host *shost;
6152 struct pqi_ctrl_info *ctrl_info;
6154 shost = class_to_shost(dev);
6155 ctrl_info = shost_to_hba(shost);
6157 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6160 static ssize_t pqi_driver_version_show(struct device *dev,
6161 struct device_attribute *attr, char *buffer)
6163 struct Scsi_Host *shost;
6164 struct pqi_ctrl_info *ctrl_info;
6166 shost = class_to_shost(dev);
6167 ctrl_info = shost_to_hba(shost);
6169 return snprintf(buffer, PAGE_SIZE,
6170 "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6173 static ssize_t pqi_serial_number_show(struct device *dev,
6174 struct device_attribute *attr, char *buffer)
6176 struct Scsi_Host *shost;
6177 struct pqi_ctrl_info *ctrl_info;
6179 shost = class_to_shost(dev);
6180 ctrl_info = shost_to_hba(shost);
6182 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6185 static ssize_t pqi_model_show(struct device *dev,
6186 struct device_attribute *attr, char *buffer)
6188 struct Scsi_Host *shost;
6189 struct pqi_ctrl_info *ctrl_info;
6191 shost = class_to_shost(dev);
6192 ctrl_info = shost_to_hba(shost);
6194 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6197 static ssize_t pqi_vendor_show(struct device *dev,
6198 struct device_attribute *attr, char *buffer)
6200 struct Scsi_Host *shost;
6201 struct pqi_ctrl_info *ctrl_info;
6203 shost = class_to_shost(dev);
6204 ctrl_info = shost_to_hba(shost);
6206 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6209 static ssize_t pqi_host_rescan_store(struct device *dev,
6210 struct device_attribute *attr, const char *buffer, size_t count)
6211 {
6212 struct Scsi_Host *shost = class_to_shost(dev);
6214 pqi_scan_start(shost);
6216 return count;
6217 }
6219 static ssize_t pqi_lockup_action_show(struct device *dev,
6220 struct device_attribute *attr, char *buffer)
6221 {
6222 int count = 0;
6223 unsigned int i;
6225 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6226 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6227 count += snprintf(buffer + count, PAGE_SIZE - count,
6228 "[%s] ", pqi_lockup_actions[i].name);
6229 else
6230 count += snprintf(buffer + count, PAGE_SIZE - count,
6231 "%s ", pqi_lockup_actions[i].name);
6232 }
6234 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
6236 return count;
6237 }
6239 static ssize_t pqi_lockup_action_store(struct device *dev,
6240 struct device_attribute *attr, const char *buffer, size_t count)
6241 {
6242 unsigned int i;
6243 char *action_name;
6244 char action_name_buffer[32];
6246 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6247 action_name = strstrip(action_name_buffer);
6249 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6250 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6251 pqi_lockup_action = pqi_lockup_actions[i].action;
6252 return count;
6253 }
6254 }
6256 return -EINVAL;
6257 }
6259 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6260 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6261 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6262 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6263 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6264 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6265 static DEVICE_ATTR(lockup_action, 0644,
6266 pqi_lockup_action_show, pqi_lockup_action_store);
6268 static struct device_attribute *pqi_shost_attrs[] = {
6269 &dev_attr_driver_version,
6270 &dev_attr_firmware_version,
6271 &dev_attr_model,
6272 &dev_attr_serial_number,
6273 &dev_attr_vendor,
6274 &dev_attr_rescan,
6275 &dev_attr_lockup_action,
6276 NULL
6277 };
6279 static ssize_t pqi_unique_id_show(struct device *dev,
6280 struct device_attribute *attr, char *buffer)
6282 struct pqi_ctrl_info *ctrl_info;
6283 struct scsi_device *sdev;
6284 struct pqi_scsi_dev *device;
6285 unsigned long flags;
6286 unsigned char uid[16];
6288 sdev = to_scsi_device(dev);
6289 ctrl_info = shost_to_hba(sdev->host);
6291 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6293 device = sdev->hostdata;
6294 if (!device) {
6295 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6296 flags);
6297 return -ENODEV;
6298 }
6299 memcpy(uid, device->unique_id, sizeof(uid));
6301 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6303 return snprintf(buffer, PAGE_SIZE,
6304 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
6305 uid[0], uid[1], uid[2], uid[3],
6306 uid[4], uid[5], uid[6], uid[7],
6307 uid[8], uid[9], uid[10], uid[11],
6308 uid[12], uid[13], uid[14], uid[15]);
6311 static ssize_t pqi_lunid_show(struct device *dev,
6312 struct device_attribute *attr, char *buffer)
6314 struct pqi_ctrl_info *ctrl_info;
6315 struct scsi_device *sdev;
6316 struct pqi_scsi_dev *device;
6317 unsigned long flags;
6318 u8 lunid[8];
6320 sdev = to_scsi_device(dev);
6321 ctrl_info = shost_to_hba(sdev->host);
6323 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6325 device = sdev->hostdata;
6326 if (!device) {
6327 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6328 flags);
6329 return -ENODEV;
6330 }
6331 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6333 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6335 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6339 static ssize_t pqi_path_info_show(struct device *dev,
6340 struct device_attribute *attr, char *buf)
6342 struct pqi_ctrl_info *ctrl_info;
6343 struct scsi_device *sdev;
6344 struct pqi_scsi_dev *device;
6345 unsigned long flags;
6346 int i;
6347 int output_len = 0;
6348 u8 box;
6349 u8 bay;
6350 u8 path_map_index = 0;
6351 char *active;
6352 unsigned char phys_connector[2];
6354 sdev = to_scsi_device(dev);
6355 ctrl_info = shost_to_hba(sdev->host);
6357 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6359 device = sdev->hostdata;
6360 if (!device) {
6361 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6362 flags);
6363 return -ENODEV;
6364 }
6367 for (i = 0; i < MAX_PATHS; i++) {
6368 path_map_index = 1<<i;
6369 if (i == device->active_path_index)
6370 active = "Active";
6371 else if (device->path_map & path_map_index)
6372 active = "Inactive";
6373 else
6374 continue;
6376 output_len += scnprintf(buf + output_len,
6377 PAGE_SIZE - output_len,
6378 "[%d:%d:%d:%d] %20.20s ",
6379 ctrl_info->scsi_host->host_no,
6380 device->bus, device->target,
6381 device->lun,
6382 scsi_device_type(device->devtype));
6384 if (device->devtype == TYPE_RAID ||
6385 pqi_is_logical_device(device))
6386 goto end_buffer;
6388 memcpy(&phys_connector, &device->phys_connector[i],
6389 sizeof(phys_connector));
6390 if (phys_connector[0] < '0')
6391 phys_connector[0] = '0';
6392 if (phys_connector[1] < '0')
6393 phys_connector[1] = '0';
6395 output_len += scnprintf(buf + output_len,
6396 PAGE_SIZE - output_len,
6397 "PORT: %.2s ", phys_connector);
6399 box = device->box[i];
6400 if (box != 0 && box != 0xFF)
6401 output_len += scnprintf(buf + output_len,
6402 PAGE_SIZE - output_len,
6403 "BOX: %hhu ", box);
6405 if ((device->devtype == TYPE_DISK ||
6406 device->devtype == TYPE_ZBC) &&
6407 pqi_expose_device(device))
6408 output_len += scnprintf(buf + output_len,
6409 PAGE_SIZE - output_len,
6410 "BAY: %hhu ", device->bay);
6412 end_buffer:
6413 output_len += scnprintf(buf + output_len,
6414 PAGE_SIZE - output_len,
6415 "%s\n", active);
6416 }
6418 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6420 return output_len;
6421 }
6423 static ssize_t pqi_sas_address_show(struct device *dev,
6424 struct device_attribute *attr, char *buffer)
6426 struct pqi_ctrl_info *ctrl_info;
6427 struct scsi_device *sdev;
6428 struct pqi_scsi_dev *device;
6429 unsigned long flags;
6430 u64 sas_address;
6432 sdev = to_scsi_device(dev);
6433 ctrl_info = shost_to_hba(sdev->host);
6435 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6437 device = sdev->hostdata;
6438 if (pqi_is_logical_device(device)) {
6439 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6440 flags);
6441 return -ENODEV;
6442 }
6443 sas_address = device->sas_address;
6445 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6447 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6450 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6451 struct device_attribute *attr, char *buffer)
6453 struct pqi_ctrl_info *ctrl_info;
6454 struct scsi_device *sdev;
6455 struct pqi_scsi_dev *device;
6456 unsigned long flags;
6458 sdev = to_scsi_device(dev);
6459 ctrl_info = shost_to_hba(sdev->host);
6461 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6463 device = sdev->hostdata;
6464 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6465 buffer[1] = '\n';
6466 buffer[2] = '\0';
6468 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6470 return 2;
6471 }
6473 static ssize_t pqi_raid_level_show(struct device *dev,
6474 struct device_attribute *attr, char *buffer)
6476 struct pqi_ctrl_info *ctrl_info;
6477 struct scsi_device *sdev;
6478 struct pqi_scsi_dev *device;
6479 unsigned long flags;
6480 char *raid_level;
6482 sdev = to_scsi_device(dev);
6483 ctrl_info = shost_to_hba(sdev->host);
6485 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6487 device = sdev->hostdata;
6489 if (pqi_is_logical_device(device))
6490 raid_level = pqi_raid_level_to_string(device->raid_level);
6491 else
6492 raid_level = "N/A";
6494 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6496 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6499 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6500 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6501 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
6502 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6503 static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
6504 pqi_ssd_smart_path_enabled_show, NULL);
6505 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6507 static struct device_attribute *pqi_sdev_attrs[] = {
6508 &dev_attr_lunid,
6509 &dev_attr_unique_id,
6510 &dev_attr_path_info,
6511 &dev_attr_sas_address,
6512 &dev_attr_ssd_smart_path_enabled,
6513 &dev_attr_raid_level,
6514 NULL
6515 };
6517 static struct scsi_host_template pqi_driver_template = {
6518 .module = THIS_MODULE,
6519 .name = DRIVER_NAME_SHORT,
6520 .proc_name = DRIVER_NAME_SHORT,
6521 .queuecommand = pqi_scsi_queue_command,
6522 .scan_start = pqi_scan_start,
6523 .scan_finished = pqi_scan_finished,
6524 .this_id = -1,
6525 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6526 .ioctl = pqi_ioctl,
6527 .slave_alloc = pqi_slave_alloc,
6528 .map_queues = pqi_map_queues,
6529 .sdev_attrs = pqi_sdev_attrs,
6530 .shost_attrs = pqi_shost_attrs,
6531 };
6533 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6534 {
6535 int rc;
6536 struct Scsi_Host *shost;
6538 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6539 if (!shost) {
6540 dev_err(&ctrl_info->pci_dev->dev,
6541 "scsi_host_alloc failed for controller %u\n",
6542 ctrl_info->ctrl_id);
6543 return -ENOMEM;
6544 }
6546 shost->io_port = 0;
6547 shost->n_io_port = 0;
6548 shost->this_id = -1;
6549 shost->max_channel = PQI_MAX_BUS;
6550 shost->max_cmd_len = MAX_COMMAND_SIZE;
6551 shost->max_lun = ~0;
6552 shost->max_id = ~0;
6553 shost->max_sectors = ctrl_info->max_sectors;
6554 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6555 shost->cmd_per_lun = shost->can_queue;
6556 shost->sg_tablesize = ctrl_info->sg_tablesize;
6557 shost->transportt = pqi_sas_transport_template;
6558 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6559 shost->unique_id = shost->irq;
6560 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6561 shost->hostdata[0] = (unsigned long)ctrl_info;
6563 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6565 dev_err(&ctrl_info->pci_dev->dev,
6566 "scsi_add_host failed for controller %u\n",
6567 ctrl_info->ctrl_id);
6568 goto free_host;
6569 }
6571 rc = pqi_add_sas_host(shost, ctrl_info);
6573 dev_err(&ctrl_info->pci_dev->dev,
6574 "add SAS host failed for controller %u\n",
6575 ctrl_info->ctrl_id);
6576 goto remove_host;
6577 }
6579 ctrl_info->scsi_host = shost;
6581 return 0;
6583 remove_host:
6584 scsi_remove_host(shost);
6585 free_host:
6586 scsi_host_put(shost);
6588 return rc;
6589 }
6591 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6593 struct Scsi_Host *shost;
6595 pqi_delete_sas_host(ctrl_info);
6597 shost = ctrl_info->scsi_host;
6598 if (!shost)
6599 return;
6601 scsi_remove_host(shost);
6602 scsi_host_put(shost);
6605 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
6606 {
6607 int rc = 0;
6608 struct pqi_device_registers __iomem *pqi_registers;
6609 unsigned long timeout;
6610 unsigned int timeout_msecs;
6611 union pqi_reset_register reset_reg;
6613 pqi_registers = ctrl_info->pqi_registers;
6614 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
6615 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
6617 while (1) {
6618 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
6619 reset_reg.all_bits = readl(&pqi_registers->device_reset);
6620 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
6621 break;
6622 pqi_check_ctrl_health(ctrl_info);
6623 if (pqi_ctrl_offline(ctrl_info)) {
6624 rc = -ENXIO;
6625 break;
6626 }
6627 if (time_after(jiffies, timeout)) {
6628 rc = -ETIMEDOUT;
6629 break;
6630 }
6631 }
6633 return rc;
6634 }
6636 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6637 {
6638 int rc;
6639 union pqi_reset_register reset_reg;
6641 if (ctrl_info->pqi_reset_quiesce_supported) {
6642 rc = sis_pqi_reset_quiesce(ctrl_info);
6643 if (rc) {
6644 dev_err(&ctrl_info->pci_dev->dev,
6645 "PQI reset failed during quiesce with error %d\n",
6646 rc);
6647 return rc;
6648 }
6649 }
6651 reset_reg.all_bits = 0;
6652 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
6653 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6655 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6657 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6658 if (rc)
6659 dev_err(&ctrl_info->pci_dev->dev,
6660 "PQI reset failed with error %d\n", rc);
6662 return rc;
6663 }
6665 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
6666 {
6667 int rc;
6668 struct bmic_sense_subsystem_info *sense_info;
6670 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
6671 if (!sense_info)
6672 return -ENOMEM;
6674 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
6675 if (rc)
6676 goto out;
6678 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
6679 sizeof(sense_info->ctrl_serial_number));
6680 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
6682 out:
6683 kfree(sense_info);
6685 return rc;
6686 }
6688 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6689 {
6690 int rc;
6691 struct bmic_identify_controller *identify;
6693 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
6694 if (!identify)
6695 return -ENOMEM;
6697 rc = pqi_identify_controller(ctrl_info, identify);
6698 if (rc)
6699 goto out;
6701 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6702 sizeof(identify->firmware_version));
6703 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6704 snprintf(ctrl_info->firmware_version +
6705 strlen(ctrl_info->firmware_version),
6706 sizeof(ctrl_info->firmware_version),
6707 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6709 memcpy(ctrl_info->model, identify->product_id,
6710 sizeof(identify->product_id));
6711 ctrl_info->model[sizeof(identify->product_id)] = '\0';
6713 memcpy(ctrl_info->vendor, identify->vendor_id,
6714 sizeof(identify->vendor_id));
6715 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
6717 out:
6718 kfree(identify);
6720 return rc;
6721 }
6723 struct pqi_config_table_section_info {
6724 struct pqi_ctrl_info *ctrl_info;
6725 void *section;
6726 u32 section_offset;
6727 void __iomem *section_iomem_addr;
6728 };
6730 static inline bool pqi_is_firmware_feature_supported(
6731 struct pqi_config_table_firmware_features *firmware_features,
6732 unsigned int bit_position)
6734 unsigned int byte_index;
6736 byte_index = bit_position / BITS_PER_BYTE;
6738 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6739 return false;
6741 return firmware_features->features_supported[byte_index] &
6742 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6745 static inline bool pqi_is_firmware_feature_enabled(
6746 struct pqi_config_table_firmware_features *firmware_features,
6747 void __iomem *firmware_features_iomem_addr,
6748 unsigned int bit_position)
6750 unsigned int byte_index;
6751 u8 __iomem *features_enabled_iomem_addr;
6753 byte_index = (bit_position / BITS_PER_BYTE) +
6754 (le16_to_cpu(firmware_features->num_elements) * 2);
6756 features_enabled_iomem_addr = firmware_features_iomem_addr +
6757 offsetof(struct pqi_config_table_firmware_features,
6758 features_supported) + byte_index;
6760 return *((__force u8 *)features_enabled_iomem_addr) &
6761 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6764 static inline void pqi_request_firmware_feature(
6765 struct pqi_config_table_firmware_features *firmware_features,
6766 unsigned int bit_position)
6768 unsigned int byte_index;
6770 byte_index = (bit_position / BITS_PER_BYTE) +
6771 le16_to_cpu(firmware_features->num_elements);
6773 firmware_features->features_supported[byte_index] |=
6774 (1 << (bit_position % BITS_PER_BYTE));
6777 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6778 u16 first_section, u16 last_section)
6780 struct pqi_vendor_general_request request;
6782 memset(&request, 0, sizeof(request));
6784 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6785 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6786 &request.header.iu_length);
6787 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6788 &request.function_code);
6789 put_unaligned_le16(first_section,
6790 &request.data.config_table_update.first_section);
6791 put_unaligned_le16(last_section,
6792 &request.data.config_table_update.last_section);
6794 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6795 0, NULL, NO_TIMEOUT);
6798 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6799 struct pqi_config_table_firmware_features *firmware_features,
6800 void __iomem *firmware_features_iomem_addr)
6802 void *features_requested;
6803 void __iomem *features_requested_iomem_addr;
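/*
 * Copy the host-requested feature bitmap into the controller-visible
 * config table, then ask the firmware to re-read the firmware features
 * section via a vendor-general config table update request.
 */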
6805 features_requested = firmware_features->features_supported +
6806 le16_to_cpu(firmware_features->num_elements);
6808 features_requested_iomem_addr = firmware_features_iomem_addr +
6809 (features_requested - (void *)firmware_features);
6811 memcpy_toio(features_requested_iomem_addr, features_requested,
6812 le16_to_cpu(firmware_features->num_elements));
6814 return pqi_config_table_update(ctrl_info,
6815 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6816 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6819 struct pqi_firmware_feature {
6821 unsigned int feature_bit;
6824 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6825 struct pqi_firmware_feature *firmware_feature);
6828 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6829 struct pqi_firmware_feature *firmware_feature)
6831 if (!firmware_feature->supported) {
6832 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6833 firmware_feature->feature_name);
6837 if (firmware_feature->enabled) {
6838 dev_info(&ctrl_info->pci_dev->dev,
6839 "%s enabled\n", firmware_feature->feature_name);
6843 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6844 firmware_feature->feature_name);
6847 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6848 struct pqi_firmware_feature *firmware_feature)
6850 if (firmware_feature->feature_status)
6851 firmware_feature->feature_status(ctrl_info, firmware_feature);
6854 static DEFINE_MUTEX(pqi_firmware_features_mutex);
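/*
 * pqi_firmware_features[] below is shared by all controller instances,
 * so walks and updates of it are serialized with
 * pqi_firmware_features_mutex.
 */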
6856 static struct pqi_firmware_feature pqi_firmware_features[] = {
6858 .feature_name = "Online Firmware Activation",
6859 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6860 .feature_status = pqi_firmware_feature_status,
6863 .feature_name = "Serial Management Protocol",
6864 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6865 .feature_status = pqi_firmware_feature_status,
6868 .feature_name = "New Soft Reset Handshake",
6869 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
6870 .feature_status = pqi_firmware_feature_status,
6874 static void pqi_process_firmware_features(
6875 struct pqi_config_table_section_info *section_info)
6878 struct pqi_ctrl_info *ctrl_info;
6879 struct pqi_config_table_firmware_features *firmware_features;
6880 void __iomem *firmware_features_iomem_addr;
6882 unsigned int num_features_supported;
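/*
 * Feature negotiation takes three passes over pqi_firmware_features[]:
 * mark what the controller supports, request every supported feature,
 * then read back which features the firmware actually enabled.
 */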
6884 ctrl_info = section_info->ctrl_info;
6885 firmware_features = section_info->section;
6886 firmware_features_iomem_addr = section_info->section_iomem_addr;
6888 for (i = 0, num_features_supported = 0;
6889 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6890 if (pqi_is_firmware_feature_supported(firmware_features,
6891 pqi_firmware_features[i].feature_bit)) {
6892 pqi_firmware_features[i].supported = true;
6893 num_features_supported++;
6895 pqi_firmware_feature_update(ctrl_info,
6896 &pqi_firmware_features[i]);
6900 if (num_features_supported == 0)
6903 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6904 if (!pqi_firmware_features[i].supported)
6906 pqi_request_firmware_feature(firmware_features,
6907 pqi_firmware_features[i].feature_bit);
6910 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6911 firmware_features_iomem_addr);
6913 dev_err(&ctrl_info->pci_dev->dev,
6914 "failed to enable firmware features in PQI configuration table\n");
6915 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6916 if (!pqi_firmware_features[i].supported)
6918 pqi_firmware_feature_update(ctrl_info,
6919 &pqi_firmware_features[i]);
6924 ctrl_info->soft_reset_handshake_supported = false;
6925 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6926 if (!pqi_firmware_features[i].supported)
6928 if (pqi_is_firmware_feature_enabled(firmware_features,
6929 firmware_features_iomem_addr,
6930 pqi_firmware_features[i].feature_bit)) {
6931 pqi_firmware_features[i].enabled = true;
6932 if (pqi_firmware_features[i].feature_bit ==
6933 PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
6934 ctrl_info->soft_reset_handshake_supported =
6937 pqi_firmware_feature_update(ctrl_info,
6938 &pqi_firmware_features[i]);
6942 static void pqi_init_firmware_features(void)
6946 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6947 pqi_firmware_features[i].supported = false;
6948 pqi_firmware_features[i].enabled = false;
6952 static void pqi_process_firmware_features_section(
6953 struct pqi_config_table_section_info *section_info)
6955 mutex_lock(&pqi_firmware_features_mutex);
6956 pqi_init_firmware_features();
6957 pqi_process_firmware_features(section_info);
6958 mutex_unlock(&pqi_firmware_features_mutex);
6961 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
6965 void __iomem *table_iomem_addr;
6966 struct pqi_config_table *config_table;
6967 struct pqi_config_table_section_header *section;
6968 struct pqi_config_table_section_info section_info;
6970 table_length = ctrl_info->config_table_length;
6971 if (table_length == 0)
6974 config_table = kmalloc(table_length, GFP_KERNEL);
6975 if (!config_table) {
6976 dev_err(&ctrl_info->pci_dev->dev,
6977 "failed to allocate memory for PQI configuration table\n");
6982 * Copy the config table contents from I/O memory space into the
 * driver's cache.
 */
6985 table_iomem_addr = ctrl_info->iomem_base +
6986 ctrl_info->config_table_offset;
6987 memcpy_fromio(config_table, table_iomem_addr, table_length);
6989 section_info.ctrl_info = ctrl_info;
6991 get_unaligned_le32(&config_table->first_section_offset);
6993 while (section_offset) {
6994 section = (void *)config_table + section_offset;
6996 section_info.section = section;
6997 section_info.section_offset = section_offset;
6998 section_info.section_iomem_addr =
6999 table_iomem_addr + section_offset;
7001 switch (get_unaligned_le16(&section->section_id)) {
7002 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7003 pqi_process_firmware_features_section(&section_info);
7005 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7006 if (pqi_disable_heartbeat)
7007 dev_warn(&ctrl_info->pci_dev->dev,
7008 "heartbeat disabled by module parameter\n");
7010 ctrl_info->heartbeat_counter =
7014 struct pqi_config_table_heartbeat,
7017 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7018 ctrl_info->soft_reset_status =
7021 offsetof(struct pqi_config_table_soft_reset,
7027 get_unaligned_le16(&section->next_section_offset);
7030 kfree(config_table);
7035 /* Switches the controller from PQI mode back into SIS mode. */
7037 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7041 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7042 rc = pqi_reset(ctrl_info);
7045 rc = sis_reenable_sis_mode(ctrl_info);
7047 dev_err(&ctrl_info->pci_dev->dev,
7048 "re-enabling SIS mode failed with error %d\n", rc);
7051 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7057 * If the controller isn't already in SIS mode, this function forces it into
 * SIS mode.
 */
7061 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7063 if (!sis_is_firmware_running(ctrl_info))
7066 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7069 if (sis_is_kernel_up(ctrl_info)) {
7070 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7074 return pqi_revert_to_sis_mode(ctrl_info);
7077 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7081 rc = pqi_force_sis_mode(ctrl_info);
7086 * Wait until the controller is ready to start accepting SIS
 * commands.
 */
7089 rc = sis_wait_for_ctrl_ready(ctrl_info);
7094 * Get the controller properties. This allows us to determine
7095 * whether or not it supports PQI mode.
7097 rc = sis_get_ctrl_properties(ctrl_info);
7099 dev_err(&ctrl_info->pci_dev->dev,
7100 "error obtaining controller properties\n");
7104 rc = sis_get_pqi_capabilities(ctrl_info);
7106 dev_err(&ctrl_info->pci_dev->dev,
7107 "error obtaining controller capabilities\n");
7111 if (reset_devices) {
7112 if (ctrl_info->max_outstanding_requests >
7113 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
7114 ctrl_info->max_outstanding_requests =
7115 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
7117 if (ctrl_info->max_outstanding_requests >
7118 PQI_MAX_OUTSTANDING_REQUESTS)
7119 ctrl_info->max_outstanding_requests =
7120 PQI_MAX_OUTSTANDING_REQUESTS;
7123 pqi_calculate_io_resources(ctrl_info);
7125 rc = pqi_alloc_error_buffer(ctrl_info);
7127 dev_err(&ctrl_info->pci_dev->dev,
7128 "failed to allocate PQI error buffer\n");
7133 * If the function we are about to call succeeds, the
7134 * controller will transition from legacy SIS mode
 * into PQI mode.
 */
7137 rc = sis_init_base_struct_addr(ctrl_info);
7139 dev_err(&ctrl_info->pci_dev->dev,
7140 "error initializing PQI mode\n");
7144 /* Wait for the controller to complete the SIS -> PQI transition. */
7145 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7147 dev_err(&ctrl_info->pci_dev->dev,
7148 "transition to PQI mode failed\n");
7152 /* From here on, we are running in PQI mode. */
7153 ctrl_info->pqi_mode_enabled = true;
7154 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7156 rc = pqi_alloc_admin_queues(ctrl_info);
7158 dev_err(&ctrl_info->pci_dev->dev,
7159 "failed to allocate admin queues\n");
7163 rc = pqi_create_admin_queues(ctrl_info);
7165 dev_err(&ctrl_info->pci_dev->dev,
7166 "error creating admin queues\n");
7170 rc = pqi_report_device_capability(ctrl_info);
7172 dev_err(&ctrl_info->pci_dev->dev,
7173 "obtaining device capability failed\n");
7177 rc = pqi_validate_device_capability(ctrl_info);
7181 pqi_calculate_queue_resources(ctrl_info);
7183 rc = pqi_enable_msix_interrupts(ctrl_info);
7187 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7188 ctrl_info->max_msix_vectors =
7189 ctrl_info->num_msix_vectors_enabled;
7190 pqi_calculate_queue_resources(ctrl_info);
7193 rc = pqi_alloc_io_resources(ctrl_info);
7197 rc = pqi_alloc_operational_queues(ctrl_info);
7199 dev_err(&ctrl_info->pci_dev->dev,
7200 "failed to allocate operational queues\n");
7204 pqi_init_operational_queues(ctrl_info);
7206 rc = pqi_request_irqs(ctrl_info);
7210 rc = pqi_create_queues(ctrl_info);
7214 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7216 ctrl_info->controller_online = true;
7218 rc = pqi_process_config_table(ctrl_info);
7222 pqi_start_heartbeat_timer(ctrl_info);
7224 rc = pqi_enable_events(ctrl_info);
7226 dev_err(&ctrl_info->pci_dev->dev,
7227 "error enabling events\n");
7231 /* Register with the SCSI subsystem. */
7232 rc = pqi_register_scsi(ctrl_info);
7236 rc = pqi_get_ctrl_product_details(ctrl_info);
7238 dev_err(&ctrl_info->pci_dev->dev,
7239 "error obtaining product details\n");
7243 rc = pqi_get_ctrl_serial_number(ctrl_info);
7245 dev_err(&ctrl_info->pci_dev->dev,
7246 "error obtaining ctrl serial number\n");
7250 rc = pqi_set_diag_rescan(ctrl_info);
7252 dev_err(&ctrl_info->pci_dev->dev,
7253 "error enabling multi-lun rescan\n");
7257 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7259 dev_err(&ctrl_info->pci_dev->dev,
7260 "error updating host wellness\n");
7264 pqi_schedule_update_time_worker(ctrl_info);
7266 pqi_scan_scsi_devices(ctrl_info);
7271 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7274 struct pqi_admin_queues *admin_queues;
7275 struct pqi_event_queue *event_queue;
7277 admin_queues = &ctrl_info->admin_queues;
7278 admin_queues->iq_pi_copy = 0;
7279 admin_queues->oq_ci_copy = 0;
7280 writel(0, admin_queues->oq_pi);
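/*
 * Clear both the host-side index copies and the device-visible index
 * registers of every queue group so the driver and controller resume
 * in lockstep.
 */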
7282 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7283 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7284 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7285 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7287 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7288 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7289 writel(0, ctrl_info->queue_groups[i].oq_pi);
7292 event_queue = &ctrl_info->event_queue;
7293 writel(0, event_queue->oq_pi);
7294 event_queue->oq_ci_copy = 0;
7297 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7301 rc = pqi_force_sis_mode(ctrl_info);
7306 * Wait until the controller is ready to start accepting SIS
 * commands.
 */
7309 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7314 * Get the controller properties. This allows us to determine
7315 * whether or not it supports PQI mode.
7317 rc = sis_get_ctrl_properties(ctrl_info);
7319 dev_err(&ctrl_info->pci_dev->dev,
7320 "error obtaining controller properties\n");
7324 rc = sis_get_pqi_capabilities(ctrl_info);
7326 dev_err(&ctrl_info->pci_dev->dev,
7327 "error obtaining controller capabilities\n");
7332 * If the function we are about to call succeeds, the
7333 * controller will transition from legacy SIS mode
 * into PQI mode.
 */
7336 rc = sis_init_base_struct_addr(ctrl_info);
7338 dev_err(&ctrl_info->pci_dev->dev,
7339 "error initializing PQI mode\n");
7343 /* Wait for the controller to complete the SIS -> PQI transition. */
7344 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7346 dev_err(&ctrl_info->pci_dev->dev,
7347 "transition to PQI mode failed\n");
7351 /* From here on, we are running in PQI mode. */
7352 ctrl_info->pqi_mode_enabled = true;
7353 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7355 pqi_reinit_queues(ctrl_info);
7357 rc = pqi_create_admin_queues(ctrl_info);
7359 dev_err(&ctrl_info->pci_dev->dev,
7360 "error creating admin queues\n");
7364 rc = pqi_create_queues(ctrl_info);
7368 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7370 ctrl_info->controller_online = true;
7371 pqi_ctrl_unblock_requests(ctrl_info);
7373 rc = pqi_process_config_table(ctrl_info);
7377 pqi_start_heartbeat_timer(ctrl_info);
7379 rc = pqi_enable_events(ctrl_info);
7381 dev_err(&ctrl_info->pci_dev->dev,
7382 "error enabling events\n");
7386 rc = pqi_get_ctrl_product_details(ctrl_info);
7388 dev_err(&ctrl_info->pci_dev->dev,
7389 "error obtaining product detail\n");
7393 rc = pqi_set_diag_rescan(ctrl_info);
7395 dev_err(&ctrl_info->pci_dev->dev,
7396 "error enabling multi-lun rescan\n");
7400 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7402 dev_err(&ctrl_info->pci_dev->dev,
7403 "error updating host wellness\n");
7407 pqi_schedule_update_time_worker(ctrl_info);
7409 pqi_scan_scsi_devices(ctrl_info);
7414 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
7417 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
7418 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
7421 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
7426 rc = pci_enable_device(ctrl_info->pci_dev);
7428 dev_err(&ctrl_info->pci_dev->dev,
7429 "failed to enable PCI device\n");
7433 if (sizeof(dma_addr_t) > 4)
7434 mask = DMA_BIT_MASK(64);
7436 mask = DMA_BIT_MASK(32);
7438 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
7440 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
7441 goto disable_device;
7444 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
7446 dev_err(&ctrl_info->pci_dev->dev,
7447 "failed to obtain PCI resources\n");
7448 goto disable_device;
7451 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
7452 ctrl_info->pci_dev, 0),
7453 sizeof(struct pqi_ctrl_registers));
7454 if (!ctrl_info->iomem_base) {
7455 dev_err(&ctrl_info->pci_dev->dev,
7456 "failed to map memory for controller registers\n");
7458 goto release_regions;
7461 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
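/* 0x6 selects the 65 ms - 210 ms completion timeout range in DEVCTL2. */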
7463 /* Increase the PCIe completion timeout. */
7464 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
7465 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
7467 dev_err(&ctrl_info->pci_dev->dev,
7468 "failed to set PCIe completion timeout\n");
7469 goto release_regions;
7472 /* Enable bus mastering. */
7473 pci_set_master(ctrl_info->pci_dev);
7475 ctrl_info->registers = ctrl_info->iomem_base;
7476 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
7478 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
7483 pci_release_regions(ctrl_info->pci_dev);
7485 pci_disable_device(ctrl_info->pci_dev);
7490 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
7492 iounmap(ctrl_info->iomem_base);
7493 pci_release_regions(ctrl_info->pci_dev);
7494 if (pci_is_enabled(ctrl_info->pci_dev))
7495 pci_disable_device(ctrl_info->pci_dev);
7496 pci_set_drvdata(ctrl_info->pci_dev, NULL);
7499 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
7501 struct pqi_ctrl_info *ctrl_info;
7503 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7504 GFP_KERNEL, numa_node);
7508 mutex_init(&ctrl_info->scan_mutex);
7509 mutex_init(&ctrl_info->lun_reset_mutex);
7510 mutex_init(&ctrl_info->ofa_mutex);
7512 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7513 spin_lock_init(&ctrl_info->scsi_device_list_lock);
7515 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7516 atomic_set(&ctrl_info->num_interrupts, 0);
7518 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7519 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7521 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
7522 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
7524 sema_init(&ctrl_info->sync_request_sem,
7525 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7526 init_waitqueue_head(&ctrl_info->block_requests_wait);
7528 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7529 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7530 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7531 pqi_raid_bypass_retry_worker);
7533 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
7534 ctrl_info->irq_mode = IRQ_MODE_NONE;
7535 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7540 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7545 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7547 pqi_free_irqs(ctrl_info);
7548 pqi_disable_msix_interrupts(ctrl_info);
7551 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7553 pqi_stop_heartbeat_timer(ctrl_info);
7554 pqi_free_interrupts(ctrl_info);
7555 if (ctrl_info->queue_memory_base)
7556 dma_free_coherent(&ctrl_info->pci_dev->dev,
7557 ctrl_info->queue_memory_length,
7558 ctrl_info->queue_memory_base,
7559 ctrl_info->queue_memory_base_dma_handle);
7560 if (ctrl_info->admin_queue_memory_base)
7561 dma_free_coherent(&ctrl_info->pci_dev->dev,
7562 ctrl_info->admin_queue_memory_length,
7563 ctrl_info->admin_queue_memory_base,
7564 ctrl_info->admin_queue_memory_base_dma_handle);
7565 pqi_free_all_io_requests(ctrl_info);
7566 if (ctrl_info->error_buffer)
7567 dma_free_coherent(&ctrl_info->pci_dev->dev,
7568 ctrl_info->error_buffer_length,
7569 ctrl_info->error_buffer,
7570 ctrl_info->error_buffer_dma_handle);
7571 if (ctrl_info->iomem_base)
7572 pqi_cleanup_pci_init(ctrl_info);
7573 pqi_free_ctrl_info(ctrl_info);
7576 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7578 pqi_cancel_rescan_worker(ctrl_info);
7579 pqi_cancel_update_time_worker(ctrl_info);
7580 pqi_remove_all_scsi_devices(ctrl_info);
7581 pqi_unregister_scsi(ctrl_info);
7582 if (ctrl_info->pqi_mode_enabled)
7583 pqi_revert_to_sis_mode(ctrl_info);
7584 pqi_free_ctrl_resources(ctrl_info);
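/*
 * Quiesce for online firmware activation: stop background work, block
 * and drain all I/O, then record the controller as back in SIS mode
 * while the new firmware is activated.
 */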
7587 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7589 pqi_cancel_update_time_worker(ctrl_info);
7590 pqi_cancel_rescan_worker(ctrl_info);
7591 pqi_wait_until_lun_reset_finished(ctrl_info);
7592 pqi_wait_until_scan_finished(ctrl_info);
7593 pqi_ctrl_ofa_start(ctrl_info);
7594 pqi_ctrl_block_requests(ctrl_info);
7595 pqi_ctrl_wait_until_quiesced(ctrl_info);
7596 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7597 pqi_fail_io_queued_for_all_devices(ctrl_info);
7598 pqi_wait_until_inbound_queues_empty(ctrl_info);
7599 pqi_stop_heartbeat_timer(ctrl_info);
7600 ctrl_info->pqi_mode_enabled = false;
7601 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7604 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7606 pqi_ofa_free_host_buffer(ctrl_info);
7607 ctrl_info->pqi_mode_enabled = true;
7608 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7609 ctrl_info->controller_online = true;
7610 pqi_ctrl_unblock_requests(ctrl_info);
7611 pqi_start_heartbeat_timer(ctrl_info);
7612 pqi_schedule_update_time_worker(ctrl_info);
7613 pqi_clear_soft_reset_status(ctrl_info,
7614 PQI_SOFT_RESET_ABORT);
7615 pqi_scan_scsi_devices(ctrl_info);
7618 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7619 u32 total_size, u32 chunk_size)
7624 struct pqi_sg_descriptor *mem_descriptor = NULL;
7626 struct pqi_ofa_memory *ofap;
7628 dev = &ctrl_info->pci_dev->dev;
7630 sg_count = DIV_ROUND_UP(total_size, chunk_size);
7633 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
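/*
 * Carve the host buffer into sg_count DMA-coherent chunks of chunk_size
 * bytes, describing each chunk with an SG descriptor in the OFA memory
 * structure handed to the firmware.
 */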
7635 if (sg_count * chunk_size < total_size)
7638 ctrl_info->pqi_ofa_chunk_virt_addr =
7639 kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
7640 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7643 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
7644 dma_addr_t dma_handle;
7646 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7647 dma_alloc_coherent(dev, chunk_size, &dma_handle,
7650 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7653 mem_descriptor = &ofap->sg_descriptor[i];
7654 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
7655 put_unaligned_le32(chunk_size, &mem_descriptor->length);
7658 if (!size || size < total_size)
7659 goto out_free_chunks;
7661 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
7662 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
7663 put_unaligned_le32(size, &ofap->bytes_allocated);
7669 mem_descriptor = &ofap->sg_descriptor[i];
7670 dma_free_coherent(dev, chunk_size,
7671 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7672 get_unaligned_le64(&mem_descriptor->address));
7674 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7677 put_unaligned_le32(0, &ofap->bytes_allocated);
7681 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7687 total_size = le32_to_cpu(
7688 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7689 min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
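/*
 * Start with one chunk covering the whole buffer and halve the chunk
 * size on each allocation failure; give up once covering the buffer
 * would take more than PQI_OFA_MAX_SG_DESCRIPTORS chunks.
 */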
7691 for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
7692 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7698 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7699 u32 bytes_requested)
7701 struct pqi_ofa_memory *pqi_ofa_memory;
7704 dev = &ctrl_info->pci_dev->dev;
7705 pqi_ofa_memory = dma_alloc_coherent(dev,
7706 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7707 &ctrl_info->pqi_ofa_mem_dma_handle,
7710 if (!pqi_ofa_memory)
7713 put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
7714 memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
7715 sizeof(pqi_ofa_memory->signature));
7716 pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
7718 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7720 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
7721 dev_err(dev, "failed to allocate host buffer of size = %u\n",
7726 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7729 struct pqi_sg_descriptor *mem_descriptor;
7730 struct pqi_ofa_memory *ofap;
7732 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7737 if (!ofap->bytes_allocated)
7740 mem_descriptor = ofap->sg_descriptor;
7742 for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
7744 dma_free_coherent(&ctrl_info->pci_dev->dev,
7745 get_unaligned_le32(&mem_descriptor[i].length),
7746 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7747 get_unaligned_le64(&mem_descriptor[i].address));
7749 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7752 dma_free_coherent(&ctrl_info->pci_dev->dev,
7753 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
7754 ctrl_info->pqi_ofa_mem_dma_handle);
7755 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
7758 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7760 struct pqi_vendor_general_request request;
7762 struct pqi_ofa_memory *ofap;
7764 memset(&request, 0, sizeof(request));
7766 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7768 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7769 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7770 &request.header.iu_length);
7771 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
7772 &request.function_code);
7775 size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
7776 get_unaligned_le16(&ofap->num_memory_descriptors) *
7777 sizeof(struct pqi_sg_descriptor);
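/*
 * Hand the firmware the DMA address and length of the OFA memory
 * descriptor so it can locate the host buffer across the online
 * firmware activation.
 */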
7779 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7780 &request.data.ofa_memory_allocation.buffer_address);
7781 put_unaligned_le32(size,
7782 &request.data.ofa_memory_allocation.buffer_length);
7786 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7787 0, NULL, NO_TIMEOUT);
7790 #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
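/*
 * Give the controller's message unit five seconds to become ready after
 * an OFA-triggered reset before re-running resume initialization.
 */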
7792 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7794 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7795 return pqi_ctrl_init_resume(ctrl_info);
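/*
 * Carry out the user-selected lockup action (none, reboot, or panic)
 * when a controller lockup has been detected.
 */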
7798 static void pqi_perform_lockup_action(void)
7800 switch (pqi_lockup_action) {
7802 panic("FATAL: Smart Family Controller lockup detected");
7805 emergency_restart();
7813 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
7814 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
7815 .status = SAM_STAT_CHECK_CONDITION,
7818 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
7821 struct pqi_io_request *io_request;
7822 struct scsi_cmnd *scmd;
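/*
 * Walk the entire I/O request pool, skipping free slots (refcount 0).
 * SCSI commands are failed with DID_NO_CONNECT; internal synchronous
 * requests get -ENXIO and a canned check-condition error block before
 * their completion callbacks are invoked.
 */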
7824 for (i = 0; i < ctrl_info->max_io_slots; i++) {
7825 io_request = &ctrl_info->io_request_pool[i];
7826 if (atomic_read(&io_request->refcount) == 0)
7829 scmd = io_request->scmd;
7831 set_host_byte(scmd, DID_NO_CONNECT);
7833 io_request->status = -ENXIO;
7834 io_request->error_info =
7835 &pqi_ctrl_offline_raid_error_info;
7838 io_request->io_complete_callback(io_request,
7839 io_request->context);
7843 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
7845 pqi_perform_lockup_action();
7846 pqi_stop_heartbeat_timer(ctrl_info);
7847 pqi_free_interrupts(ctrl_info);
7848 pqi_cancel_rescan_worker(ctrl_info);
7849 pqi_cancel_update_time_worker(ctrl_info);
7850 pqi_ctrl_wait_until_quiesced(ctrl_info);
7851 pqi_fail_all_outstanding_requests(ctrl_info);
7852 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7853 pqi_ctrl_unblock_requests(ctrl_info);
7856 static void pqi_ctrl_offline_worker(struct work_struct *work)
7858 struct pqi_ctrl_info *ctrl_info;
7860 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7861 pqi_take_ctrl_offline_deferred(ctrl_info);
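/*
 * Taking a controller offline is split in two: pqi_take_ctrl_offline()
 * below does the immediate work (mark offline, block requests, shut the
 * controller down) and defers the teardown that must wait on
 * outstanding I/O to ctrl_offline_work, handled above.
 */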
7864 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7866 if (!ctrl_info->controller_online)
7869 ctrl_info->controller_online = false;
7870 ctrl_info->pqi_mode_enabled = false;
7871 pqi_ctrl_block_requests(ctrl_info);
7872 if (!pqi_disable_ctrl_shutdown)
7873 sis_shutdown_ctrl(ctrl_info);
7874 pci_disable_device(ctrl_info->pci_dev);
7875 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
7876 schedule_work(&ctrl_info->ctrl_offline_work);
7879 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
7880 const struct pci_device_id *id)
7882 char *ctrl_description;
7884 if (id->driver_data)
7885 ctrl_description = (char *)id->driver_data;
7887 ctrl_description = "Microsemi Smart Family Controller";
7889 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
7892 static int pqi_pci_probe(struct pci_dev *pci_dev,
7893 const struct pci_device_id *id)
7897 struct pqi_ctrl_info *ctrl_info;
7899 pqi_print_ctrl_info(pci_dev, id);
7901 if (pqi_disable_device_id_wildcards &&
7902 id->subvendor == PCI_ANY_ID &&
7903 id->subdevice == PCI_ANY_ID) {
7904 dev_warn(&pci_dev->dev,
7905 "controller not probed because device ID wildcards are disabled\n");
7909 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
7910 dev_warn(&pci_dev->dev,
7911 "controller device ID matched using wildcards\n");
7913 node = dev_to_node(&pci_dev->dev);
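/*
 * If the device reports no NUMA affinity, fall back to the node of
 * CPU 0 so controller allocations still land on a real node.
 */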
7914 if (node == NUMA_NO_NODE) {
7915 cp_node = cpu_to_node(0);
7916 if (cp_node == NUMA_NO_NODE)
7918 set_dev_node(&pci_dev->dev, cp_node);
7921 ctrl_info = pqi_alloc_ctrl_info(node);
7923 dev_err(&pci_dev->dev,
7924 "failed to allocate controller info block\n");
7928 ctrl_info->pci_dev = pci_dev;
7930 rc = pqi_pci_init(ctrl_info);
7934 rc = pqi_ctrl_init(ctrl_info);
7941 pqi_remove_ctrl(ctrl_info);
7946 static void pqi_pci_remove(struct pci_dev *pci_dev)
7948 struct pqi_ctrl_info *ctrl_info;
7950 ctrl_info = pci_get_drvdata(pci_dev);
7954 ctrl_info->in_shutdown = true;
7956 pqi_remove_ctrl(ctrl_info);
7959 static void pqi_shutdown(struct pci_dev *pci_dev)
7962 struct pqi_ctrl_info *ctrl_info;
7964 ctrl_info = pci_get_drvdata(pci_dev);
7969 * Write all data in the controller's battery-backed cache to
 * storage.
 */
7972 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
7973 pqi_free_interrupts(ctrl_info);
7974 pqi_reset(ctrl_info);
7979 dev_warn(&pci_dev->dev,
7980 "unable to flush controller cache\n");
7983 static void pqi_process_lockup_action_param(void)
7987 if (!pqi_lockup_action_param)
7990 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
7991 if (strcmp(pqi_lockup_action_param,
7992 pqi_lockup_actions[i].name) == 0) {
7993 pqi_lockup_action = pqi_lockup_actions[i].action;
7998 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
7999 DRIVER_NAME_SHORT, pqi_lockup_action_param);
8002 static void pqi_process_module_params(void)
8004 pqi_process_lockup_action_param();
8007 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
8009 struct pqi_ctrl_info *ctrl_info;
8011 ctrl_info = pci_get_drvdata(pci_dev);
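/*
 * Ordering matters: stop asynchronous work first, flush the cache while
 * I/O is still possible, then block and drain all outstanding requests
 * before stopping the heartbeat and powering down.
 */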
8013 pqi_disable_events(ctrl_info);
8014 pqi_cancel_update_time_worker(ctrl_info);
8015 pqi_cancel_rescan_worker(ctrl_info);
8016 pqi_wait_until_scan_finished(ctrl_info);
8017 pqi_wait_until_lun_reset_finished(ctrl_info);
8018 pqi_wait_until_ofa_finished(ctrl_info);
8019 pqi_flush_cache(ctrl_info, SUSPEND);
8020 pqi_ctrl_block_requests(ctrl_info);
8021 pqi_ctrl_wait_until_quiesced(ctrl_info);
8022 pqi_wait_until_inbound_queues_empty(ctrl_info);
8023 pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8024 pqi_stop_heartbeat_timer(ctrl_info);
8026 if (state.event == PM_EVENT_FREEZE)
8029 pci_save_state(pci_dev);
8030 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
8032 ctrl_info->controller_online = false;
8033 ctrl_info->pqi_mode_enabled = false;
8038 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
8041 struct pqi_ctrl_info *ctrl_info;
8043 ctrl_info = pci_get_drvdata(pci_dev);
8045 if (pci_dev->current_state != PCI_D0) {
8046 ctrl_info->max_hw_queue_index = 0;
8047 pqi_free_interrupts(ctrl_info);
8048 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
8049 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
8050 IRQF_SHARED, DRIVER_NAME_SHORT,
8051 &ctrl_info->queue_groups[0]);
8053 dev_err(&ctrl_info->pci_dev->dev,
8054 "irq %u init failed with error %d\n",
8058 pqi_start_heartbeat_timer(ctrl_info);
8059 pqi_ctrl_unblock_requests(ctrl_info);
8063 pci_set_power_state(pci_dev, PCI_D0);
8064 pci_restore_state(pci_dev);
8066 return pqi_ctrl_init_resume(ctrl_info);
8069 /* Define the PCI IDs for the controllers that we support. */
8070 static const struct pci_device_id pqi_pci_id_table[] = {
8072 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8076 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8080 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8084 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8088 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8092 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8096 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8100 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8104 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8108 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8112 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8116 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8120 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8124 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8128 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8132 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8136 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8140 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8144 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8148 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8152 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8156 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8160 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8164 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8168 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8172 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8176 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8180 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8184 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8188 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8192 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8196 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8197 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
8200 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8201 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
8204 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8205 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
8208 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8209 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
8212 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8213 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
8216 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8217 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
8220 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8221 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
8224 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8225 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
8228 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8229 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
8232 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8233 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
8236 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8237 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
8240 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8241 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
8244 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8245 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
8248 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8249 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
8252 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8253 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
8256 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8257 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
8260 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8261 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
8264 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8265 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
8268 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8269 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
8272 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8273 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
8276 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8277 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
8280 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8281 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
8284 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8285 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
8288 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8289 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
8292 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8293 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
8296 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8297 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
8300 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8301 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
8304 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8305 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
8308 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8309 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
8312 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8313 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
8316 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8317 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
8320 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8321 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
8324 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8325 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
8328 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8329 PCI_VENDOR_ID_ADVANTECH, 0x8312)
8332 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8333 PCI_VENDOR_ID_DELL, 0x1fe0)
8336 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8337 PCI_VENDOR_ID_HP, 0x0600)
8340 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8341 PCI_VENDOR_ID_HP, 0x0601)
8344 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8345 PCI_VENDOR_ID_HP, 0x0602)
8348 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8349 PCI_VENDOR_ID_HP, 0x0603)
8352 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8353 PCI_VENDOR_ID_HP, 0x0609)
8356 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8357 PCI_VENDOR_ID_HP, 0x0650)
8360 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8361 PCI_VENDOR_ID_HP, 0x0651)
8364 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8365 PCI_VENDOR_ID_HP, 0x0652)
8368 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8369 PCI_VENDOR_ID_HP, 0x0653)
8372 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8373 PCI_VENDOR_ID_HP, 0x0654)
8376 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8377 PCI_VENDOR_ID_HP, 0x0655)
8380 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8381 PCI_VENDOR_ID_HP, 0x0700)
8384 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8385 PCI_VENDOR_ID_HP, 0x0701)
8388 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8389 PCI_VENDOR_ID_HP, 0x1001)
8392 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8393 PCI_VENDOR_ID_HP, 0x1100)
8396 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8397 PCI_VENDOR_ID_HP, 0x1101)
8400 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8404 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8408 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8412 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8416 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8417 PCI_VENDOR_ID_GIGABYTE, 0x1000)
8420 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8421 PCI_ANY_ID, PCI_ANY_ID)
8426 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
8428 static struct pci_driver pqi_pci_driver = {
8429 .name = DRIVER_NAME_SHORT,
8430 .id_table = pqi_pci_id_table,
8431 .probe = pqi_pci_probe,
8432 .remove = pqi_pci_remove,
8433 .shutdown = pqi_shutdown,
8434 #if defined(CONFIG_PM)
8435 .suspend = pqi_suspend,
8436 .resume = pqi_resume,
8440 static int __init pqi_init(void)
8444 pr_info(DRIVER_NAME "\n");
8446 pqi_sas_transport_template =
8447 sas_attach_transport(&pqi_sas_transport_functions);
8448 if (!pqi_sas_transport_template)
8451 pqi_process_module_params();
8453 rc = pci_register_driver(&pqi_pci_driver);
8455 sas_release_transport(pqi_sas_transport_template);
8460 static void __exit pqi_cleanup(void)
8462 pci_unregister_driver(&pqi_pci_driver);
8463 sas_release_transport(pqi_sas_transport_template);
8466 module_init(pqi_init);
8467 module_exit(pqi_cleanup);
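/*
 * verify_structures() is never called; it exists only so the
 * BUILD_BUG_ON() checks below validate structure offsets and sizes at
 * compile time.
 */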
8469 static void __attribute__((unused)) verify_structures(void)
8471 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8472 sis_host_to_ctrl_doorbell) != 0x20);
8473 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8474 sis_interrupt_mask) != 0x34);
8475 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8476 sis_ctrl_to_host_doorbell) != 0x9c);
8477 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8478 sis_ctrl_to_host_doorbell_clear) != 0xa0);
8479 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8480 sis_driver_scratch) != 0xb0);
8481 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8482 sis_firmware_status) != 0xbc);
8483 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8484 sis_mailbox) != 0x1000);
8485 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8486 pqi_registers) != 0x4000);
8488 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8490 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8492 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8493 response_queue_id) != 0x4);
8494 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8496 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
8498 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8500 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8501 service_response) != 0x1);
8502 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8503 data_present) != 0x2);
8504 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8506 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8507 residual_count) != 0x4);
8508 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8509 data_length) != 0x8);
8510 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8512 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8514 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
8516 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8517 data_in_result) != 0x0);
8518 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8519 data_out_result) != 0x1);
8520 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8522 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8524 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8525 status_qualifier) != 0x6);
8526 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8527 sense_data_length) != 0x8);
8528 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8529 response_data_length) != 0xa);
8530 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8531 data_in_transferred) != 0xc);
8532 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8533 data_out_transferred) != 0x10);
8534 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8536 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
8538 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8540 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8541 function_and_status_code) != 0x8);
8542 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8543 max_admin_iq_elements) != 0x10);
8544 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8545 max_admin_oq_elements) != 0x11);
8546 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8547 admin_iq_element_length) != 0x12);
8548 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8549 admin_oq_element_length) != 0x13);
8550 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8551 max_reset_timeout) != 0x14);
8552 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8553 legacy_intx_status) != 0x18);
8554 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8555 legacy_intx_mask_set) != 0x1c);
8556 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8557 legacy_intx_mask_clear) != 0x20);
8558 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8559 device_status) != 0x40);
8560 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8561 admin_iq_pi_offset) != 0x48);
8562 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8563 admin_oq_ci_offset) != 0x50);
8564 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8565 admin_iq_element_array_addr) != 0x58);
8566 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8567 admin_oq_element_array_addr) != 0x60);
8568 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8569 admin_iq_ci_addr) != 0x68);
8570 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8571 admin_oq_pi_addr) != 0x70);
8572 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8573 admin_iq_num_elements) != 0x78);
8574 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8575 admin_oq_num_elements) != 0x79);
8576 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8577 admin_queue_int_msg_num) != 0x7a);
8578 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8579 device_error) != 0x80);
8580 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8581 error_details) != 0x88);
8582 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8583 device_reset) != 0x90);
8584 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8585 power_action) != 0x94);
8586 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
8588 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8589 header.iu_type) != 0);
8590 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8591 header.iu_length) != 2);
8592 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8593 header.work_area) != 6);
8594 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8596 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8597 function_code) != 10);
8598 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8599 data.report_device_capability.buffer_length) != 44);
8600 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8601 data.report_device_capability.sg_descriptor) != 48);
8602 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8603 data.create_operational_iq.queue_id) != 12);
8604 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8605 data.create_operational_iq.element_array_addr) != 16);
8606 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8607 data.create_operational_iq.ci_addr) != 24);
8608 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8609 data.create_operational_iq.num_elements) != 32);
8610 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8611 data.create_operational_iq.element_length) != 34);
8612 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8613 data.create_operational_iq.queue_protocol) != 36);
8614 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8615 data.create_operational_oq.queue_id) != 12);
8616 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8617 data.create_operational_oq.element_array_addr) != 16);
8618 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8619 data.create_operational_oq.pi_addr) != 24);
8620 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8621 data.create_operational_oq.num_elements) != 32);
8622 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8623 data.create_operational_oq.element_length) != 34);
8624 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8625 data.create_operational_oq.queue_protocol) != 36);
8626 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8627 data.create_operational_oq.int_msg_num) != 40);
8628 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8629 data.create_operational_oq.coalescing_count) != 42);
8630 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8631 data.create_operational_oq.min_coalescing_time) != 44);
8632 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8633 data.create_operational_oq.max_coalescing_time) != 48);
8634 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8635 data.delete_operational_queue.queue_id) != 12);
8636 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
8637 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8638 data.create_operational_iq) != 64 - 11);
8639 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8640 data.create_operational_oq) != 64 - 11);
8641 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8642 data.delete_operational_queue) != 64 - 11);
8644 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8645 header.iu_type) != 0);
8646 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8647 header.iu_length) != 2);
8648 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8649 header.work_area) != 6);
8650 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8652 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8653 function_code) != 10);
8654 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8656 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8657 data.create_operational_iq.status_descriptor) != 12);
8658 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8659 data.create_operational_iq.iq_pi_offset) != 16);
8660 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8661 data.create_operational_oq.status_descriptor) != 12);
8662 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8663 data.create_operational_oq.oq_ci_offset) != 16);
8664 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
8666 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8667 header.iu_type) != 0);
8668 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8669 header.iu_length) != 2);
8670 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8671 header.response_queue_id) != 4);
8672 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8673 header.work_area) != 6);
8674 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8676 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8678 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8679 buffer_length) != 12);
8680 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8682 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8683 protocol_specific) != 24);
8684 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8685 error_index) != 27);
8686 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8688 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8689 sg_descriptors) != 64);
8690 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
8691 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8693 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8694 header.iu_type) != 0);
8695 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8696 header.iu_length) != 2);
8697 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8698 header.response_queue_id) != 4);
8699 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8700 header.work_area) != 6);
8701 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8703 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8705 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8706 buffer_length) != 16);
8707 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8708 data_encryption_key_index) != 22);
8709 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8710 encrypt_tweak_lower) != 24);
8711 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8712 encrypt_tweak_upper) != 28);
8713 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8715 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8716 error_index) != 48);
8717 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8718 num_sg_descriptors) != 50);
8719 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8721 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8723 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8724 sg_descriptors) != 64);
8725 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
8726 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8728 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8729 header.iu_type) != 0);
8730 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8731 header.iu_length) != 2);
8732 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8734 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8735 error_index) != 10);
8737 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8738 header.iu_type) != 0);
8739 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8740 header.iu_length) != 2);
8741 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8742 header.response_queue_id) != 4);
8743 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8745 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8746 data.report_event_configuration.buffer_length) != 12);
8747 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8748 data.report_event_configuration.sg_descriptors) != 16);
8749 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8750 data.set_event_configuration.global_event_oq_id) != 10);
8751 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8752 data.set_event_configuration.buffer_length) != 12);
8753 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8754 data.set_event_configuration.sg_descriptors) != 16);
8756 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8757 max_inbound_iu_length) != 6);
8758 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8759 max_outbound_iu_length) != 14);
8760 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
8762 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8764 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8765 iq_arbitration_priority_support_bitmask) != 8);
8766 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8767 maximum_aw_a) != 9);
8768 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8769 maximum_aw_b) != 10);
8770 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8771 maximum_aw_c) != 11);
8772 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8773 max_inbound_queues) != 16);
8774 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8775 max_elements_per_iq) != 18);
8776 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8777 max_iq_element_length) != 24);
8778 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8779 min_iq_element_length) != 26);
8780 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8781 max_outbound_queues) != 30);
8782 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8783 max_elements_per_oq) != 32);
8784 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8785 intr_coalescing_time_granularity) != 34);
8786 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8787 max_oq_element_length) != 36);
8788 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8789 min_oq_element_length) != 38);
8790 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8791 iu_layer_descriptors) != 64);
8792 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
8794 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8796 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8798 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
8800 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8801 num_event_descriptors) != 2);
8802 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8805 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
8806 ARRAY_SIZE(pqi_supported_event_types));
8808 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8809 header.iu_type) != 0);
8810 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8811 header.iu_length) != 2);
8812 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8814 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8816 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8817 additional_event_id) != 12);
8818 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8820 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
8822 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8823 header.iu_type) != 0);
8824 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8825 header.iu_length) != 2);
8826 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8828 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8830 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8831 additional_event_id) != 12);
8832 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
8834 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8835 header.iu_type) != 0);
8836 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8837 header.iu_length) != 2);
8838 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8840 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8842 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8844 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8845 protocol_specific) != 24);
8846 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8847 outbound_queue_id_to_manage) != 26);
8848 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8849 request_id_to_manage) != 28);
8850 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8851 task_management_function) != 30);
8852 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
8854 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8855 header.iu_type) != 0);
8856 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8857 header.iu_length) != 2);
8858 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8860 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8862 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8863 additional_response_info) != 12);
8864 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8865 response_code) != 15);
8866 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
8868 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8869 configured_logical_drive_count) != 0);
8870 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8871 configuration_signature) != 1);
8872 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8873 firmware_version) != 5);
8874 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8875 extended_logical_unit_count) != 154);
8876 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8877 firmware_build_number) != 190);
8878 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8879 controller_mode) != 292);
8881 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8882 phys_bay_in_box) != 115);
8883 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8884 device_type) != 120);
8885 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8886 redundant_path_present_map) != 1736);
8887 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8888 active_path_number) != 1738);
8889 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8890 alternate_paths_phys_connector) != 1739);
8891 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8892 alternate_paths_phys_box_on_port) != 1755);
8893 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
8894 current_queue_depth_limit) != 1796);
8895 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
8897 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
8898 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
8899 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
8900 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
8901 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
8902 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
8903 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
8904 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
8905 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
8906 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
8907 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
8908 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
8910 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
8911 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
8912 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);