drivers/scsi/smartpqi/smartpqi_init.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    driver for Microsemi PQI-based storage controllers
4  *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
5  *    Copyright (c) 2016-2018 Microsemi Corporation
6  *    Copyright (c) 2016 PMC-Sierra, Inc.
7  *
8  *    Questions/Comments/Bugfixes to storagedev@microchip.com
9  *
10  */
11
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/sched.h>
18 #include <linux/rtc.h>
19 #include <linux/bcd.h>
20 #include <linux/reboot.h>
21 #include <linux/cciss_ioctl.h>
22 #include <linux/blk-mq-pci.h>
23 #include <scsi/scsi_host.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_device.h>
26 #include <scsi/scsi_eh.h>
27 #include <scsi/scsi_transport_sas.h>
28 #include <asm/unaligned.h>
29 #include "smartpqi.h"
30 #include "smartpqi_sis.h"
31
32 #if !defined(BUILD_TIMESTAMP)
33 #define BUILD_TIMESTAMP
34 #endif
35
36 #define DRIVER_VERSION          "1.2.6-015"
37 #define DRIVER_MAJOR            1
38 #define DRIVER_MINOR            2
39 #define DRIVER_RELEASE          6
40 #define DRIVER_REVISION         15
41
42 #define DRIVER_NAME             "Microsemi PQI Driver (v" \
43                                 DRIVER_VERSION BUILD_TIMESTAMP ")"
44 #define DRIVER_NAME_SHORT       "smartpqi"
45
46 #define PQI_EXTRA_SGL_MEMORY    (12 * sizeof(struct pqi_sg_descriptor))
47
48 MODULE_AUTHOR("Microsemi");
49 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
50         DRIVER_VERSION);
51 MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
52 MODULE_VERSION(DRIVER_VERSION);
53 MODULE_LICENSE("GPL");
54
55 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
56 static void pqi_ctrl_offline_worker(struct work_struct *work);
57 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
58 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
59 static void pqi_scan_start(struct Scsi_Host *shost);
60 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
61         struct pqi_queue_group *queue_group, enum pqi_io_path path,
62         struct pqi_io_request *io_request);
63 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
64         struct pqi_iu_header *request, unsigned int flags,
65         struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
66 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
67         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
68         unsigned int cdb_length, struct pqi_queue_group *queue_group,
69         struct pqi_encryption_info *encryption_info, bool raid_bypass);
70 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
71 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
72 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
73 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
74         u32 bytes_requested);
75 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
76 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
77 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
78         struct pqi_scsi_dev *device, unsigned long timeout_secs);
79
80 /* for flags argument to pqi_submit_raid_request_synchronous() */
81 #define PQI_SYNC_FLAGS_INTERRUPTABLE    0x1
82
83 static struct scsi_transport_template *pqi_sas_transport_template;
84
85 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
86
87 enum pqi_lockup_action {
88         NONE,
89         REBOOT,
90         PANIC
91 };
92
93 static enum pqi_lockup_action pqi_lockup_action = NONE;
94
95 static struct {
96         enum pqi_lockup_action  action;
97         char                    *name;
98 } pqi_lockup_actions[] = {
99         {
100                 .action = NONE,
101                 .name = "none",
102         },
103         {
104                 .action = REBOOT,
105                 .name = "reboot",
106         },
107         {
108                 .action = PANIC,
109                 .name = "panic",
110         },
111 };
112
113 static unsigned int pqi_supported_event_types[] = {
114         PQI_EVENT_TYPE_HOTPLUG,
115         PQI_EVENT_TYPE_HARDWARE,
116         PQI_EVENT_TYPE_PHYSICAL_DEVICE,
117         PQI_EVENT_TYPE_LOGICAL_DEVICE,
118         PQI_EVENT_TYPE_OFA,
119         PQI_EVENT_TYPE_AIO_STATE_CHANGE,
120         PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
121 };
122
123 static int pqi_disable_device_id_wildcards;
124 module_param_named(disable_device_id_wildcards,
125         pqi_disable_device_id_wildcards, int, 0644);
126 MODULE_PARM_DESC(disable_device_id_wildcards,
127         "Disable device ID wildcards.");
128
129 static int pqi_disable_heartbeat;
130 module_param_named(disable_heartbeat,
131         pqi_disable_heartbeat, int, 0644);
132 MODULE_PARM_DESC(disable_heartbeat,
133         "Disable heartbeat.");
134
135 static int pqi_disable_ctrl_shutdown;
136 module_param_named(disable_ctrl_shutdown,
137         pqi_disable_ctrl_shutdown, int, 0644);
138 MODULE_PARM_DESC(disable_ctrl_shutdown,
139         "Disable controller shutdown when controller locked up.");
140
141 static char *pqi_lockup_action_param;
142 module_param_named(lockup_action,
143         pqi_lockup_action_param, charp, 0644);
144 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
145         "\t\tSupported: none, reboot, panic\n"
146         "\t\tDefault: none");
147
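/*
 * Example usage of the module parameters above (illustrative values):
 *   modprobe smartpqi lockup_action=reboot disable_heartbeat=1
 */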
148 static char *raid_levels[] = {
149         "RAID-0",
150         "RAID-4",
151         "RAID-1(1+0)",
152         "RAID-5",
153         "RAID-5+1",
154         "RAID-ADG",
155         "RAID-1(ADM)",
156 };
157
158 static char *pqi_raid_level_to_string(u8 raid_level)
159 {
160         if (raid_level < ARRAY_SIZE(raid_levels))
161                 return raid_levels[raid_level];
162
163         return "RAID UNKNOWN";
164 }
165
166 #define SA_RAID_0               0
167 #define SA_RAID_4               1
168 #define SA_RAID_1               2       /* also used for RAID 10 */
169 #define SA_RAID_5               3       /* also used for RAID 50 */
170 #define SA_RAID_51              4
171 #define SA_RAID_6               5       /* also used for RAID 60 */
172 #define SA_RAID_ADM             6       /* also used for RAID 1+0 ADM */
173 #define SA_RAID_MAX             SA_RAID_ADM
174 #define SA_RAID_UNKNOWN         0xff
175
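/*
 * Complete a SCSI command back to the SCSI midlayer: perform the driver's
 * per-command bookkeeping (pqi_prep_for_scsi_done()), then invoke the
 * midlayer completion callback.
 */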
176 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
177 {
178         pqi_prep_for_scsi_done(scmd);
179         scmd->scsi_done(scmd);
180 }
181
182 static inline void pqi_disable_write_same(struct scsi_device *sdev)
183 {
184         sdev->no_write_same = 1;
185 }
186
187 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
188 {
189         return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
190 }
191
192 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
193 {
194         return !device->is_physical_device;
195 }
196
197 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
198 {
199         return scsi3addr[2] != 0;
200 }
201
202 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
203 {
204         if (ctrl_info->controller_online)
205                 if (!sis_is_firmware_running(ctrl_info))
206                         pqi_take_ctrl_offline(ctrl_info);
207 }
208
209 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
210 {
211         return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
212 }
213
214 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
215         struct pqi_ctrl_info *ctrl_info)
216 {
217         return sis_read_driver_scratch(ctrl_info);
218 }
219
220 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
221         enum pqi_ctrl_mode mode)
222 {
223         sis_write_driver_scratch(ctrl_info, mode);
224 }
225
226 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
227 {
228         ctrl_info->block_requests = true;
229         scsi_block_requests(ctrl_info->scsi_host);
230 }
231
232 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
233 {
234         ctrl_info->block_requests = false;
235         wake_up_all(&ctrl_info->block_requests_wait);
236         pqi_retry_raid_bypass_requests(ctrl_info);
237         scsi_unblock_requests(ctrl_info->scsi_host);
238 }
239
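/*
 * Wait for the controller to leave the blocked state, up to timeout_msecs
 * (or indefinitely for NO_TIMEOUT).  Returns the number of milliseconds
 * remaining, so a caller can spend the rest of its budget on a later wait.
 */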
240 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
241         unsigned long timeout_msecs)
242 {
243         unsigned long remaining_msecs;
244
245         if (!pqi_ctrl_blocked(ctrl_info))
246                 return timeout_msecs;
247
248         atomic_inc(&ctrl_info->num_blocked_threads);
249
250         if (timeout_msecs == NO_TIMEOUT) {
251                 wait_event(ctrl_info->block_requests_wait,
252                         !pqi_ctrl_blocked(ctrl_info));
253                 remaining_msecs = timeout_msecs;
254         } else {
255                 unsigned long remaining_jiffies;
256
257                 remaining_jiffies =
258                         wait_event_timeout(ctrl_info->block_requests_wait,
259                                 !pqi_ctrl_blocked(ctrl_info),
260                                 msecs_to_jiffies(timeout_msecs));
261                 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
262         }
263
264         atomic_dec(&ctrl_info->num_blocked_threads);
265
266         return remaining_msecs;
267 }
268
269 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
270 {
271         while (atomic_read(&ctrl_info->num_busy_threads) >
272                 atomic_read(&ctrl_info->num_blocked_threads))
273                 usleep_range(1000, 2000);
274 }
275
276 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
277 {
278         return device->device_offline;
279 }
280
281 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
282 {
283         device->in_reset = true;
284 }
285
286 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
287 {
288         device->in_reset = false;
289 }
290
291 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
292 {
293         return device->in_reset;
294 }
295
296 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
297 {
298         ctrl_info->in_ofa = true;
299 }
300
301 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
302 {
303         ctrl_info->in_ofa = false;
304 }
305
306 static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
307 {
308         return ctrl_info->in_ofa;
309 }
310
311 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
312 {
313         device->in_remove = true;
314 }
315
316 static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
317                                         struct pqi_scsi_dev *device)
318 {
319         return device->in_remove && !ctrl_info->in_shutdown;
320 }
321
322 static inline void pqi_schedule_rescan_worker_with_delay(
323         struct pqi_ctrl_info *ctrl_info, unsigned long delay)
324 {
325         if (pqi_ctrl_offline(ctrl_info))
326                 return;
327         if (pqi_ctrl_in_ofa(ctrl_info))
328                 return;
329
330         schedule_delayed_work(&ctrl_info->rescan_work, delay);
331 }
332
333 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
334 {
335         pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
336 }
337
338 #define PQI_RESCAN_WORK_DELAY   (10 * PQI_HZ)
339
340 static inline void pqi_schedule_rescan_worker_delayed(
341         struct pqi_ctrl_info *ctrl_info)
342 {
343         pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
344 }
345
346 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
347 {
348         cancel_delayed_work_sync(&ctrl_info->rescan_work);
349 }
350
351 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
352 {
353         if (!ctrl_info->heartbeat_counter)
354                 return 0;
355
356         return readl(ctrl_info->heartbeat_counter);
357 }
358
359 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
360 {
361         if (!ctrl_info->soft_reset_status)
362                 return 0;
363
364         return readb(ctrl_info->soft_reset_status);
365 }
366
367 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
368                                                 u8 clear)
369 {
370         u8 status;
371
372         if (!ctrl_info->soft_reset_status)
373                 return;
374
375         status = pqi_read_soft_reset_status(ctrl_info);
376         status &= ~clear;
377         writeb(status, ctrl_info->soft_reset_status);
378 }
379
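/*
 * DMA-map a single contiguous buffer and describe it in one SG descriptor,
 * flagged CISS_SG_LAST.  A NULL buffer, zero length, or DMA_NONE direction
 * is treated as "nothing to map" and succeeds.
 */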
380 static int pqi_map_single(struct pci_dev *pci_dev,
381         struct pqi_sg_descriptor *sg_descriptor, void *buffer,
382         size_t buffer_length, enum dma_data_direction data_direction)
383 {
384         dma_addr_t bus_address;
385
386         if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
387                 return 0;
388
389         bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
390                 data_direction);
391         if (dma_mapping_error(&pci_dev->dev, bus_address))
392                 return -ENOMEM;
393
394         put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
395         put_unaligned_le32(buffer_length, &sg_descriptor->length);
396         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
397
398         return 0;
399 }
400
401 static void pqi_pci_unmap(struct pci_dev *pci_dev,
402         struct pqi_sg_descriptor *descriptors, int num_descriptors,
403         enum dma_data_direction data_direction)
404 {
405         int i;
406
407         if (data_direction == DMA_NONE)
408                 return;
409
410         for (i = 0; i < num_descriptors; i++)
411                 dma_unmap_single(&pci_dev->dev,
412                         (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
413                         get_unaligned_le32(&descriptors[i].length),
414                         data_direction);
415 }
416
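/*
 * Build a RAID-path request: fill in the IU header, construct a CDB for the
 * given CISS/BMIC command, translate the SOP data-direction flag into a DMA
 * direction (returned through *dir), and map the data buffer as a single
 * SG element.
 */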
417 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
418         struct pqi_raid_path_request *request, u8 cmd,
419         u8 *scsi3addr, void *buffer, size_t buffer_length,
420         u16 vpd_page, enum dma_data_direction *dir)
421 {
422         u8 *cdb;
423         size_t cdb_length = buffer_length;
424
425         memset(request, 0, sizeof(*request));
426
427         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
428         put_unaligned_le16(offsetof(struct pqi_raid_path_request,
429                 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
430                 &request->header.iu_length);
431         put_unaligned_le32(buffer_length, &request->buffer_length);
432         memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
433         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
434         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
435
436         cdb = request->cdb;
437
438         switch (cmd) {
439         case INQUIRY:
440                 request->data_direction = SOP_READ_FLAG;
441                 cdb[0] = INQUIRY;
442                 if (vpd_page & VPD_PAGE) {
443                         cdb[1] = 0x1;
444                         cdb[2] = (u8)vpd_page;
445                 }
446                 cdb[4] = (u8)cdb_length;
447                 break;
448         case CISS_REPORT_LOG:
449         case CISS_REPORT_PHYS:
450                 request->data_direction = SOP_READ_FLAG;
451                 cdb[0] = cmd;
452                 if (cmd == CISS_REPORT_PHYS)
453                         cdb[1] = CISS_REPORT_PHYS_EXTENDED;
454                 else
455                         cdb[1] = CISS_REPORT_LOG_EXTENDED;
456                 put_unaligned_be32(cdb_length, &cdb[6]);
457                 break;
458         case CISS_GET_RAID_MAP:
459                 request->data_direction = SOP_READ_FLAG;
460                 cdb[0] = CISS_READ;
461                 cdb[1] = CISS_GET_RAID_MAP;
462                 put_unaligned_be32(cdb_length, &cdb[6]);
463                 break;
464         case SA_FLUSH_CACHE:
465                 request->data_direction = SOP_WRITE_FLAG;
466                 cdb[0] = BMIC_WRITE;
467                 cdb[6] = BMIC_FLUSH_CACHE;
468                 put_unaligned_be16(cdb_length, &cdb[7]);
469                 break;
470         case BMIC_SENSE_DIAG_OPTIONS:
471                 cdb_length = 0;
472                 /* fall through */
473         case BMIC_IDENTIFY_CONTROLLER:
474         case BMIC_IDENTIFY_PHYSICAL_DEVICE:
475                 request->data_direction = SOP_READ_FLAG;
476                 cdb[0] = BMIC_READ;
477                 cdb[6] = cmd;
478                 put_unaligned_be16(cdb_length, &cdb[7]);
479                 break;
480         case BMIC_SET_DIAG_OPTIONS:
481                 cdb_length = 0;
482                 /* fall through */
483         case BMIC_WRITE_HOST_WELLNESS:
484                 request->data_direction = SOP_WRITE_FLAG;
485                 cdb[0] = BMIC_WRITE;
486                 cdb[6] = cmd;
487                 put_unaligned_be16(cdb_length, &cdb[7]);
488                 break;
489         case BMIC_CSMI_PASSTHRU:
490                 request->data_direction = SOP_BIDIRECTIONAL;
491                 cdb[0] = BMIC_WRITE;
492                 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
493                 cdb[6] = cmd;
494                 put_unaligned_be16(cdb_length, &cdb[7]);
495                 break;
496         default:
                dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
                        cmd);
499                 break;
500         }
501
502         switch (request->data_direction) {
503         case SOP_READ_FLAG:
504                 *dir = DMA_FROM_DEVICE;
505                 break;
506         case SOP_WRITE_FLAG:
507                 *dir = DMA_TO_DEVICE;
508                 break;
509         case SOP_NO_DIRECTION_FLAG:
510                 *dir = DMA_NONE;
511                 break;
512         default:
513                 *dir = DMA_BIDIRECTIONAL;
514                 break;
515         }
516
517         return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
518                 buffer, buffer_length, *dir);
519 }
520
521 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
522 {
523         io_request->scmd = NULL;
524         io_request->status = 0;
525         io_request->error_info = NULL;
526         io_request->raid_bypass = false;
527 }
528
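/*
 * Claim a free slot in the I/O request pool: a slot is free when its
 * refcount transitions 0 -> 1.  The starting index is only a hint, so the
 * unlocked reads/writes of next_io_request_slot are "benignly racy".
 */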
529 static struct pqi_io_request *pqi_alloc_io_request(
530         struct pqi_ctrl_info *ctrl_info)
531 {
532         struct pqi_io_request *io_request;
533         u16 i = ctrl_info->next_io_request_slot;        /* benignly racy */
534
535         while (1) {
536                 io_request = &ctrl_info->io_request_pool[i];
537                 if (atomic_inc_return(&io_request->refcount) == 1)
538                         break;
539                 atomic_dec(&io_request->refcount);
540                 i = (i + 1) % ctrl_info->max_io_slots;
541         }
542
543         /* benignly racy */
544         ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
545
546         pqi_reinit_io_request(io_request);
547
548         return io_request;
549 }
550
551 static void pqi_free_io_request(struct pqi_io_request *io_request)
552 {
553         atomic_dec(&io_request->refcount);
554 }
555
556 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
557                 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
558                 struct pqi_raid_error_info *error_info,
559                 unsigned long timeout_msecs)
560 {
561         int rc;
562         enum dma_data_direction dir;
563         struct pqi_raid_path_request request;
564
565         rc = pqi_build_raid_path_request(ctrl_info, &request,
566                 cmd, scsi3addr, buffer,
567                 buffer_length, vpd_page, &dir);
568         if (rc)
569                 return rc;
570
571         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
572                  0, error_info, timeout_msecs);
573
574         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
575         return rc;
576 }
577
578 /* Helper functions for pqi_send_scsi_raid_request */
579
580 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
581                 u8 cmd, void *buffer, size_t buffer_length)
582 {
583         return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
584                         buffer, buffer_length, 0, NULL, NO_TIMEOUT);
585 }
586
587 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
588                 u8 cmd, void *buffer, size_t buffer_length,
589                 struct pqi_raid_error_info *error_info)
590 {
591         return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
592                         buffer, buffer_length, 0, error_info, NO_TIMEOUT);
593 }
594
596 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
597                 struct bmic_identify_controller *buffer)
598 {
599         return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
600                         buffer, sizeof(*buffer));
601 }
602
603 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
604         u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
605 {
606         return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
607                 buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
608 }
609
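/*
 * Query the Supported VPD Pages page (first for its length, then in full)
 * and scan the returned page list for vpd_page.
 */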
610 static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
611         u8 *scsi3addr, u16 vpd_page)
612 {
613         int rc;
614         int i;
615         int pages;
616         unsigned char *buf, bufsize;
617
618         buf = kzalloc(256, GFP_KERNEL);
619         if (!buf)
620                 return false;
621
622         /* Get the size of the page list first */
623         rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
624                                 VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
625                                 buf, SCSI_VPD_HEADER_SZ);
626         if (rc != 0)
627                 goto exit_unsupported;
628
629         pages = buf[3];
630         if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
631                 bufsize = pages + SCSI_VPD_HEADER_SZ;
632         else
633                 bufsize = 255;
634
635         /* Get the whole VPD page list */
636         rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
637                                 VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
638                                 buf, bufsize);
639         if (rc != 0)
640                 goto exit_unsupported;
641
642         pages = buf[3];
643         for (i = 1; i <= pages; i++)
644                 if (buf[3 + i] == vpd_page)
645                         goto exit_supported;
646
647 exit_unsupported:
648         kfree(buf);
649         return false;
650
651 exit_supported:
652         kfree(buf);
653         return true;
654 }
655
656 static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
657         u8 *scsi3addr, u8 *device_id, int buflen)
658 {
659         int rc;
660         unsigned char *buf;
661
662         if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
663                 return 1; /* function not supported */
664
665         buf = kzalloc(64, GFP_KERNEL);
666         if (!buf)
667                 return -ENOMEM;
668
669         rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
670                                 VPD_PAGE | SCSI_VPD_DEVICE_ID,
671                                 buf, 64);
672         if (rc == 0) {
673                 if (buflen > 16)
674                         buflen = 16;
675                 memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
676         }
677
678         kfree(buf);
679
680         return rc;
681 }
682
683 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
684         struct pqi_scsi_dev *device,
685         struct bmic_identify_physical_device *buffer,
686         size_t buffer_length)
687 {
688         int rc;
689         enum dma_data_direction dir;
690         u16 bmic_device_index;
691         struct pqi_raid_path_request request;
692
693         rc = pqi_build_raid_path_request(ctrl_info, &request,
694                 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
695                 buffer_length, 0, &dir);
696         if (rc)
697                 return rc;
698
699         bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
700         request.cdb[2] = (u8)bmic_device_index;
701         request.cdb[9] = (u8)(bmic_device_index >> 8);
702
703         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
704                 0, NULL, NO_TIMEOUT);
705
706         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
707         return rc;
708 }
709
710 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
711         enum bmic_flush_cache_shutdown_event shutdown_event)
712 {
713         int rc;
714         struct bmic_flush_cache *flush_cache;
715
716         /*
717          * Don't bother trying to flush the cache if the controller is
718          * locked up.
719          */
720         if (pqi_ctrl_offline(ctrl_info))
721                 return -ENXIO;
722
723         flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
724         if (!flush_cache)
725                 return -ENOMEM;
726
727         flush_cache->shutdown_event = shutdown_event;
728
729         rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
730                 sizeof(*flush_cache));
731
732         kfree(flush_cache);
733
734         return rc;
735 }
736
737 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
738         struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
739         struct pqi_raid_error_info *error_info)
740 {
741         return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
742                 buffer, buffer_length, error_info);
743 }
744
#define PQI_FETCH_PTRAID_DATA (1UL << 31)
746
747 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
748 {
749         int rc;
750         struct bmic_diag_options *diag;
751
752         diag = kzalloc(sizeof(*diag), GFP_KERNEL);
753         if (!diag)
754                 return -ENOMEM;
755
756         rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
757                                         diag, sizeof(*diag));
758         if (rc)
759                 goto out;
760
761         diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
762
763         rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
764                                         diag, sizeof(*diag));
765 out:
766         kfree(diag);
767
768         return rc;
769 }
770
771 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
772         void *buffer, size_t buffer_length)
773 {
774         return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
775                                         buffer, buffer_length);
776 }
777
778 #pragma pack(1)
779
780 struct bmic_host_wellness_driver_version {
781         u8      start_tag[4];
782         u8      driver_version_tag[2];
783         __le16  driver_version_length;
784         char    driver_version[32];
785         u8      dont_write_tag[2];
786         u8      end_tag[2];
787 };
788
789 #pragma pack()
790
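/*
 * Report the running driver version to the controller's host-wellness store
 * as a tagged buffer ("<HW>" start tag, "DV" driver-version tag, "DW"
 * don't-write tag, "ZZ" end tag).
 */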
791 static int pqi_write_driver_version_to_host_wellness(
792         struct pqi_ctrl_info *ctrl_info)
793 {
794         int rc;
795         struct bmic_host_wellness_driver_version *buffer;
796         size_t buffer_length;
797
798         buffer_length = sizeof(*buffer);
799
800         buffer = kmalloc(buffer_length, GFP_KERNEL);
801         if (!buffer)
802                 return -ENOMEM;
803
804         buffer->start_tag[0] = '<';
805         buffer->start_tag[1] = 'H';
806         buffer->start_tag[2] = 'W';
807         buffer->start_tag[3] = '>';
808         buffer->driver_version_tag[0] = 'D';
809         buffer->driver_version_tag[1] = 'V';
810         put_unaligned_le16(sizeof(buffer->driver_version),
811                 &buffer->driver_version_length);
812         strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
813                 sizeof(buffer->driver_version) - 1);
814         buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
815         buffer->dont_write_tag[0] = 'D';
816         buffer->dont_write_tag[1] = 'W';
817         buffer->end_tag[0] = 'Z';
818         buffer->end_tag[1] = 'Z';
819
820         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
821
822         kfree(buffer);
823
824         return rc;
825 }
826
827 #pragma pack(1)
828
829 struct bmic_host_wellness_time {
830         u8      start_tag[4];
831         u8      time_tag[2];
832         __le16  time_length;
833         u8      time[8];
834         u8      dont_write_tag[2];
835         u8      end_tag[2];
836 };
837
838 #pragma pack()
839
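/*
 * Send the host's current local time to the controller as BCD-encoded
 * hour/minute/second and month/day/century/year-of-century, using the same
 * tagged host-wellness format as the driver-version update above.
 */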
840 static int pqi_write_current_time_to_host_wellness(
841         struct pqi_ctrl_info *ctrl_info)
842 {
843         int rc;
844         struct bmic_host_wellness_time *buffer;
845         size_t buffer_length;
846         time64_t local_time;
847         unsigned int year;
848         struct tm tm;
849
850         buffer_length = sizeof(*buffer);
851
852         buffer = kmalloc(buffer_length, GFP_KERNEL);
853         if (!buffer)
854                 return -ENOMEM;
855
856         buffer->start_tag[0] = '<';
857         buffer->start_tag[1] = 'H';
858         buffer->start_tag[2] = 'W';
859         buffer->start_tag[3] = '>';
860         buffer->time_tag[0] = 'T';
861         buffer->time_tag[1] = 'D';
862         put_unaligned_le16(sizeof(buffer->time),
863                 &buffer->time_length);
864
865         local_time = ktime_get_real_seconds();
866         time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
867         year = tm.tm_year + 1900;
868
869         buffer->time[0] = bin2bcd(tm.tm_hour);
870         buffer->time[1] = bin2bcd(tm.tm_min);
871         buffer->time[2] = bin2bcd(tm.tm_sec);
872         buffer->time[3] = 0;
873         buffer->time[4] = bin2bcd(tm.tm_mon + 1);
874         buffer->time[5] = bin2bcd(tm.tm_mday);
875         buffer->time[6] = bin2bcd(year / 100);
876         buffer->time[7] = bin2bcd(year % 100);
877
878         buffer->dont_write_tag[0] = 'D';
879         buffer->dont_write_tag[1] = 'W';
880         buffer->end_tag[0] = 'Z';
881         buffer->end_tag[1] = 'Z';
882
883         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
884
885         kfree(buffer);
886
887         return rc;
888 }
889
890 #define PQI_UPDATE_TIME_WORK_INTERVAL   (24UL * 60 * 60 * PQI_HZ)
891
892 static void pqi_update_time_worker(struct work_struct *work)
893 {
894         int rc;
895         struct pqi_ctrl_info *ctrl_info;
896
897         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
898                 update_time_work);
899
900         if (pqi_ctrl_offline(ctrl_info))
901                 return;
902
903         rc = pqi_write_current_time_to_host_wellness(ctrl_info);
904         if (rc)
905                 dev_warn(&ctrl_info->pci_dev->dev,
906                         "error updating time on controller\n");
907
908         schedule_delayed_work(&ctrl_info->update_time_work,
909                 PQI_UPDATE_TIME_WORK_INTERVAL);
910 }
911
912 static inline void pqi_schedule_update_time_worker(
913         struct pqi_ctrl_info *ctrl_info)
914 {
915         schedule_delayed_work(&ctrl_info->update_time_work, 0);
916 }
917
918 static inline void pqi_cancel_update_time_worker(
919         struct pqi_ctrl_info *ctrl_info)
920 {
921         cancel_delayed_work_sync(&ctrl_info->update_time_work);
922 }
923
924 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
925         void *buffer, size_t buffer_length)
926 {
927         return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
928                                         buffer_length);
929 }
930
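/*
 * Issue a CISS report-LUNs command twice: once with a header-only buffer to
 * learn the list length, then with a right-sized buffer.  If the list grew
 * between the two calls, retry with the new, larger length.
 */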
931 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
932         void **buffer)
933 {
934         int rc;
935         size_t lun_list_length;
936         size_t lun_data_length;
937         size_t new_lun_list_length;
938         void *lun_data = NULL;
939         struct report_lun_header *report_lun_header;
940
941         report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
942         if (!report_lun_header) {
943                 rc = -ENOMEM;
944                 goto out;
945         }
946
947         rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
948                 sizeof(*report_lun_header));
949         if (rc)
950                 goto out;
951
952         lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
953
954 again:
955         lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
956
957         lun_data = kmalloc(lun_data_length, GFP_KERNEL);
958         if (!lun_data) {
959                 rc = -ENOMEM;
960                 goto out;
961         }
962
963         if (lun_list_length == 0) {
964                 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
965                 goto out;
966         }
967
968         rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
969         if (rc)
970                 goto out;
971
972         new_lun_list_length = get_unaligned_be32(
973                 &((struct report_lun_header *)lun_data)->list_length);
974
975         if (new_lun_list_length > lun_list_length) {
976                 lun_list_length = new_lun_list_length;
977                 kfree(lun_data);
978                 goto again;
979         }
980
981 out:
982         kfree(report_lun_header);
983
984         if (rc) {
985                 kfree(lun_data);
986                 lun_data = NULL;
987         }
988
989         *buffer = lun_data;
990
991         return rc;
992 }
993
994 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
995         void **buffer)
996 {
997         return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
998                 buffer);
999 }
1000
1001 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
1002         void **buffer)
1003 {
1004         return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1005 }
1006
1007 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1008         struct report_phys_lun_extended **physdev_list,
1009         struct report_log_lun_extended **logdev_list)
1010 {
1011         int rc;
1012         size_t logdev_list_length;
1013         size_t logdev_data_length;
1014         struct report_log_lun_extended *internal_logdev_list;
1015         struct report_log_lun_extended *logdev_data;
1016         struct report_lun_header report_lun_header;
1017
1018         rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1019         if (rc)
1020                 dev_err(&ctrl_info->pci_dev->dev,
1021                         "report physical LUNs failed\n");
1022
1023         rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1024         if (rc)
1025                 dev_err(&ctrl_info->pci_dev->dev,
1026                         "report logical LUNs failed\n");
1027
1028         /*
1029          * Tack the controller itself onto the end of the logical device list.
1030          */
1031
1032         logdev_data = *logdev_list;
1033
1034         if (logdev_data) {
1035                 logdev_list_length =
1036                         get_unaligned_be32(&logdev_data->header.list_length);
1037         } else {
1038                 memset(&report_lun_header, 0, sizeof(report_lun_header));
1039                 logdev_data =
1040                         (struct report_log_lun_extended *)&report_lun_header;
1041                 logdev_list_length = 0;
1042         }
1043
1044         logdev_data_length = sizeof(struct report_lun_header) +
1045                 logdev_list_length;
1046
1047         internal_logdev_list = kmalloc(logdev_data_length +
1048                 sizeof(struct report_log_lun_extended), GFP_KERNEL);
1049         if (!internal_logdev_list) {
1050                 kfree(*logdev_list);
1051                 *logdev_list = NULL;
1052                 return -ENOMEM;
1053         }
1054
1055         memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1056         memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1057                 sizeof(struct report_log_lun_extended_entry));
1058         put_unaligned_be32(logdev_list_length +
1059                 sizeof(struct report_log_lun_extended_entry),
1060                 &internal_logdev_list->header.list_length);
1061
1062         kfree(*logdev_list);
1063         *logdev_list = internal_logdev_list;
1064
1065         return 0;
1066 }
1067
1068 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1069         int bus, int target, int lun)
1070 {
1071         device->bus = bus;
1072         device->target = target;
1073         device->lun = lun;
1074 }
1075
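/*
 * Derive bus/target/lun from the 8-byte CISS LUN address.  The controller
 * LUN, internal RAID volumes, and external RAID volumes each encode the
 * address differently; non-controller physical devices are deferred to the
 * SAS transport layer (see the comment at the end of this function).
 */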
1076 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1077 {
1078         u8 *scsi3addr;
1079         u32 lunid;
1080         int bus;
1081         int target;
1082         int lun;
1083
1084         scsi3addr = device->scsi3addr;
1085         lunid = get_unaligned_le32(scsi3addr);
1086
1087         if (pqi_is_hba_lunid(scsi3addr)) {
1088                 /* The specified device is the controller. */
1089                 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1090                 device->target_lun_valid = true;
1091                 return;
1092         }
1093
1094         if (pqi_is_logical_device(device)) {
1095                 if (device->is_external_raid_device) {
1096                         bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1097                         target = (lunid >> 16) & 0x3fff;
1098                         lun = lunid & 0xff;
1099                 } else {
1100                         bus = PQI_RAID_VOLUME_BUS;
1101                         target = 0;
1102                         lun = lunid & 0x3fff;
1103                 }
1104                 pqi_set_bus_target_lun(device, bus, target, lun);
1105                 device->target_lun_valid = true;
1106                 return;
1107         }
1108
1109         /*
1110          * Defer target and LUN assignment for non-controller physical devices
1111          * because the SAS transport layer will make these assignments later.
1112          */
1113         pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1114 }
1115
1116 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1117         struct pqi_scsi_dev *device)
1118 {
1119         int rc;
1120         u8 raid_level;
1121         u8 *buffer;
1122
1123         raid_level = SA_RAID_UNKNOWN;
1124
1125         buffer = kmalloc(64, GFP_KERNEL);
1126         if (buffer) {
1127                 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1128                         VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1129                 if (rc == 0) {
1130                         raid_level = buffer[8];
1131                         if (raid_level > SA_RAID_MAX)
1132                                 raid_level = SA_RAID_UNKNOWN;
1133                 }
1134                 kfree(buffer);
1135         }
1136
1137         device->raid_level = raid_level;
1138 }
1139
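/*
 * Sanity-check a firmware-supplied RAID map before enabling bypass I/O:
 * minimum structure size, the expected mirror count for RAID-1 and
 * RAID-1(ADM), and a non-zero row size for RAID-50/60 (indicated by
 * layout_map_count > 1 on a RAID-5/6 volume).
 */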
1140 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1141         struct pqi_scsi_dev *device, struct raid_map *raid_map)
1142 {
1143         char *err_msg;
1144         u32 raid_map_size;
1145         u32 r5or6_blocks_per_row;
1146
1147         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1148
1149         if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1150                 err_msg = "RAID map too small";
1151                 goto bad_raid_map;
1152         }
1153
1154         if (device->raid_level == SA_RAID_1) {
1155                 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1156                         err_msg = "invalid RAID-1 map";
1157                         goto bad_raid_map;
1158                 }
1159         } else if (device->raid_level == SA_RAID_ADM) {
1160                 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1161                         err_msg = "invalid RAID-1(ADM) map";
1162                         goto bad_raid_map;
1163                 }
1164         } else if ((device->raid_level == SA_RAID_5 ||
1165                 device->raid_level == SA_RAID_6) &&
1166                 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1167                 /* RAID 50/60 */
1168                 r5or6_blocks_per_row =
1169                         get_unaligned_le16(&raid_map->strip_size) *
1170                         get_unaligned_le16(&raid_map->data_disks_per_row);
1171                 if (r5or6_blocks_per_row == 0) {
1172                         err_msg = "invalid RAID-5 or RAID-6 map";
1173                         goto bad_raid_map;
1174                 }
1175         }
1176
1177         return 0;
1178
1179 bad_raid_map:
1180         dev_warn(&ctrl_info->pci_dev->dev,
1181                 "logical device %08x%08x %s\n",
1182                 *((u32 *)&device->scsi3addr),
1183                 *((u32 *)&device->scsi3addr[4]), err_msg);
1184
1185         return -EINVAL;
1186 }
1187
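/*
 * Fetch the volume's RAID map, re-issuing the request with a larger buffer
 * when the firmware reports a structure bigger than the default allocation.
 */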
1188 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1189         struct pqi_scsi_dev *device)
1190 {
1191         int rc;
1192         u32 raid_map_size;
1193         struct raid_map *raid_map;
1194
1195         raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1196         if (!raid_map)
1197                 return -ENOMEM;
1198
1199         rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1200                 device->scsi3addr, raid_map, sizeof(*raid_map),
1201                 0, NULL, NO_TIMEOUT);
1202
1203         if (rc)
1204                 goto error;
1205
1206         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1207
1208         if (raid_map_size > sizeof(*raid_map)) {
1209
1210                 kfree(raid_map);
1211
1212                 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1213                 if (!raid_map)
1214                         return -ENOMEM;
1215
1216                 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1217                         device->scsi3addr, raid_map, raid_map_size,
1218                         0, NULL, NO_TIMEOUT);
1219                 if (rc)
1220                         goto error;
1221
1222                 if (get_unaligned_le32(&raid_map->structure_size)
1223                         != raid_map_size) {
1224                         dev_warn(&ctrl_info->pci_dev->dev,
                                "Requested %u bytes, received %u bytes\n",
                                raid_map_size,
                                get_unaligned_le32(&raid_map->structure_size));
                        rc = -EINVAL;
                        goto error;
1229                 }
1230         }
1231
1232         rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1233         if (rc)
1234                 goto error;
1235
1236         device->raid_map = raid_map;
1237
1238         return 0;
1239
1240 error:
1241         kfree(raid_map);
1242
1243         return rc;
1244 }
1245
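/*
 * Read the vendor-specific bypass-status VPD page.  RAID bypass (I/O sent
 * directly to the member disks) is enabled only when the volume is both
 * configured and enabled for bypass and a valid RAID map can be obtained.
 */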
1246 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1247         struct pqi_scsi_dev *device)
1248 {
1249         int rc;
1250         u8 *buffer;
1251         u8 bypass_status;
1252
1253         buffer = kmalloc(64, GFP_KERNEL);
1254         if (!buffer)
1255                 return;
1256
1257         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1258                 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1259         if (rc)
1260                 goto out;
1261
1262 #define RAID_BYPASS_STATUS      4
1263 #define RAID_BYPASS_CONFIGURED  0x1
1264 #define RAID_BYPASS_ENABLED     0x2
1265
1266         bypass_status = buffer[RAID_BYPASS_STATUS];
1267         device->raid_bypass_configured =
1268                 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1269         if (device->raid_bypass_configured &&
1270                 (bypass_status & RAID_BYPASS_ENABLED) &&
1271                 pqi_get_raid_map(ctrl_info, device) == 0)
1272                 device->raid_bypass_enabled = true;
1273
1274 out:
1275         kfree(buffer);
1276 }
1277
1278 /*
1279  * Use vendor-specific VPD to determine online/offline status of a volume.
1280  */
1281
1282 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1283         struct pqi_scsi_dev *device)
1284 {
1285         int rc;
1286         size_t page_length;
1287         u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1288         bool volume_offline = true;
1289         u32 volume_flags;
1290         struct ciss_vpd_logical_volume_status *vpd;
1291
1292         vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1293         if (!vpd)
1294                 goto no_buffer;
1295
1296         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1297                 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1298         if (rc)
1299                 goto out;
1300
1301         if (vpd->page_code != CISS_VPD_LV_STATUS)
1302                 goto out;
1303
1304         page_length = offsetof(struct ciss_vpd_logical_volume_status,
1305                 volume_status) + vpd->page_length;
1306         if (page_length < sizeof(*vpd))
1307                 goto out;
1308
1309         volume_status = vpd->volume_status;
1310         volume_flags = get_unaligned_be32(&vpd->flags);
1311         volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1312
1313 out:
1314         kfree(vpd);
1315 no_buffer:
1316         device->volume_status = volume_status;
1317         device->volume_offline = volume_offline;
1318 }
1319
1320 #define PQI_INQUIRY_PAGE0_RETRIES       3
1321
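/*
 * Interrogate a device with a standard INQUIRY (retrying aborted commands
 * for physical devices) and record its type, vendor/model strings, and, for
 * internal logical volumes, RAID level, bypass status, and volume status.
 */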
1322 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1323         struct pqi_scsi_dev *device)
1324 {
1325         int rc;
1326         u8 *buffer;
1327         unsigned int retries;
1328
1329         if (device->is_expander_smp_device)
1330                 return 0;
1331
1332         buffer = kmalloc(64, GFP_KERNEL);
1333         if (!buffer)
1334                 return -ENOMEM;
1335
1336         /* Send an inquiry to the device to see what it is. */
1337         for (retries = 0;;) {
1338                 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
1339                         buffer, 64);
1340                 if (rc == 0)
1341                         break;
1342                 if (pqi_is_logical_device(device) ||
1343                         rc != PQI_CMD_STATUS_ABORTED ||
1344                         ++retries > PQI_INQUIRY_PAGE0_RETRIES)
1345                         goto out;
1346         }
1347
1348         scsi_sanitize_inquiry_string(&buffer[8], 8);
1349         scsi_sanitize_inquiry_string(&buffer[16], 16);
1350
1351         device->devtype = buffer[0] & 0x1f;
1352         memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1353         memcpy(device->model, &buffer[16], sizeof(device->model));
1354
1355         if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1356                 if (device->is_external_raid_device) {
1357                         device->raid_level = SA_RAID_UNKNOWN;
1358                         device->volume_status = CISS_LV_OK;
1359                         device->volume_offline = false;
1360                 } else {
1361                         pqi_get_raid_level(ctrl_info, device);
1362                         pqi_get_raid_bypass_status(ctrl_info, device);
1363                         pqi_get_volume_status(ctrl_info, device);
1364                 }
1365         }
1366
1367         if (pqi_get_device_id(ctrl_info, device->scsi3addr,
1368                 device->unique_id, sizeof(device->unique_id)) < 0)
1369                 dev_warn(&ctrl_info->pci_dev->dev,
1370                         "Can't get device id for scsi %d:%d:%d:%d\n",
1371                         ctrl_info->scsi_host->host_no,
1372                         device->bus, device->target,
1373                         device->lun);
1374
1375 out:
1376         kfree(buffer);
1377
1378         return rc;
1379 }
1380
1381 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1382         struct pqi_scsi_dev *device,
1383         struct bmic_identify_physical_device *id_phys)
1384 {
1385         int rc;
1386
1387         memset(id_phys, 0, sizeof(*id_phys));
1388
1389         rc = pqi_identify_physical_device(ctrl_info, device,
1390                 id_phys, sizeof(*id_phys));
1391         if (rc) {
1392                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1393                 return;
1394         }
1395
1396         device->queue_depth =
1397                 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1398         device->device_type = id_phys->device_type;
1399         device->active_path_index = id_phys->active_path_number;
1400         device->path_map = id_phys->redundant_path_present_map;
1401         memcpy(&device->box,
1402                 &id_phys->alternate_paths_phys_box_on_port,
1403                 sizeof(device->box));
1404         memcpy(&device->phys_connector,
1405                 &id_phys->alternate_paths_phys_connector,
1406                 sizeof(device->phys_connector));
1407         device->bay = id_phys->phys_bay_in_box;
1408 }
1409
1410 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1411         struct pqi_scsi_dev *device)
1412 {
1413         char *status;
1414         static const char unknown_state_str[] =
1415                 "Volume is in an unknown state (%u)";
1416         char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1417
1418         switch (device->volume_status) {
1419         case CISS_LV_OK:
1420                 status = "Volume online";
1421                 break;
1422         case CISS_LV_FAILED:
1423                 status = "Volume failed";
1424                 break;
1425         case CISS_LV_NOT_CONFIGURED:
1426                 status = "Volume not configured";
1427                 break;
1428         case CISS_LV_DEGRADED:
1429                 status = "Volume degraded";
1430                 break;
1431         case CISS_LV_READY_FOR_RECOVERY:
1432                 status = "Volume ready for recovery operation";
1433                 break;
1434         case CISS_LV_UNDERGOING_RECOVERY:
1435                 status = "Volume undergoing recovery";
1436                 break;
1437         case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1438                 status = "Wrong physical drive was replaced";
1439                 break;
1440         case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1441                 status = "A physical drive not properly connected";
1442                 break;
1443         case CISS_LV_HARDWARE_OVERHEATING:
1444                 status = "Hardware is overheating";
1445                 break;
1446         case CISS_LV_HARDWARE_HAS_OVERHEATED:
1447                 status = "Hardware has overheated";
1448                 break;
1449         case CISS_LV_UNDERGOING_EXPANSION:
1450                 status = "Volume undergoing expansion";
1451                 break;
1452         case CISS_LV_NOT_AVAILABLE:
1453                 status = "Volume waiting for transforming volume";
1454                 break;
1455         case CISS_LV_QUEUED_FOR_EXPANSION:
1456                 status = "Volume queued for expansion";
1457                 break;
1458         case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1459                 status = "Volume disabled due to SCSI ID conflict";
1460                 break;
1461         case CISS_LV_EJECTED:
1462                 status = "Volume has been ejected";
1463                 break;
1464         case CISS_LV_UNDERGOING_ERASE:
1465                 status = "Volume undergoing background erase";
1466                 break;
1467         case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1468                 status = "Volume ready for predictive spare rebuild";
1469                 break;
1470         case CISS_LV_UNDERGOING_RPI:
1471                 status = "Volume undergoing rapid parity initialization";
1472                 break;
1473         case CISS_LV_PENDING_RPI:
1474                 status = "Volume queued for rapid parity initialization";
1475                 break;
1476         case CISS_LV_ENCRYPTED_NO_KEY:
1477                 status = "Encrypted volume inaccessible - key not present";
1478                 break;
1479         case CISS_LV_UNDERGOING_ENCRYPTION:
1480                 status = "Volume undergoing encryption process";
1481                 break;
1482         case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1483                 status = "Volume undergoing encryption re-keying process";
1484                 break;
1485         case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1486                 status = "Volume encrypted but encryption is disabled";
1487                 break;
1488         case CISS_LV_PENDING_ENCRYPTION:
1489                 status = "Volume pending migration to encrypted state";
1490                 break;
1491         case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1492                 status = "Volume pending encryption rekeying";
1493                 break;
1494         case CISS_LV_NOT_SUPPORTED:
1495                 status = "Volume not supported on this controller";
1496                 break;
1497         case CISS_LV_STATUS_UNAVAILABLE:
1498                 status = "Volume status not available";
1499                 break;
1500         default:
1501                 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1502                         unknown_state_str, device->volume_status);
1503                 status = unknown_state_buffer;
1504                 break;
1505         }
1506
1507         dev_info(&ctrl_info->pci_dev->dev,
1508                 "scsi %d:%d:%d:%d %s\n",
1509                 ctrl_info->scsi_host->host_no,
1510                 device->bus, device->target, device->lun, status);
1511 }
1512
1513 static void pqi_rescan_worker(struct work_struct *work)
1514 {
1515         struct pqi_ctrl_info *ctrl_info;
1516
1517         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1518                 rescan_work);
1519
1520         pqi_scan_scsi_devices(ctrl_info);
1521 }
1522
1523 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1524         struct pqi_scsi_dev *device)
1525 {
1526         int rc;
1527
1528         if (pqi_is_logical_device(device))
1529                 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1530                         device->target, device->lun);
1531         else
1532                 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1533
1534         return rc;
1535 }
1536
1537 #define PQI_PENDING_IO_TIMEOUT_SECS     20
1538
1539 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1540         struct pqi_scsi_dev *device)
1541 {
1542         int rc;
1543
1544         pqi_device_remove_start(device);
1545
1546         rc = pqi_device_wait_for_pending_io(ctrl_info, device,
1547                 PQI_PENDING_IO_TIMEOUT_SECS);
1548         if (rc)
1549                 dev_err(&ctrl_info->pci_dev->dev,
1550                         "scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
1551                         ctrl_info->scsi_host->host_no, device->bus,
1552                         device->target, device->lun,
1553                         atomic_read(&device->scsi_cmds_outstanding));
1554
1555         if (pqi_is_logical_device(device))
1556                 scsi_remove_device(device->sdev);
1557         else
1558                 pqi_remove_sas_device(device);
1559 }
1560
1561 /* Assumes the SCSI device list lock is held. */
1562
1563 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1564         int bus, int target, int lun)
1565 {
1566         struct pqi_scsi_dev *device;
1567
1568         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1569                 scsi_device_list_entry)
1570                 if (device->bus == bus && device->target == target &&
1571                         device->lun == lun)
1572                         return device;
1573
1574         return NULL;
1575 }
1576
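/*
 * Two devices are considered equal if both are physical or both are
 * logical, and their WWIDs (physical) or volume IDs (logical) match.
 */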
1577 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1578         struct pqi_scsi_dev *dev2)
1579 {
1580         if (dev1->is_physical_device != dev2->is_physical_device)
1581                 return false;
1582
1583         if (dev1->is_physical_device)
1584                 return dev1->wwid == dev2->wwid;
1585
1586         return memcmp(dev1->volume_id, dev2->volume_id,
1587                 sizeof(dev1->volume_id)) == 0;
1588 }
1589
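/*
 * DEVICE_NOT_FOUND: no device with a matching SCSI address is in the list.
 * DEVICE_SAME: the address matches and the device identity is unchanged.
 * DEVICE_CHANGED: the address matches but the identity differs, or the
 * volume has gone offline.
 */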
1590 enum pqi_find_result {
1591         DEVICE_NOT_FOUND,
1592         DEVICE_CHANGED,
1593         DEVICE_SAME,
1594 };
1595
1596 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1597         struct pqi_scsi_dev *device_to_find,
1598         struct pqi_scsi_dev **matching_device)
1599 {
1600         struct pqi_scsi_dev *device;
1601
1602         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1603                 scsi_device_list_entry) {
1604                 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1605                         device->scsi3addr)) {
1606                         *matching_device = device;
1607                         if (pqi_device_equal(device_to_find, device)) {
1608                                 if (device_to_find->volume_offline)
1609                                         return DEVICE_CHANGED;
1610                                 return DEVICE_SAME;
1611                         }
1612                         return DEVICE_CHANGED;
1613                 }
1614         }
1615
1616         return DEVICE_NOT_FOUND;
1617 }
1618
1619 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1620 {
1621         if (device->is_expander_smp_device)
1622                 return "Enclosure SMP    ";
1623
1624         return scsi_device_type(device->devtype);
1625 }
1626
1627 #define PQI_DEV_INFO_BUFFER_LENGTH      128
1628
1629 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1630         char *action, struct pqi_scsi_dev *device)
1631 {
1632         ssize_t count;
1633         char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1634
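        /*
         * scnprintf() is used so that count can never exceed the buffer
         * size on truncation, which keeps the buffer + count arithmetic
         * below in bounds.
         */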
1635         count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1636                 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1637
1638         if (device->target_lun_valid)
1639                 count += scnprintf(buffer + count,
1640                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1641                         "%d:%d",
1642                         device->target,
1643                         device->lun);
1644         else
1645                 count += scnprintf(buffer + count,
1646                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1647                         "-:-");
1648
1649         if (pqi_is_logical_device(device))
1650                 count += scnprintf(buffer + count,
1651                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1652                         " %08x%08x",
1653                         *((u32 *)&device->scsi3addr),
1654                         *((u32 *)&device->scsi3addr[4]));
1655         else
1656                 count += scnprintf(buffer + count,
1657                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1658                         " %016llx", device->sas_address);
1659
1660         count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1661                 " %s %.8s %.16s ",
1662                 pqi_device_type(device),
1663                 device->vendor,
1664                 device->model);
1665
1666         if (pqi_is_logical_device(device)) {
1667                 if (device->devtype == TYPE_DISK)
1668                         count += scnprintf(buffer + count,
1669                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
1670                                 "SSDSmartPathCap%c En%c %-12s",
1671                                 device->raid_bypass_configured ? '+' : '-',
1672                                 device->raid_bypass_enabled ? '+' : '-',
1673                                 pqi_raid_level_to_string(device->raid_level));
1674         } else {
1675                 count += scnprintf(buffer + count,
1676                         PQI_DEV_INFO_BUFFER_LENGTH - count,
1677                         "AIO%c", device->aio_enabled ? '+' : '-');
1678                 if (device->devtype == TYPE_DISK ||
1679                         device->devtype == TYPE_ZBC)
1680                         count += scnprintf(buffer + count,
1681                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
1682                                 " qd=%-6d", device->queue_depth);
1683         }
1684
1685         dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
1686 }
1687
1688 /* Assumes the SCSI device list lock is held. */
1689
1690 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1691         struct pqi_scsi_dev *new_device)
1692 {
1693         existing_device->devtype = new_device->devtype;
1694         existing_device->device_type = new_device->device_type;
1695         existing_device->bus = new_device->bus;
1696         if (new_device->target_lun_valid) {
1697                 existing_device->target = new_device->target;
1698                 existing_device->lun = new_device->lun;
1699                 existing_device->target_lun_valid = true;
1700         }
1701
1702         /* By definition, the scsi3addr and wwid fields are already the same. */
1703
1704         existing_device->is_physical_device = new_device->is_physical_device;
1705         existing_device->is_external_raid_device =
1706                 new_device->is_external_raid_device;
1707         existing_device->is_expander_smp_device =
1708                 new_device->is_expander_smp_device;
1709         existing_device->aio_enabled = new_device->aio_enabled;
1710         memcpy(existing_device->vendor, new_device->vendor,
1711                 sizeof(existing_device->vendor));
1712         memcpy(existing_device->model, new_device->model,
1713                 sizeof(existing_device->model));
1714         existing_device->sas_address = new_device->sas_address;
1715         existing_device->raid_level = new_device->raid_level;
1716         existing_device->queue_depth = new_device->queue_depth;
1717         existing_device->aio_handle = new_device->aio_handle;
1718         existing_device->volume_status = new_device->volume_status;
1719         existing_device->active_path_index = new_device->active_path_index;
1720         existing_device->path_map = new_device->path_map;
1721         existing_device->bay = new_device->bay;
1722         memcpy(existing_device->box, new_device->box,
1723                 sizeof(existing_device->box));
1724         memcpy(existing_device->phys_connector, new_device->phys_connector,
1725                 sizeof(existing_device->phys_connector));
1726         existing_device->offload_to_mirror = 0;
1727         kfree(existing_device->raid_map);
1728         existing_device->raid_map = new_device->raid_map;
1729         existing_device->raid_bypass_configured =
1730                 new_device->raid_bypass_configured;
1731         existing_device->raid_bypass_enabled =
1732                 new_device->raid_bypass_enabled;
1733         existing_device->device_offline = false;
1734
1735         /* Transfer raid_map ownership so it isn't freed with new_device. */
1736         new_device->raid_map = NULL;
1737 }
1738
1739 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1740 {
1741         if (device) {
1742                 kfree(device->raid_map);
1743                 kfree(device);
1744         }
1745 }
1746
1747 /*
1748  * Called when exposing a new device to the OS fails, so that the internal
1749  * SCSI device list can be re-adjusted to match the SCSI mid-layer's view.
1750  */
1751
1752 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1753         struct pqi_scsi_dev *device)
1754 {
1755         unsigned long flags;
1756
1757         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1758         list_del(&device->scsi_device_list_entry);
1759         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1760
1761         /* Allow the device structure to be freed later. */
1762         device->keep_device = false;
1763 }
1764
1765 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
1766 {
1767         if (device->is_expander_smp_device)
1768                 return device->sas_port != NULL;
1769
1770         return device->sdev != NULL;
1771 }
1772
1773 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1774         struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1775 {
1776         int rc;
1777         unsigned int i;
1778         unsigned long flags;
1779         enum pqi_find_result find_result;
1780         struct pqi_scsi_dev *device;
1781         struct pqi_scsi_dev *next;
1782         struct pqi_scsi_dev *matching_device;
1783         LIST_HEAD(add_list);
1784         LIST_HEAD(delete_list);
1785
1786         /*
1787          * The idea here is to do as little work as possible while holding the
1788          * spinlock.  That's why we go to great pains to defer anything other
1789          * than updating the internal device list until after we release the
1790          * spinlock.
1791          */
1792
1793         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1794
1795         /* Assume that all devices in the existing list have gone away. */
1796         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1797                 scsi_device_list_entry)
1798                 device->device_gone = true;
1799
1800         for (i = 0; i < num_new_devices; i++) {
1801                 device = new_device_list[i];
1802
1803                 find_result = pqi_scsi_find_entry(ctrl_info, device,
1804                                                 &matching_device);
1805
1806                 switch (find_result) {
1807                 case DEVICE_SAME:
1808                         /*
1809                          * The newly found device is already in the existing
1810                          * device list.
1811                          */
1812                         device->new_device = false;
1813                         matching_device->device_gone = false;
1814                         pqi_scsi_update_device(matching_device, device);
1815                         break;
1816                 case DEVICE_NOT_FOUND:
1817                         /*
1818                          * The newly found device is NOT in the existing device
1819                          * list.
1820                          */
1821                         device->new_device = true;
1822                         break;
1823                 case DEVICE_CHANGED:
1824                         /*
1825                          * The original device has gone away and we need to add
1826                          * the new device.
1827                          */
1828                         device->new_device = true;
1829                         break;
1830                 }
1831         }
1832
1833         /* Process all devices that have gone away. */
1834         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1835                 scsi_device_list_entry) {
1836                 if (device->device_gone) {
1837                         list_del(&device->scsi_device_list_entry);
1838                         list_add_tail(&device->delete_list_entry, &delete_list);
1839                 }
1840         }
1841
1842         /* Process all new devices. */
1843         for (i = 0; i < num_new_devices; i++) {
1844                 device = new_device_list[i];
1845                 if (!device->new_device)
1846                         continue;
1847                 if (device->volume_offline)
1848                         continue;
1849                 list_add_tail(&device->scsi_device_list_entry,
1850                         &ctrl_info->scsi_device_list);
1851                 list_add_tail(&device->add_list_entry, &add_list);
1852                 /* To prevent this device structure from being freed later. */
1853                 device->keep_device = true;
1854         }
1855
1856         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1857
1858         if (pqi_ctrl_in_ofa(ctrl_info))
1859                 pqi_ctrl_ofa_done(ctrl_info);
1860
1861         /* Remove all devices that have gone away. */
1862         list_for_each_entry_safe(device, next, &delete_list,
1863                 delete_list_entry) {
1864                 if (device->volume_offline) {
1865                         pqi_dev_info(ctrl_info, "offline", device);
1866                         pqi_show_volume_status(ctrl_info, device);
1867                 } else {
1868                         pqi_dev_info(ctrl_info, "removed", device);
1869                 }
1870                 if (pqi_is_device_added(device))
1871                         pqi_remove_device(ctrl_info, device);
1872                 list_del(&device->delete_list_entry);
1873                 pqi_free_device(device);
1874         }
1875
1876         /*
1877          * Notify the SCSI ML if the queue depth of any existing device has
1878          * changed.
1879          */
1880         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1881                 scsi_device_list_entry) {
1882                 if (device->sdev && device->queue_depth !=
1883                         device->advertised_queue_depth) {
1884                         device->advertised_queue_depth = device->queue_depth;
1885                         scsi_change_queue_depth(device->sdev,
1886                                 device->advertised_queue_depth);
1887                 }
1888         }
1889
1890         /* Expose any new devices. */
1891         list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1892                 if (!pqi_is_device_added(device)) {
1893                         pqi_dev_info(ctrl_info, "added", device);
1894                         rc = pqi_add_device(ctrl_info, device);
1895                         if (rc) {
1896                                 dev_warn(&ctrl_info->pci_dev->dev,
1897                                         "scsi %d:%d:%d:%d addition failed, device not added\n",
1898                                         ctrl_info->scsi_host->host_no,
1899                                         device->bus, device->target,
1900                                         device->lun);
1901                                 pqi_fixup_botched_add(ctrl_info, device);
1902                         }
1903                 }
1904         }
1905 }
1906
1907 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1908 {
1909         bool is_supported;
1910
1911         if (device->is_expander_smp_device)
1912                 return true;
1913
1914         is_supported = false;
1915
1916         switch (device->devtype) {
1917         case TYPE_DISK:
1918         case TYPE_ZBC:
1919         case TYPE_TAPE:
1920         case TYPE_MEDIUM_CHANGER:
1921         case TYPE_ENCLOSURE:
1922                 is_supported = true;
1923                 break;
1924         case TYPE_RAID:
1925                 /*
1926                  * Only support the HBA controller itself as a RAID
1927                  * controller.  If it's a RAID controller other than
1928                  * the HBA itself (an external RAID controller, for
1929                  * example), we don't support it.
1930                  */
1931                 if (pqi_is_hba_lunid(device->scsi3addr))
1932                         is_supported = true;
1933                 break;
1934         }
1935
1936         return is_supported;
1937 }
1938
1939 static inline bool pqi_skip_device(u8 *scsi3addr)
1940 {
1941         /* Ignore all masked devices. */
1942         if (MASKED_DEVICE(scsi3addr))
1943                 return true;
1944
1945         return false;
1946 }
1947
1948 static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
1949 {
1950         if (!device->is_physical_device)
1951                 return false;
1952
1953         if (device->is_expander_smp_device)
1954                 return true;
1955
1956         switch (device->devtype) {
1957         case TYPE_DISK:
1958         case TYPE_ZBC:
1959         case TYPE_ENCLOSURE:
1960                 return true;
1961         }
1962
1963         return false;
1964 }
1965
1966 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1967 {
1968         return !device->is_physical_device ||
1969                 !pqi_skip_device(device->scsi3addr);
1970 }
1971
1972 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1973 {
1974         int i;
1975         int rc;
1976         LIST_HEAD(new_device_list_head);
1977         struct report_phys_lun_extended *physdev_list = NULL;
1978         struct report_log_lun_extended *logdev_list = NULL;
1979         struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1980         struct report_log_lun_extended_entry *log_lun_ext_entry;
1981         struct bmic_identify_physical_device *id_phys = NULL;
1982         u32 num_physicals;
1983         u32 num_logicals;
1984         struct pqi_scsi_dev **new_device_list = NULL;
1985         struct pqi_scsi_dev *device;
1986         struct pqi_scsi_dev *next;
1987         unsigned int num_new_devices;
1988         unsigned int num_valid_devices;
1989         bool is_physical_device;
1990         u8 *scsi3addr;
1991         static char *out_of_memory_msg =
1992                 "failed to allocate memory, device discovery stopped";
1993
1994         rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1995         if (rc)
1996                 goto out;
1997
1998         if (physdev_list)
1999                 num_physicals =
2000                         get_unaligned_be32(&physdev_list->header.list_length)
2001                                 / sizeof(physdev_list->lun_entries[0]);
2002         else
2003                 num_physicals = 0;
2004
2005         if (logdev_list)
2006                 num_logicals =
2007                         get_unaligned_be32(&logdev_list->header.list_length)
2008                                 / sizeof(logdev_list->lun_entries[0]);
2009         else
2010                 num_logicals = 0;
2011
2012         if (num_physicals) {
2013                 /*
2014                  * We need this buffer for calls to pqi_get_physical_disk_info()
2015                  * below.  We allocate it here instead of inside
2016                  * pqi_get_physical_disk_info() because it's a fairly large
2017                  * buffer.
2018                  */
2019                 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2020                 if (!id_phys) {
2021                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2022                                 out_of_memory_msg);
2023                         rc = -ENOMEM;
2024                         goto out;
2025                 }
2026         }
2027
2028         num_new_devices = num_physicals + num_logicals;
2029
2030         new_device_list = kmalloc_array(num_new_devices,
2031                                         sizeof(*new_device_list),
2032                                         GFP_KERNEL);
2033         if (!new_device_list) {
2034                 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2035                 rc = -ENOMEM;
2036                 goto out;
2037         }
2038
2039         for (i = 0; i < num_new_devices; i++) {
2040                 device = kzalloc(sizeof(*device), GFP_KERNEL);
2041                 if (!device) {
2042                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2043                                 out_of_memory_msg);
2044                         rc = -ENOMEM;
2045                         goto out;
2046                 }
2047                 list_add_tail(&device->new_device_list_entry,
2048                         &new_device_list_head);
2049         }
2050
2051         device = NULL;
2052         num_valid_devices = 0;
2053
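        /*
         * The combined index space is ordered with all physical LUNs
         * first, followed by all logical LUNs.
         */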
2054         for (i = 0; i < num_new_devices; i++) {
2055
2056                 if (i < num_physicals) {
2057                         is_physical_device = true;
2058                         phys_lun_ext_entry = &physdev_list->lun_entries[i];
2059                         log_lun_ext_entry = NULL;
2060                         scsi3addr = phys_lun_ext_entry->lunid;
2061                 } else {
2062                         is_physical_device = false;
2063                         phys_lun_ext_entry = NULL;
2064                         log_lun_ext_entry =
2065                                 &logdev_list->lun_entries[i - num_physicals];
2066                         scsi3addr = log_lun_ext_entry->lunid;
2067                 }
2068
2069                 if (is_physical_device && pqi_skip_device(scsi3addr))
2070                         continue;
2071
2072                 if (device)
2073                         device = list_next_entry(device, new_device_list_entry);
2074                 else
2075                         device = list_first_entry(&new_device_list_head,
2076                                 struct pqi_scsi_dev, new_device_list_entry);
2077
2078                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2079                 device->is_physical_device = is_physical_device;
2080                 if (is_physical_device) {
2081                         if (phys_lun_ext_entry->device_type ==
2082                                 SA_EXPANDER_SMP_DEVICE)
2083                                 device->is_expander_smp_device = true;
2084                 } else {
2085                         device->is_external_raid_device =
2086                                 pqi_is_external_raid_addr(scsi3addr);
2087                 }
2088
2089                 /* Gather information about the device. */
2090                 rc = pqi_get_device_info(ctrl_info, device);
2091                 if (rc == -ENOMEM) {
2092                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2093                                 out_of_memory_msg);
2094                         goto out;
2095                 }
2096                 if (rc) {
2097                         if (device->is_physical_device)
2098                                 dev_warn(&ctrl_info->pci_dev->dev,
2099                                         "obtaining device info failed, skipping physical device %016llx\n",
2100                                         get_unaligned_be64(
2101                                                 &phys_lun_ext_entry->wwid));
2102                         else
2103                                 dev_warn(&ctrl_info->pci_dev->dev,
2104                                         "obtaining device info failed, skipping logical device %08x%08x\n",
2105                                         *((u32 *)&device->scsi3addr),
2106                                         *((u32 *)&device->scsi3addr[4]));
2107                         rc = 0;
2108                         continue;
2109                 }
2110
2111                 if (!pqi_is_supported_device(device))
2112                         continue;
2113
2114                 pqi_assign_bus_target_lun(device);
2115
2116                 if (device->is_physical_device) {
2117                         device->wwid = phys_lun_ext_entry->wwid;
2118                         if ((phys_lun_ext_entry->device_flags &
2119                                 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
2120                                 phys_lun_ext_entry->aio_handle) {
2121                                 device->aio_enabled = true;
2122                                 device->aio_handle =
2123                                         phys_lun_ext_entry->aio_handle;
2124                         }
2125                         if (device->devtype == TYPE_DISK ||
2126                                 device->devtype == TYPE_ZBC) {
2127                                 pqi_get_physical_disk_info(ctrl_info,
2128                                         device, id_phys);
2129                         }
2130                 } else {
2131                         memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2132                                 sizeof(device->volume_id));
2133                 }
2134
2135                 if (pqi_is_device_with_sas_address(device))
2136                         device->sas_address = get_unaligned_be64(&device->wwid);
2137
2138                 new_device_list[num_valid_devices++] = device;
2139         }
2140
2141         pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2142
2143 out:
2144         list_for_each_entry_safe(device, next, &new_device_list_head,
2145                 new_device_list_entry) {
2146                 if (device->keep_device)
2147                         continue;
2148                 list_del(&device->new_device_list_entry);
2149                 pqi_free_device(device);
2150         }
2151
2152         kfree(new_device_list);
2153         kfree(physdev_list);
2154         kfree(logdev_list);
2155         kfree(id_phys);
2156
2157         return rc;
2158 }
2159
2160 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2161 {
2162         unsigned long flags;
2163         struct pqi_scsi_dev *device;
2164
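        /*
         * Detach one device at a time so that the list spinlock is
         * dropped before the remove and free operations, which may sleep.
         */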
2165         while (1) {
2166                 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2167
2168                 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
2169                         struct pqi_scsi_dev, scsi_device_list_entry);
2170                 if (device)
2171                         list_del(&device->scsi_device_list_entry);
2172
2173                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
2174                         flags);
2175
2176                 if (!device)
2177                         break;
2178
2179                 if (pqi_is_device_added(device))
2180                         pqi_remove_device(ctrl_info, device);
2181                 pqi_free_device(device);
2182         }
2183 }
2184
2185 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2186 {
2187         int rc;
2188
2189         if (pqi_ctrl_offline(ctrl_info))
2190                 return -ENXIO;
2191
2192         mutex_lock(&ctrl_info->scan_mutex);
2193
2194         rc = pqi_update_scsi_devices(ctrl_info);
2195         if (rc)
2196                 pqi_schedule_rescan_worker_delayed(ctrl_info);
2197
2198         mutex_unlock(&ctrl_info->scan_mutex);
2199
2200         return rc;
2201 }
2202
2203 static void pqi_scan_start(struct Scsi_Host *shost)
2204 {
2205         struct pqi_ctrl_info *ctrl_info;
2206
2207         ctrl_info = shost_to_hba(shost);
2208         if (pqi_ctrl_in_ofa(ctrl_info))
2209                 return;
2210
2211         pqi_scan_scsi_devices(ctrl_info);
2212 }
2213
2214 /* Returns TRUE if scan is finished. */
2215
2216 static int pqi_scan_finished(struct Scsi_Host *shost,
2217         unsigned long elapsed_time)
2218 {
2219         struct pqi_ctrl_info *ctrl_info;
2220
2221         ctrl_info = shost_priv(shost);
2222
2223         return !mutex_is_locked(&ctrl_info->scan_mutex);
2224 }
2225
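/*
 * The next three helpers wait for an in-progress operation to finish by
 * briefly acquiring and releasing the mutex that the operation holds for
 * its duration.
 */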
2226 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2227 {
2228         mutex_lock(&ctrl_info->scan_mutex);
2229         mutex_unlock(&ctrl_info->scan_mutex);
2230 }
2231
2232 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2233 {
2234         mutex_lock(&ctrl_info->lun_reset_mutex);
2235         mutex_unlock(&ctrl_info->lun_reset_mutex);
2236 }
2237
2238 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
2239 {
2240         mutex_lock(&ctrl_info->ofa_mutex);
2241         mutex_unlock(&ctrl_info->ofa_mutex);
2242 }
2243
2244 static inline void pqi_set_encryption_info(
2245         struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2246         u64 first_block)
2247 {
2248         u32 volume_blk_size;
2249
2250         /*
2251          * Set the encryption tweak values based on logical block address.
2252          * If the block size is 512, the tweak value is equal to the LBA.
2253          * For other block sizes, tweak value is (LBA * block size) / 512.
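         * For example, with a 4096-byte block size, LBA 100 yields a
         * tweak of (100 * 4096) / 512 = 800.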
2254          */
2255         volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2256         if (volume_blk_size != 512)
2257                 first_block = (first_block * volume_blk_size) / 512;
2258
2259         encryption_info->data_encryption_key_index =
2260                 get_unaligned_le16(&raid_map->data_encryption_key_index);
2261         encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2262         encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2263 }
2264
2265 /*
2266  * Attempt to perform RAID bypass mapping for a logical volume I/O.
2267  */
2268
2269 #define PQI_RAID_BYPASS_INELIGIBLE      1
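/*
 * Returning PQI_RAID_BYPASS_INELIGIBLE tells the caller to submit the
 * request via the normal RAID path instead.
 */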
2270
2271 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2272         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2273         struct pqi_queue_group *queue_group)
2274 {
2275         struct raid_map *raid_map;
2276         bool is_write = false;
2277         u32 map_index;
2278         u64 first_block;
2279         u64 last_block;
2280         u32 block_cnt;
2281         u32 blocks_per_row;
2282         u64 first_row;
2283         u64 last_row;
2284         u32 first_row_offset;
2285         u32 last_row_offset;
2286         u32 first_column;
2287         u32 last_column;
2288         u64 r0_first_row;
2289         u64 r0_last_row;
2290         u32 r5or6_blocks_per_row;
2291         u64 r5or6_first_row;
2292         u64 r5or6_last_row;
2293         u32 r5or6_first_row_offset;
2294         u32 r5or6_last_row_offset;
2295         u32 r5or6_first_column;
2296         u32 r5or6_last_column;
2297         u16 data_disks_per_row;
2298         u32 total_disks_per_row;
2299         u16 layout_map_count;
2300         u32 stripesize;
2301         u16 strip_size;
2302         u32 first_group;
2303         u32 last_group;
2304         u32 current_group;
2305         u32 map_row;
2306         u32 aio_handle;
2307         u64 disk_block;
2308         u32 disk_block_cnt;
2309         u8 cdb[16];
2310         u8 cdb_length;
2311         int offload_to_mirror;
2312         struct pqi_encryption_info *encryption_info_ptr;
2313         struct pqi_encryption_info encryption_info;
2314 #if BITS_PER_LONG == 32
2315         u64 tmpdiv;
2316 #endif
2317
2318         /* Check for valid opcode, get LBA and block count. */
2319         switch (scmd->cmnd[0]) {
2320         case WRITE_6:
2321                 is_write = true;
2322                 /* fall through */
2323         case READ_6:
2324                 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2325                         (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2326                 block_cnt = (u32)scmd->cmnd[4];
2327                 if (block_cnt == 0)
2328                         block_cnt = 256;
2329                 break;
2330         case WRITE_10:
2331                 is_write = true;
2332                 /* fall through */
2333         case READ_10:
2334                 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2335                 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2336                 break;
2337         case WRITE_12:
2338                 is_write = true;
2339                 /* fall through */
2340         case READ_12:
2341                 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2342                 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2343                 break;
2344         case WRITE_16:
2345                 is_write = true;
2346                 /* fall through */
2347         case READ_16:
2348                 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2349                 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2350                 break;
2351         default:
2352                 /* Process via normal I/O path. */
2353                 return PQI_RAID_BYPASS_INELIGIBLE;
2354         }
2355
2356         /* Check for write to non-RAID-0. */
2357         if (is_write && device->raid_level != SA_RAID_0)
2358                 return PQI_RAID_BYPASS_INELIGIBLE;
2359
2360         if (unlikely(block_cnt == 0))
2361                 return PQI_RAID_BYPASS_INELIGIBLE;
2362
2363         last_block = first_block + block_cnt - 1;
2364         raid_map = device->raid_map;
2365
2366         /* Check for invalid block or wraparound. */
2367         if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2368                 last_block < first_block)
2369                 return PQI_RAID_BYPASS_INELIGIBLE;
2370
2371         data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2372         strip_size = get_unaligned_le16(&raid_map->strip_size);
2373         layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2374
2375         /* Calculate stripe information for the request. */
2376         blocks_per_row = data_disks_per_row * strip_size;
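        /*
         * For example, with 3 data disks and a strip size of 128 blocks,
         * blocks_per_row is 384; a request starting at block 500 maps to
         * row 1, row offset 116, column 0.
         */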
2377 #if BITS_PER_LONG == 32
2378         tmpdiv = first_block;
2379         do_div(tmpdiv, blocks_per_row);
2380         first_row = tmpdiv;
2381         tmpdiv = last_block;
2382         do_div(tmpdiv, blocks_per_row);
2383         last_row = tmpdiv;
2384         first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2385         last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2386         tmpdiv = first_row_offset;
2387         do_div(tmpdiv, strip_size);
2388         first_column = tmpdiv;
2389         tmpdiv = last_row_offset;
2390         do_div(tmpdiv, strip_size);
2391         last_column = tmpdiv;
2392 #else
2393         first_row = first_block / blocks_per_row;
2394         last_row = last_block / blocks_per_row;
2395         first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2396         last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2397         first_column = first_row_offset / strip_size;
2398         last_column = last_row_offset / strip_size;
2399 #endif
2400
2401         /* If the request spans more than one row or column, hand it to the controller. */
2402         if (first_row != last_row || first_column != last_column)
2403                 return PQI_RAID_BYPASS_INELIGIBLE;
2404
2405         /* Proceeding with driver mapping. */
2406         total_disks_per_row = data_disks_per_row +
2407                 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2408         map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2409                 get_unaligned_le16(&raid_map->row_cnt);
2410         map_index = (map_row * total_disks_per_row) + first_column;
2411
2412         /* RAID 1 */
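        /*
         * Alternate between the two mirror legs on successive bypass
         * reads to balance the load.
         */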
2413         if (device->raid_level == SA_RAID_1) {
2414                 if (device->offload_to_mirror)
2415                         map_index += data_disks_per_row;
2416                 device->offload_to_mirror = !device->offload_to_mirror;
2417         } else if (device->raid_level == SA_RAID_ADM) {
2418                 /* RAID ADM */
2419                 /*
2420                  * Handles N-way mirrors (R1-ADM) and R10 with a number of
2421                  * drives divisible by 3.
2422                  */
2423                 offload_to_mirror = device->offload_to_mirror;
2424                 if (offload_to_mirror == 0)  {
2425                         /* use physical disk in the first mirrored group. */
2426                         map_index %= data_disks_per_row;
2427                 } else {
2428                         do {
2429                                 /*
2430                                  * Determine mirror group that map_index
2431                                  * indicates.
2432                                  */
2433                                 current_group = map_index / data_disks_per_row;
2434
2435                                 if (offload_to_mirror != current_group) {
2436                                         if (current_group <
2437                                                 layout_map_count - 1) {
2438                                                 /*
2439                                                  * Select raid index from
2440                                                  * next group.
2441                                                  */
2442                                                 map_index += data_disks_per_row;
2443                                                 current_group++;
2444                                         } else {
2445                                                 /*
2446                                                  * Select raid index from first
2447                                                  * group.
2448                                                  */
2449                                                 map_index %= data_disks_per_row;
2450                                                 current_group = 0;
2451                                         }
2452                                 }
2453                         } while (offload_to_mirror != current_group);
2454                 }
2455
2456                 /* Set mirror group to use next time. */
2457                 offload_to_mirror =
2458                         (offload_to_mirror >= layout_map_count - 1) ?
2459                                 0 : offload_to_mirror + 1;
2460                 WARN_ON(offload_to_mirror >= layout_map_count);
2461                 device->offload_to_mirror = offload_to_mirror;
2462                 /*
2463                  * Avoid direct use of device->offload_to_mirror within this
2464                  * function since multiple threads might simultaneously
2465                  * increment it beyond the range of device->layout_map_count - 1.
2466                  */
2467         } else if ((device->raid_level == SA_RAID_5 ||
2468                 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2469                 /* RAID 50/60 */
2470                 /* Verify first and last block are in same RAID group */
2471                 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2472                 stripesize = r5or6_blocks_per_row * layout_map_count;
2473 #if BITS_PER_LONG == 32
2474                 tmpdiv = first_block;
2475                 first_group = do_div(tmpdiv, stripesize);
2476                 tmpdiv = first_group;
2477                 do_div(tmpdiv, r5or6_blocks_per_row);
2478                 first_group = tmpdiv;
2479                 tmpdiv = last_block;
2480                 last_group = do_div(tmpdiv, stripesize);
2481                 tmpdiv = last_group;
2482                 do_div(tmpdiv, r5or6_blocks_per_row);
2483                 last_group = tmpdiv;
2484 #else
2485                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2486                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2487 #endif
2488                 if (first_group != last_group)
2489                         return PQI_RAID_BYPASS_INELIGIBLE;
2490
2491                 /* Verify request is in a single row of RAID 5/6 */
2492 #if BITS_PER_LONG == 32
2493                 tmpdiv = first_block;
2494                 do_div(tmpdiv, stripesize);
2495                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2496                 tmpdiv = last_block;
2497                 do_div(tmpdiv, stripesize);
2498                 r5or6_last_row = r0_last_row = tmpdiv;
2499 #else
2500                 first_row = r5or6_first_row = r0_first_row =
2501                         first_block / stripesize;
2502                 r5or6_last_row = r0_last_row = last_block / stripesize;
2503 #endif
2504                 if (r5or6_first_row != r5or6_last_row)
2505                         return PQI_RAID_BYPASS_INELIGIBLE;
2506
2507                 /* Verify request is in a single column */
2508 #if BITS_PER_LONG == 32
2509                 tmpdiv = first_block;
2510                 first_row_offset = do_div(tmpdiv, stripesize);
2511                 tmpdiv = first_row_offset;
2512                 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2513                 r5or6_first_row_offset = first_row_offset;
2514                 tmpdiv = last_block;
2515                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2516                 tmpdiv = r5or6_last_row_offset;
2517                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2518                 tmpdiv = r5or6_first_row_offset;
2519                 do_div(tmpdiv, strip_size);
2520                 first_column = r5or6_first_column = tmpdiv;
2521                 tmpdiv = r5or6_last_row_offset;
2522                 do_div(tmpdiv, strip_size);
2523                 r5or6_last_column = tmpdiv;
2524 #else
2525                 first_row_offset = r5or6_first_row_offset =
2526                         (u32)((first_block % stripesize) %
2527                         r5or6_blocks_per_row);
2528
2529                 r5or6_last_row_offset =
2530                         (u32)((last_block % stripesize) %
2531                         r5or6_blocks_per_row);
2532
2533                 first_column = r5or6_first_row_offset / strip_size;
2534                 r5or6_first_column = first_column;
2535                 r5or6_last_column = r5or6_last_row_offset / strip_size;
2536 #endif
2537                 if (r5or6_first_column != r5or6_last_column)
2538                         return PQI_RAID_BYPASS_INELIGIBLE;
2539
2540                 /* Request is eligible */
2541                 map_row =
2542                         ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2543                         get_unaligned_le16(&raid_map->row_cnt);
2544
2545                 map_index = (first_group *
2546                         (get_unaligned_le16(&raid_map->row_cnt) *
2547                         total_disks_per_row)) +
2548                         (map_row * total_disks_per_row) + first_column;
2549         }
2550
2551         aio_handle = raid_map->disk_data[map_index].aio_handle;
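        /*
         * Starting block on the physical disk: the row base
         * (first_row * strip_size) plus the offset within the selected
         * strip (first_row_offset - first_column * strip_size).
         */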
2552         disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2553                 first_row * strip_size +
2554                 (first_row_offset - first_column * strip_size);
2555         disk_block_cnt = block_cnt;
2556
2557         /* Handle differing logical/physical block sizes. */
2558         if (raid_map->phys_blk_shift) {
2559                 disk_block <<= raid_map->phys_blk_shift;
2560                 disk_block_cnt <<= raid_map->phys_blk_shift;
2561         }
2562
2563         if (unlikely(disk_block_cnt > 0xffff))
2564                 return PQI_RAID_BYPASS_INELIGIBLE;
2565
2566         /* Build the new CDB for the physical disk I/O. */
2567         if (disk_block > 0xffffffff) {
2568                 cdb[0] = is_write ? WRITE_16 : READ_16;
2569                 cdb[1] = 0;
2570                 put_unaligned_be64(disk_block, &cdb[2]);
2571                 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2572                 cdb[14] = 0;
2573                 cdb[15] = 0;
2574                 cdb_length = 16;
2575         } else {
2576                 cdb[0] = is_write ? WRITE_10 : READ_10;
2577                 cdb[1] = 0;
2578                 put_unaligned_be32((u32)disk_block, &cdb[2]);
2579                 cdb[6] = 0;
2580                 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2581                 cdb[9] = 0;
2582                 cdb_length = 10;
2583         }
2584
2585         if (get_unaligned_le16(&raid_map->flags) &
2586                 RAID_MAP_ENCRYPTION_ENABLED) {
2587                 pqi_set_encryption_info(&encryption_info, raid_map,
2588                         first_block);
2589                 encryption_info_ptr = &encryption_info;
2590         } else {
2591                 encryption_info_ptr = NULL;
2592         }
2593
2594         return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2595                 cdb, cdb_length, queue_group, encryption_info_ptr, true);
2596 }
2597
2598 #define PQI_STATUS_IDLE         0x0
2599
2600 #define PQI_CREATE_ADMIN_QUEUE_PAIR     1
2601 #define PQI_DELETE_ADMIN_QUEUE_PAIR     2
2602
2603 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET             0x0
2604 #define PQI_DEVICE_STATE_STATUS_AVAILABLE               0x1
2605 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY            0x2
2606 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY         0x3
2607 #define PQI_DEVICE_STATE_ERROR                          0x4
2608
2609 #define PQI_MODE_READY_TIMEOUT_SECS             30
2610 #define PQI_MODE_READY_POLL_INTERVAL_MSECS      1
2611
2612 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2613 {
2614         struct pqi_device_registers __iomem *pqi_registers;
2615         unsigned long timeout;
2616         u64 signature;
2617         u8 status;
2618
2619         pqi_registers = ctrl_info->pqi_registers;
2620         timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2621
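        /*
         * Wait, in order, for the PQI signature to appear, for the
         * function and status code to report IDLE, and for the device
         * state to reach ALL_REGISTERS_READY.
         */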
2622         while (1) {
2623                 signature = readq(&pqi_registers->signature);
2624                 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2625                         sizeof(signature)) == 0)
2626                         break;
2627                 if (time_after(jiffies, timeout)) {
2628                         dev_err(&ctrl_info->pci_dev->dev,
2629                                 "timed out waiting for PQI signature\n");
2630                         return -ETIMEDOUT;
2631                 }
2632                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2633         }
2634
2635         while (1) {
2636                 status = readb(&pqi_registers->function_and_status_code);
2637                 if (status == PQI_STATUS_IDLE)
2638                         break;
2639                 if (time_after(jiffies, timeout)) {
2640                         dev_err(&ctrl_info->pci_dev->dev,
2641                                 "timed out waiting for PQI IDLE\n");
2642                         return -ETIMEDOUT;
2643                 }
2644                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2645         }
2646
2647         while (1) {
2648                 if (readl(&pqi_registers->device_status) ==
2649                         PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2650                         break;
2651                 if (time_after(jiffies, timeout)) {
2652                         dev_err(&ctrl_info->pci_dev->dev,
2653                                 "timed out waiting for PQI all registers ready\n");
2654                         return -ETIMEDOUT;
2655                 }
2656                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2657         }
2658
2659         return 0;
2660 }
2661
2662 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2663 {
2664         struct pqi_scsi_dev *device;
2665
2666         device = io_request->scmd->device->hostdata;
2667         device->raid_bypass_enabled = false;
2668         device->aio_enabled = false;
2669 }
2670
2671 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2672 {
2673         struct pqi_ctrl_info *ctrl_info;
2674         struct pqi_scsi_dev *device;
2675
2676         device = sdev->hostdata;
2677         if (device->device_offline)
2678                 return;
2679
2680         device->device_offline = true;
2681         ctrl_info = shost_to_hba(sdev->host);
2682         pqi_schedule_rescan_worker(ctrl_info);
2683         dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2684                 path, ctrl_info->scsi_host->host_no, device->bus,
2685                 device->target, device->lun);
2686 }
2687
2688 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2689 {
2690         u8 scsi_status;
2691         u8 host_byte;
2692         struct scsi_cmnd *scmd;
2693         struct pqi_raid_error_info *error_info;
2694         size_t sense_data_length;
2695         int residual_count;
2696         int xfer_count;
2697         struct scsi_sense_hdr sshdr;
2698
2699         scmd = io_request->scmd;
2700         if (!scmd)
2701                 return;
2702
2703         error_info = io_request->error_info;
2704         scsi_status = error_info->status;
2705         host_byte = DID_OK;
2706
2707         switch (error_info->data_out_result) {
2708         case PQI_DATA_IN_OUT_GOOD:
2709                 break;
2710         case PQI_DATA_IN_OUT_UNDERFLOW:
2711                 xfer_count =
2712                         get_unaligned_le32(&error_info->data_out_transferred);
2713                 residual_count = scsi_bufflen(scmd) - xfer_count;
2714                 scsi_set_resid(scmd, residual_count);
2715                 if (xfer_count < scmd->underflow)
2716                         host_byte = DID_SOFT_ERROR;
2717                 break;
2718         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2719         case PQI_DATA_IN_OUT_ABORTED:
2720                 host_byte = DID_ABORT;
2721                 break;
2722         case PQI_DATA_IN_OUT_TIMEOUT:
2723                 host_byte = DID_TIME_OUT;
2724                 break;
2725         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2726         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2727         case PQI_DATA_IN_OUT_BUFFER_ERROR:
2728         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2729         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2730         case PQI_DATA_IN_OUT_ERROR:
2731         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2732         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2733         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2734         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2735         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2736         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2737         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2738         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2739         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2740         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2741         default:
2742                 host_byte = DID_ERROR;
2743                 break;
2744         }
2745
2746         sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2747         if (sense_data_length == 0)
2748                 sense_data_length =
2749                         get_unaligned_le16(&error_info->response_data_length);
2750         if (sense_data_length) {
2751                 if (sense_data_length > sizeof(error_info->data))
2752                         sense_data_length = sizeof(error_info->data);
2753
2754                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2755                         scsi_normalize_sense(error_info->data,
2756                                 sense_data_length, &sshdr) &&
2757                                 sshdr.sense_key == HARDWARE_ERROR &&
2758                                 sshdr.asc == 0x3e) {
2759                         struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2760                         struct pqi_scsi_dev *device = scmd->device->hostdata;
2761
2762                         switch (sshdr.ascq) {
2763                         case 0x1: /* LOGICAL UNIT FAILURE */
2764                                 if (printk_ratelimit())
2765                                         scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2766                                                 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2767                                 pqi_take_device_offline(scmd->device, "RAID");
2768                                 host_byte = DID_NO_CONNECT;
2769                                 break;
2770
2771                         default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
2772                                 if (printk_ratelimit())
2773                                         scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
2774                                                 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2775                                 break;
2776                         }
2777                 }
2778
2779                 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2780                         sense_data_length = SCSI_SENSE_BUFFERSIZE;
2781                 memcpy(scmd->sense_buffer, error_info->data,
2782                         sense_data_length);
2783         }
2784
2785         scmd->result = scsi_status;
2786         set_host_byte(scmd, host_byte);
2787 }
2788
2789 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2790 {
2791         u8 scsi_status;
2792         u8 host_byte;
2793         struct scsi_cmnd *scmd;
2794         struct pqi_aio_error_info *error_info;
2795         size_t sense_data_length;
2796         int residual_count;
2797         int xfer_count;
2798         bool device_offline;
2799
2800         scmd = io_request->scmd;
2801         error_info = io_request->error_info;
2802         host_byte = DID_OK;
2803         sense_data_length = 0;
2804         device_offline = false;
2805
2806         switch (error_info->service_response) {
2807         case PQI_AIO_SERV_RESPONSE_COMPLETE:
2808                 scsi_status = error_info->status;
2809                 break;
2810         case PQI_AIO_SERV_RESPONSE_FAILURE:
2811                 switch (error_info->status) {
2812                 case PQI_AIO_STATUS_IO_ABORTED:
2813                         scsi_status = SAM_STAT_TASK_ABORTED;
2814                         break;
2815                 case PQI_AIO_STATUS_UNDERRUN:
2816                         scsi_status = SAM_STAT_GOOD;
2817                         residual_count = get_unaligned_le32(
2818                                                 &error_info->residual_count);
2819                         scsi_set_resid(scmd, residual_count);
2820                         xfer_count = scsi_bufflen(scmd) - residual_count;
2821                         if (xfer_count < scmd->underflow)
2822                                 host_byte = DID_SOFT_ERROR;
2823                         break;
2824                 case PQI_AIO_STATUS_OVERRUN:
2825                         scsi_status = SAM_STAT_GOOD;
2826                         break;
2827                 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2828                         pqi_aio_path_disabled(io_request);
2829                         scsi_status = SAM_STAT_GOOD;
2830                         io_request->status = -EAGAIN;
2831                         break;
2832                 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2833                 case PQI_AIO_STATUS_INVALID_DEVICE:
2834                         if (!io_request->raid_bypass) {
2835                                 device_offline = true;
2836                                 pqi_take_device_offline(scmd->device, "AIO");
2837                                 host_byte = DID_NO_CONNECT;
2838                         }
2839                         scsi_status = SAM_STAT_CHECK_CONDITION;
2840                         break;
2841                 case PQI_AIO_STATUS_IO_ERROR:
2842                 default:
2843                         scsi_status = SAM_STAT_CHECK_CONDITION;
2844                         break;
2845                 }
2846                 break;
2847         case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2848         case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2849                 scsi_status = SAM_STAT_GOOD;
2850                 break;
2851         case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2852         case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2853         default:
2854                 scsi_status = SAM_STAT_CHECK_CONDITION;
2855                 break;
2856         }
2857
2858         if (error_info->data_present) {
2859                 sense_data_length =
2860                         get_unaligned_le16(&error_info->data_length);
2861                 if (sense_data_length) {
2862                         if (sense_data_length > sizeof(error_info->data))
2863                                 sense_data_length = sizeof(error_info->data);
2864                         if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2865                                 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2866                         memcpy(scmd->sense_buffer, error_info->data,
2867                                 sense_data_length);
2868                 }
2869         }
2870
2871         if (device_offline && sense_data_length == 0)
2872                 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2873                         0x3e, 0x1);
2874
2875         scmd->result = scsi_status;
2876         set_host_byte(scmd, host_byte);
2877 }
2878
2879 static void pqi_process_io_error(unsigned int iu_type,
2880         struct pqi_io_request *io_request)
2881 {
2882         switch (iu_type) {
2883         case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2884                 pqi_process_raid_io_error(io_request);
2885                 break;
2886         case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2887                 pqi_process_aio_io_error(io_request);
2888                 break;
2889         }
2890 }
2891
2892 static int pqi_interpret_task_management_response(
2893         struct pqi_task_management_response *response)
2894 {
2895         int rc;
2896
2897         switch (response->response_code) {
2898         case SOP_TMF_COMPLETE:
2899         case SOP_TMF_FUNCTION_SUCCEEDED:
2900                 rc = 0;
2901                 break;
2902         case SOP_TMF_REJECTED:
2903                 rc = -EAGAIN;
2904                 break;
2905         default:
2906                 rc = -EIO;
2907                 break;
2908         }
2909
2910         return rc;
2911 }
2912
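/*
 * Drain one queue group's outbound (response) queue.  The producer index
 * is re-read on every iteration; the cached consumer index is advanced
 * past each response and written back to the controller only once, after
 * the loop, to acknowledge the whole batch.  Each response is matched to
 * its originating pqi_io_request via the request ID it carries.
 */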
2913 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2914         struct pqi_queue_group *queue_group)
2915 {
2916         unsigned int num_responses;
2917         pqi_index_t oq_pi;
2918         pqi_index_t oq_ci;
2919         struct pqi_io_request *io_request;
2920         struct pqi_io_response *response;
2921         u16 request_id;
2922
2923         num_responses = 0;
2924         oq_ci = queue_group->oq_ci_copy;
2925
2926         while (1) {
2927                 oq_pi = readl(queue_group->oq_pi);
2928                 if (oq_pi == oq_ci)
2929                         break;
2930
2931                 num_responses++;
2932                 response = queue_group->oq_element_array +
2933                         (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2934
2935                 request_id = get_unaligned_le16(&response->request_id);
2936                 WARN_ON(request_id >= ctrl_info->max_io_slots);
2937
2938                 io_request = &ctrl_info->io_request_pool[request_id];
2939                 WARN_ON(atomic_read(&io_request->refcount) == 0);
2940
2941                 switch (response->header.iu_type) {
2942                 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2943                 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2944                         if (io_request->scmd)
2945                                 io_request->scmd->result = 0;
2946                         /* fall through */
2947                 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2948                         break;
2949                 case PQI_RESPONSE_IU_VENDOR_GENERAL:
2950                         io_request->status =
2951                                 get_unaligned_le16(
2952                                 &((struct pqi_vendor_general_response *)
2953                                         response)->status);
2954                         break;
2955                 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2956                         io_request->status =
2957                                 pqi_interpret_task_management_response(
2958                                         (void *)response);
2959                         break;
2960                 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2961                         pqi_aio_path_disabled(io_request);
2962                         io_request->status = -EAGAIN;
2963                         break;
2964                 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2965                 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2966                         io_request->error_info = ctrl_info->error_buffer +
2967                                 (get_unaligned_le16(&response->error_index) *
2968                                 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2969                         pqi_process_io_error(response->header.iu_type,
2970                                 io_request);
2971                         break;
2972                 default:
2973                         dev_err(&ctrl_info->pci_dev->dev,
2974                                 "unexpected IU type: 0x%x\n",
2975                                 response->header.iu_type);
2976                         break;
2977                 }
2978
2979                 io_request->io_complete_callback(io_request,
2980                         io_request->context);
2981
2982                 /*
2983                  * Note that the I/O request structure CANNOT BE TOUCHED after
2984                  * returning from the I/O completion callback!
2985                  */
2986
2987                 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2988         }
2989
2990         if (num_responses) {
2991                 queue_group->oq_ci_copy = oq_ci;
2992                 writel(oq_ci, queue_group->oq_ci);
2993         }
2994
2995         return num_responses;
2996 }
2997
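/*
 * Number of free elements in a circular queue with the given producer
 * and consumer indexes.  One element is always kept unused so that a
 * full queue (pi one behind ci) can be distinguished from an empty one
 * (pi == ci).  For example, with elements_in_queue = 8, pi = 2 and
 * ci = 6 (the producer has wrapped), num_elements_used = 8 - 6 + 2 = 4,
 * leaving 8 - 4 - 1 = 3 free elements.
 */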
2998 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2999         unsigned int ci, unsigned int elements_in_queue)
3000 {
3001         unsigned int num_elements_used;
3002
3003         if (pi >= ci)
3004                 num_elements_used = pi - ci;
3005         else
3006                 num_elements_used = elements_in_queue - ci + pi;
3007
3008         return elements_in_queue - num_elements_used - 1;
3009 }
3010
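/*
 * Post an event acknowledgment IU on the RAID path of the default queue
 * group.  The submit lock is dropped and retaken between attempts while
 * waiting for a free queue element, and the wait is abandoned if the
 * controller goes offline.  The pi doorbell write that makes the IU
 * visible to the controller happens before the lock is released.
 */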
3011 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3012         struct pqi_event_acknowledge_request *iu, size_t iu_length)
3013 {
3014         pqi_index_t iq_pi;
3015         pqi_index_t iq_ci;
3016         unsigned long flags;
3017         void *next_element;
3018         struct pqi_queue_group *queue_group;
3019
3020         queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3021         put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3022
3023         while (1) {
3024                 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3025
3026                 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3027                 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3028
3029                 if (pqi_num_elements_free(iq_pi, iq_ci,
3030                         ctrl_info->num_elements_per_iq))
3031                         break;
3032
3033                 spin_unlock_irqrestore(
3034                         &queue_group->submit_lock[RAID_PATH], flags);
3035
3036                 if (pqi_ctrl_offline(ctrl_info))
3037                         return;
3038         }
3039
3040         next_element = queue_group->iq_element_array[RAID_PATH] +
3041                 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3042
3043         memcpy(next_element, iu, iu_length);
3044
3045         iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3046         queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3047
3048         /*
3049          * This write notifies the controller that an IU is available to be
3050          * processed.
3051          */
3052         writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3053
3054         spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3055 }
3056
3057 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3058         struct pqi_event *event)
3059 {
3060         struct pqi_event_acknowledge_request request;
3061
3062         memset(&request, 0, sizeof(request));
3063
3064         request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3065         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3066                 &request.header.iu_length);
3067         request.event_type = event->event_type;
3068         request.event_id = event->event_id;
3069         request.additional_event_id = event->additional_event_id;
3070
3071         pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3072 }
3073
3074 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS              30
3075 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS        1
3076
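/*
 * Poll the soft reset status register once per second for up to 30
 * seconds, reporting whether the driver should initiate the reset
 * itself, the reset was aborted, the firmware stopped running, or the
 * wait timed out.
 */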
3077 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3078         struct pqi_ctrl_info *ctrl_info)
3079 {
3080         unsigned long timeout;
3081         u8 status;
3082
3083         timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3084
3085         while (1) {
3086                 status = pqi_read_soft_reset_status(ctrl_info);
3087                 if (status & PQI_SOFT_RESET_INITIATE)
3088                         return RESET_INITIATE_DRIVER;
3089
3090                 if (status & PQI_SOFT_RESET_ABORT)
3091                         return RESET_ABORT;
3092
3093                 if (time_after(jiffies, timeout)) {
3094                         dev_err(&ctrl_info->pci_dev->dev,
3095                                 "timed out waiting for soft reset status\n");
3096                         return RESET_TIMEDOUT;
3097                 }
3098
3099                 if (!sis_is_firmware_running(ctrl_info))
3100                         return RESET_NORESPONSE;
3101
3102                 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3103         }
3104 }
3105
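/*
 * Finish an Online Firmware Activation attempt according to the soft
 * reset status: issue the reset from the driver when requested (and
 * after a status poll timeout), restart the controller and release the
 * OFA host buffer, unquiesce on abort, or take the controller offline
 * if the firmware stopped responding.
 */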
3106 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3107                 enum pqi_soft_reset_status reset_status)
3108 {
3109         int rc;
3110
3111         switch (reset_status) {
3112         case RESET_INITIATE_DRIVER:
3113                 /* fall through */
3114         case RESET_TIMEDOUT:
3115                 dev_info(&ctrl_info->pci_dev->dev,
3116                         "resetting controller %u\n", ctrl_info->ctrl_id);
3117                 sis_soft_reset(ctrl_info);
3118                 /* fall through */
3119         case RESET_INITIATE_FIRMWARE:
3120                 rc = pqi_ofa_ctrl_restart(ctrl_info);
3121                 pqi_ofa_free_host_buffer(ctrl_info);
3122                 dev_info(&ctrl_info->pci_dev->dev,
3123                         "Online Firmware Activation for controller %u: %s\n",
3124                         ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3125                 break;
3126         case RESET_ABORT:
3127                 pqi_ofa_ctrl_unquiesce(ctrl_info);
3128                 dev_info(&ctrl_info->pci_dev->dev,
3129                         "Online Firmware Activation for controller %u: %s\n",
3130                         ctrl_info->ctrl_id, "ABORTED");
3131                 break;
3132         case RESET_NORESPONSE:
3133                 pqi_ofa_free_host_buffer(ctrl_info);
3134                 pqi_take_ctrl_offline(ctrl_info);
3135                 break;
3136         }
3137 }
3138
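/*
 * Dispatch the three OFA event variants under ofa_mutex: a quiesce event
 * quiesces I/O, acks the event and then performs the soft reset handshake
 * (or assumes a firmware-initiated reset when the handshake is not
 * supported); a memory allocation event acks and then sets up and reports
 * the requested host buffer; a cancellation frees the host buffer and
 * acks.
 */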
3139 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3140         struct pqi_event *event)
3141 {
3142         u16 event_id;
3143         enum pqi_soft_reset_status status;
3144
3145         event_id = get_unaligned_le16(&event->event_id);
3146
3147         mutex_lock(&ctrl_info->ofa_mutex);
3148
3149         if (event_id == PQI_EVENT_OFA_QUIESCE) {
3150                 dev_info(&ctrl_info->pci_dev->dev,
3151                          "Received Online Firmware Activation quiesce event for controller %u\n",
3152                          ctrl_info->ctrl_id);
3153                 pqi_ofa_ctrl_quiesce(ctrl_info);
3154                 pqi_acknowledge_event(ctrl_info, event);
3155                 if (ctrl_info->soft_reset_handshake_supported) {
3156                         status = pqi_poll_for_soft_reset_status(ctrl_info);
3157                         pqi_process_soft_reset(ctrl_info, status);
3158                 } else {
3159                         pqi_process_soft_reset(ctrl_info,
3160                                         RESET_INITIATE_FIRMWARE);
3161                 }
3162
3163         } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3164                 pqi_acknowledge_event(ctrl_info, event);
3165                 pqi_ofa_setup_host_buffer(ctrl_info,
3166                         le32_to_cpu(event->ofa_bytes_requested));
3167                 pqi_ofa_host_memory_update(ctrl_info);
3168         } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3169                 pqi_ofa_free_host_buffer(ctrl_info);
3170                 pqi_acknowledge_event(ctrl_info, event);
3171                 dev_info(&ctrl_info->pci_dev->dev,
3172                          "Online Firmware Activation (%u) cancel reason: %u\n",

3173                          ctrl_info->ctrl_id, event->ofa_cancel_reason);
3174         }
3175
3176         mutex_unlock(&ctrl_info->ofa_mutex);
3177 }
3178
3179 static void pqi_event_worker(struct work_struct *work)
3180 {
3181         unsigned int i;
3182         struct pqi_ctrl_info *ctrl_info;
3183         struct pqi_event *event;
3184
3185         ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3186
3187         pqi_ctrl_busy(ctrl_info);
3188         pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
3189         if (pqi_ctrl_offline(ctrl_info))
3190                 goto out;
3191
3192         pqi_schedule_rescan_worker_delayed(ctrl_info);
3193
3194         event = ctrl_info->events;
3195         for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3196                 if (event->pending) {
3197                         event->pending = false;
3198                         if (event->event_type == PQI_EVENT_TYPE_OFA) {
3199                                 pqi_ctrl_unbusy(ctrl_info);
3200                                 pqi_ofa_process_event(ctrl_info, event);
3201                                 return;
3202                         }
3203                         pqi_acknowledge_event(ctrl_info, event);
3204                 }
3205                 event++;
3206         }
3207
3208 out:
3209         pqi_ctrl_unbusy(ctrl_info);
3210 }
3211
3212 #define PQI_HEARTBEAT_TIMER_INTERVAL    (10 * PQI_HZ)
3213
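/*
 * Runs every PQI_HEARTBEAT_TIMER_INTERVAL (10 seconds).  The controller
 * is taken offline only when neither the interrupt count nor the
 * firmware heartbeat counter has advanced since the previous run, so a
 * controller that is still completing I/O is never declared dead on the
 * basis of a stale heartbeat sample alone.
 */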
3214 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3215 {
3216         int num_interrupts;
3217         u32 heartbeat_count;
3218         struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
3219                                                      heartbeat_timer);
3220
3221         pqi_check_ctrl_health(ctrl_info);
3222         if (pqi_ctrl_offline(ctrl_info))
3223                 return;
3224
3225         num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3226         heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3227
3228         if (num_interrupts == ctrl_info->previous_num_interrupts) {
3229                 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3230                         dev_err(&ctrl_info->pci_dev->dev,
3231                                 "no heartbeat detected - last heartbeat count: %u\n",
3232                                 heartbeat_count);
3233                         pqi_take_ctrl_offline(ctrl_info);
3234                         return;
3235                 }
3236         } else {
3237                 ctrl_info->previous_num_interrupts = num_interrupts;
3238         }
3239
3240         ctrl_info->previous_heartbeat_count = heartbeat_count;
3241         mod_timer(&ctrl_info->heartbeat_timer,
3242                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3243 }
3244
3245 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3246 {
3247         if (!ctrl_info->heartbeat_counter)
3248                 return;
3249
3250         ctrl_info->previous_num_interrupts =
3251                 atomic_read(&ctrl_info->num_interrupts);
3252         ctrl_info->previous_heartbeat_count =
3253                 pqi_read_heartbeat_counter(ctrl_info);
3254
3255         ctrl_info->heartbeat_timer.expires =
3256                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3257         add_timer(&ctrl_info->heartbeat_timer);
3258 }
3259
3260 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3261 {
3262         del_timer_sync(&ctrl_info->heartbeat_timer);
3263 }
3264
3265 static inline int pqi_event_type_to_event_index(unsigned int event_type)
3266 {
3267         int index;
3268
3269         for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3270                 if (event_type == pqi_supported_event_types[index])
3271                         return index;
3272
3273         return -1;
3274 }
3275
3276 static inline bool pqi_is_supported_event(unsigned int event_type)
3277 {
3278         return pqi_event_type_to_event_index(event_type) != -1;
3279 }
3280
3281 static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3282         struct pqi_event_response *response)
3283 {
3284         u16 event_id;
3285
3286         event_id = get_unaligned_le16(&event->event_id);
3287
3288         if (event->event_type == PQI_EVENT_TYPE_OFA) {
3289                 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3290                         event->ofa_bytes_requested =
3291                         response->data.ofa_memory_allocation.bytes_requested;
3292                 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3293                         event->ofa_cancel_reason =
3294                         response->data.ofa_cancelled.reason;
3295                 }
3296         }
3297 }
3298
3299 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3300 {
3301         unsigned int num_events;
3302         pqi_index_t oq_pi;
3303         pqi_index_t oq_ci;
3304         struct pqi_event_queue *event_queue;
3305         struct pqi_event_response *response;
3306         struct pqi_event *event;
3307         int event_index;
3308
3309         event_queue = &ctrl_info->event_queue;
3310         num_events = 0;
3311         oq_ci = event_queue->oq_ci_copy;
3312
3313         while (1) {
3314                 oq_pi = readl(event_queue->oq_pi);
3315                 if (oq_pi == oq_ci)
3316                         break;
3317
3318                 num_events++;
3319                 response = event_queue->oq_element_array +
3320                         (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3321
3322                 event_index =
3323                         pqi_event_type_to_event_index(response->event_type);
3324
3325                 if (event_index >= 0) {
3326                         if (response->request_acknowlege) {
3327                                 event = &ctrl_info->events[event_index];
3328                                 event->pending = true;
3329                                 event->event_type = response->event_type;
3330                                 event->event_id = response->event_id;
3331                                 event->additional_event_id =
3332                                         response->additional_event_id;
3333                                 pqi_ofa_capture_event_payload(event, response);
3334                         }
3335                 }
3336
3337                 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3338         }
3339
3340         if (num_events) {
3341                 event_queue->oq_ci_copy = oq_ci;
3342                 writel(oq_ci, event_queue->oq_ci);
3343                 schedule_work(&ctrl_info->event_work);
3344         }
3345
3346         return num_events;
3347 }
3348
3349 #define PQI_LEGACY_INTX_MASK    0x1
3350
3351 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3352                                                 bool enable_intx)
3353 {
3354         u32 intx_mask;
3355         struct pqi_device_registers __iomem *pqi_registers;
3356         volatile void __iomem *register_addr;
3357
3358         pqi_registers = ctrl_info->pqi_registers;
3359
3360         if (enable_intx)
3361                 register_addr = &pqi_registers->legacy_intx_mask_clear;
3362         else
3363                 register_addr = &pqi_registers->legacy_intx_mask_set;
3364
3365         intx_mask = readl(register_addr);
3366         intx_mask |= PQI_LEGACY_INTX_MASK;
3367         writel(intx_mask, register_addr);
3368 }
3369
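/*
 * Switch the controller between MSI-X, legacy INTx and no interrupts.
 * Each transition unmasks or masks the legacy INTx source as needed
 * before asking the SIS layer to select the new mechanism; transitions
 * to the current mode are no-ops.
 */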
3370 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3371         enum pqi_irq_mode new_mode)
3372 {
3373         switch (ctrl_info->irq_mode) {
3374         case IRQ_MODE_MSIX:
3375                 switch (new_mode) {
3376                 case IRQ_MODE_MSIX:
3377                         break;
3378                 case IRQ_MODE_INTX:
3379                         pqi_configure_legacy_intx(ctrl_info, true);
3380                         sis_enable_intx(ctrl_info);
3381                         break;
3382                 case IRQ_MODE_NONE:
3383                         break;
3384                 }
3385                 break;
3386         case IRQ_MODE_INTX:
3387                 switch (new_mode) {
3388                 case IRQ_MODE_MSIX:
3389                         pqi_configure_legacy_intx(ctrl_info, false);
3390                         sis_enable_msix(ctrl_info);
3391                         break;
3392                 case IRQ_MODE_INTX:
3393                         break;
3394                 case IRQ_MODE_NONE:
3395                         pqi_configure_legacy_intx(ctrl_info, false);
3396                         break;
3397                 }
3398                 break;
3399         case IRQ_MODE_NONE:
3400                 switch (new_mode) {
3401                 case IRQ_MODE_MSIX:
3402                         sis_enable_msix(ctrl_info);
3403                         break;
3404                 case IRQ_MODE_INTX:
3405                         pqi_configure_legacy_intx(ctrl_info, true);
3406                         sis_enable_intx(ctrl_info);
3407                         break;
3408                 case IRQ_MODE_NONE:
3409                         break;
3410                 }
3411                 break;
3412         }
3413
3414         ctrl_info->irq_mode = new_mode;
3415 }
3416
3417 #define PQI_LEGACY_INTX_PENDING         0x1
3418
3419 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3420 {
3421         bool valid_irq;
3422         u32 intx_status;
3423
3424         switch (ctrl_info->irq_mode) {
3425         case IRQ_MODE_MSIX:
3426                 valid_irq = true;
3427                 break;
3428         case IRQ_MODE_INTX:
3429                 intx_status =
3430                         readl(&ctrl_info->pqi_registers->legacy_intx_status);
3431                 if (intx_status & PQI_LEGACY_INTX_PENDING)
3432                         valid_irq = true;
3433                 else
3434                         valid_irq = false;
3435                 break;
3436         case IRQ_MODE_NONE:
3437         default:
3438                 valid_irq = false;
3439                 break;
3440         }
3441
3442         return valid_irq;
3443 }
3444
3445 static irqreturn_t pqi_irq_handler(int irq, void *data)
3446 {
3447         struct pqi_ctrl_info *ctrl_info;
3448         struct pqi_queue_group *queue_group;
3449         unsigned int num_responses_handled;
3450
3451         queue_group = data;
3452         ctrl_info = queue_group->ctrl_info;
3453
3454         if (!pqi_is_valid_irq(ctrl_info))
3455                 return IRQ_NONE;
3456
3457         num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3458
3459         if (irq == ctrl_info->event_irq)
3460                 num_responses_handled += pqi_process_event_intr(ctrl_info);
3461
3462         if (num_responses_handled)
3463                 atomic_inc(&ctrl_info->num_interrupts);
3464
3465         pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3466         pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3467
3468         return IRQ_HANDLED;
3469 }
3470
3471 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3472 {
3473         struct pci_dev *pci_dev = ctrl_info->pci_dev;
3474         int i;
3475         int rc;
3476
3477         ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3478
3479         for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3480                 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3481                         DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3482                 if (rc) {
3483                         dev_err(&pci_dev->dev,
3484                                 "irq %u init failed with error %d\n",
3485                                 pci_irq_vector(pci_dev, i), rc);
3486                         return rc;
3487                 }
3488                 ctrl_info->num_msix_vectors_initialized++;
3489         }
3490
3491         return 0;
3492 }
3493
3494 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3495 {
3496         int i;
3497
3498         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3499                 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3500                         &ctrl_info->queue_groups[i]);
3501
3502         ctrl_info->num_msix_vectors_initialized = 0;
3503 }
3504
3505 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3506 {
3507         int num_vectors_enabled;
3508
3509         num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3510                         PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3511                         PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3512         if (num_vectors_enabled < 0) {
3513                 dev_err(&ctrl_info->pci_dev->dev,
3514                         "MSI-X init failed with error %d\n",
3515                         num_vectors_enabled);
3516                 return num_vectors_enabled;
3517         }
3518
3519         ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3520         ctrl_info->irq_mode = IRQ_MODE_MSIX;
3521         return 0;
3522 }
3523
3524 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3525 {
3526         if (ctrl_info->num_msix_vectors_enabled) {
3527                 pci_free_irq_vectors(ctrl_info->pci_dev);
3528                 ctrl_info->num_msix_vectors_enabled = 0;
3529         }
3530 }
3531
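/*
 * All operational queue element arrays and queue indexes are carved out
 * of a single coherent DMA allocation.  The size is computed by walking
 * a NULL-based pointer through the same sequence of alignment steps that
 * is applied to the real buffer afterwards, so the sizing pass and the
 * carving passes below must be kept in sync.
 */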
3532 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3533 {
3534         unsigned int i;
3535         size_t alloc_length;
3536         size_t element_array_length_per_iq;
3537         size_t element_array_length_per_oq;
3538         void *element_array;
3539         void __iomem *next_queue_index;
3540         void *aligned_pointer;
3541         unsigned int num_inbound_queues;
3542         unsigned int num_outbound_queues;
3543         unsigned int num_queue_indexes;
3544         struct pqi_queue_group *queue_group;
3545
3546         element_array_length_per_iq =
3547                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3548                 ctrl_info->num_elements_per_iq;
3549         element_array_length_per_oq =
3550                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3551                 ctrl_info->num_elements_per_oq;
3552         num_inbound_queues = ctrl_info->num_queue_groups * 2;
3553         num_outbound_queues = ctrl_info->num_queue_groups;
3554         num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3555
3556         aligned_pointer = NULL;
3557
3558         for (i = 0; i < num_inbound_queues; i++) {
3559                 aligned_pointer = PTR_ALIGN(aligned_pointer,
3560                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3561                 aligned_pointer += element_array_length_per_iq;
3562         }
3563
3564         for (i = 0; i < num_outbound_queues; i++) {
3565                 aligned_pointer = PTR_ALIGN(aligned_pointer,
3566                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3567                 aligned_pointer += element_array_length_per_oq;
3568         }
3569
3570         aligned_pointer = PTR_ALIGN(aligned_pointer,
3571                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3572         aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3573                 PQI_EVENT_OQ_ELEMENT_LENGTH;
3574
3575         for (i = 0; i < num_queue_indexes; i++) {
3576                 aligned_pointer = PTR_ALIGN(aligned_pointer,
3577                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3578                 aligned_pointer += sizeof(pqi_index_t);
3579         }
3580
3581         alloc_length = (size_t)aligned_pointer +
3582                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3583
3584         alloc_length += PQI_EXTRA_SGL_MEMORY;
3585
3586         ctrl_info->queue_memory_base =
3587                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3588                                    &ctrl_info->queue_memory_base_dma_handle,
3589                                    GFP_KERNEL);
3590
3591         if (!ctrl_info->queue_memory_base)
3592                 return -ENOMEM;
3593
3594         ctrl_info->queue_memory_length = alloc_length;
3595
3596         element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3597                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3598
3599         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3600                 queue_group = &ctrl_info->queue_groups[i];
3601                 queue_group->iq_element_array[RAID_PATH] = element_array;
3602                 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3603                         ctrl_info->queue_memory_base_dma_handle +
3604                                 (element_array - ctrl_info->queue_memory_base);
3605                 element_array += element_array_length_per_iq;
3606                 element_array = PTR_ALIGN(element_array,
3607                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3608                 queue_group->iq_element_array[AIO_PATH] = element_array;
3609                 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3610                         ctrl_info->queue_memory_base_dma_handle +
3611                         (element_array - ctrl_info->queue_memory_base);
3612                 element_array += element_array_length_per_iq;
3613                 element_array = PTR_ALIGN(element_array,
3614                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3615         }
3616
3617         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3618                 queue_group = &ctrl_info->queue_groups[i];
3619                 queue_group->oq_element_array = element_array;
3620                 queue_group->oq_element_array_bus_addr =
3621                         ctrl_info->queue_memory_base_dma_handle +
3622                         (element_array - ctrl_info->queue_memory_base);
3623                 element_array += element_array_length_per_oq;
3624                 element_array = PTR_ALIGN(element_array,
3625                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3626         }
3627
3628         ctrl_info->event_queue.oq_element_array = element_array;
3629         ctrl_info->event_queue.oq_element_array_bus_addr =
3630                 ctrl_info->queue_memory_base_dma_handle +
3631                 (element_array - ctrl_info->queue_memory_base);
3632         element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3633                 PQI_EVENT_OQ_ELEMENT_LENGTH;
3634
3635         next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
3636                 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3637
3638         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3639                 queue_group = &ctrl_info->queue_groups[i];
3640                 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3641                 queue_group->iq_ci_bus_addr[RAID_PATH] =
3642                         ctrl_info->queue_memory_base_dma_handle +
3643                         (next_queue_index -
3644                         (void __iomem *)ctrl_info->queue_memory_base);
3645                 next_queue_index += sizeof(pqi_index_t);
3646                 next_queue_index = PTR_ALIGN(next_queue_index,
3647                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3648                 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3649                 queue_group->iq_ci_bus_addr[AIO_PATH] =
3650                         ctrl_info->queue_memory_base_dma_handle +
3651                         (next_queue_index -
3652                         (void __iomem *)ctrl_info->queue_memory_base);
3653                 next_queue_index += sizeof(pqi_index_t);
3654                 next_queue_index = PTR_ALIGN(next_queue_index,
3655                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3656                 queue_group->oq_pi = next_queue_index;
3657                 queue_group->oq_pi_bus_addr =
3658                         ctrl_info->queue_memory_base_dma_handle +
3659                         (next_queue_index -
3660                         (void __iomem *)ctrl_info->queue_memory_base);
3661                 next_queue_index += sizeof(pqi_index_t);
3662                 next_queue_index = PTR_ALIGN(next_queue_index,
3663                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3664         }
3665
3666         ctrl_info->event_queue.oq_pi = next_queue_index;
3667         ctrl_info->event_queue.oq_pi_bus_addr =
3668                 ctrl_info->queue_memory_base_dma_handle +
3669                 (next_queue_index -
3670                 (void __iomem *)ctrl_info->queue_memory_base);
3671
3672         return 0;
3673 }
3674
3675 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3676 {
3677         unsigned int i;
3678         u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3679         u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3680
3681         /*
3682          * Initialize the backpointers to the controller structure in
3683          * each operational queue group structure.
3684          */
3685         for (i = 0; i < ctrl_info->num_queue_groups; i++)
3686                 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3687
3688         /*
3689          * Assign IDs to all operational queues.  Note that the IDs
3690          * assigned to operational IQs are independent of the IDs
3691          * assigned to operational OQs.
3692          */
3693         ctrl_info->event_queue.oq_id = next_oq_id++;
3694         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3695                 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3696                 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3697                 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3698         }
3699
3700         /*
3701          * Assign MSI-X table entry indexes to all queues.  Note that the
3702          * interrupt for the event queue is shared with the first queue group.
3703          */
3704         ctrl_info->event_queue.int_msg_num = 0;
3705         for (i = 0; i < ctrl_info->num_queue_groups; i++)
3706                 ctrl_info->queue_groups[i].int_msg_num = i;
3707
3708         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3709                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3710                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3711                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3712                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3713         }
3714 }
3715
3716 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3717 {
3718         size_t alloc_length;
3719         struct pqi_admin_queues_aligned *admin_queues_aligned;
3720         struct pqi_admin_queues *admin_queues;
3721
3722         alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3723                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3724
3725         ctrl_info->admin_queue_memory_base =
3726                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3727                                    &ctrl_info->admin_queue_memory_base_dma_handle,
3728                                    GFP_KERNEL);
3729
3730         if (!ctrl_info->admin_queue_memory_base)
3731                 return -ENOMEM;
3732
3733         ctrl_info->admin_queue_memory_length = alloc_length;
3734
3735         admin_queues = &ctrl_info->admin_queues;
3736         admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3737                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3738         admin_queues->iq_element_array =
3739                 &admin_queues_aligned->iq_element_array;
3740         admin_queues->oq_element_array =
3741                 &admin_queues_aligned->oq_element_array;
3742         admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3743         admin_queues->oq_pi =
3744                 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
3745
3746         admin_queues->iq_element_array_bus_addr =
3747                 ctrl_info->admin_queue_memory_base_dma_handle +
3748                 (admin_queues->iq_element_array -
3749                 ctrl_info->admin_queue_memory_base);
3750         admin_queues->oq_element_array_bus_addr =
3751                 ctrl_info->admin_queue_memory_base_dma_handle +
3752                 (admin_queues->oq_element_array -
3753                 ctrl_info->admin_queue_memory_base);
3754         admin_queues->iq_ci_bus_addr =
3755                 ctrl_info->admin_queue_memory_base_dma_handle +
3756                 ((void *)admin_queues->iq_ci -
3757                 ctrl_info->admin_queue_memory_base);
3758         admin_queues->oq_pi_bus_addr =
3759                 ctrl_info->admin_queue_memory_base_dma_handle +
3760                 ((void __iomem *)admin_queues->oq_pi -
3761                 (void __iomem *)ctrl_info->admin_queue_memory_base);
3762
3763         return 0;
3764 }
3765
3766 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES          PQI_HZ
3767 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS      1
3768
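/*
 * Program the admin queue addresses and element counts into the PQI
 * device registers, issue the create admin queue pair function, and
 * poll (for up to 1 second) for the function/status code to return to
 * idle before reading back the doorbell offsets.
 */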
3769 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3770 {
3771         struct pqi_device_registers __iomem *pqi_registers;
3772         struct pqi_admin_queues *admin_queues;
3773         unsigned long timeout;
3774         u8 status;
3775         u32 reg;
3776
3777         pqi_registers = ctrl_info->pqi_registers;
3778         admin_queues = &ctrl_info->admin_queues;
3779
3780         writeq((u64)admin_queues->iq_element_array_bus_addr,
3781                 &pqi_registers->admin_iq_element_array_addr);
3782         writeq((u64)admin_queues->oq_element_array_bus_addr,
3783                 &pqi_registers->admin_oq_element_array_addr);
3784         writeq((u64)admin_queues->iq_ci_bus_addr,
3785                 &pqi_registers->admin_iq_ci_addr);
3786         writeq((u64)admin_queues->oq_pi_bus_addr,
3787                 &pqi_registers->admin_oq_pi_addr);
3788
3789         reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3790                 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3791                 (admin_queues->int_msg_num << 16);
3792         writel(reg, &pqi_registers->admin_iq_num_elements);
3793         writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3794                 &pqi_registers->function_and_status_code);
3795
3796         timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3797         while (1) {
3798                 status = readb(&pqi_registers->function_and_status_code);
3799                 if (status == PQI_STATUS_IDLE)
3800                         break;
3801                 if (time_after(jiffies, timeout))
3802                         return -ETIMEDOUT;
3803                 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3804         }
3805
3806         /*
3807          * The offset registers are not initialized to the correct
3808          * offsets until *after* the create admin queue pair command
3809          * completes successfully.
3810          */
3811         admin_queues->iq_pi = ctrl_info->iomem_base +
3812                 PQI_DEVICE_REGISTERS_OFFSET +
3813                 readq(&pqi_registers->admin_iq_pi_offset);
3814         admin_queues->oq_ci = ctrl_info->iomem_base +
3815                 PQI_DEVICE_REGISTERS_OFFSET +
3816                 readq(&pqi_registers->admin_oq_ci_offset);
3817
3818         return 0;
3819 }
3820
3821 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3822         struct pqi_general_admin_request *request)
3823 {
3824         struct pqi_admin_queues *admin_queues;
3825         void *next_element;
3826         pqi_index_t iq_pi;
3827
3828         admin_queues = &ctrl_info->admin_queues;
3829         iq_pi = admin_queues->iq_pi_copy;
3830
3831         next_element = admin_queues->iq_element_array +
3832                 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3833
3834         memcpy(next_element, request, sizeof(*request));
3835
3836         iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3837         admin_queues->iq_pi_copy = iq_pi;
3838
3839         /*
3840          * This write notifies the controller that an IU is available to be
3841          * processed.
3842          */
3843         writel(iq_pi, admin_queues->iq_pi);
3844 }
3845
3846 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS  60
3847
3848 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3849         struct pqi_general_admin_response *response)
3850 {
3851         struct pqi_admin_queues *admin_queues;
3852         pqi_index_t oq_pi;
3853         pqi_index_t oq_ci;
3854         unsigned long timeout;
3855
3856         admin_queues = &ctrl_info->admin_queues;
3857         oq_ci = admin_queues->oq_ci_copy;
3858
3859         timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
3860
3861         while (1) {
3862                 oq_pi = readl(admin_queues->oq_pi);
3863                 if (oq_pi != oq_ci)
3864                         break;
3865                 if (time_after(jiffies, timeout)) {
3866                         dev_err(&ctrl_info->pci_dev->dev,
3867                                 "timed out waiting for admin response\n");
3868                         return -ETIMEDOUT;
3869                 }
3870                 if (!sis_is_firmware_running(ctrl_info))
3871                         return -ENXIO;
3872                 usleep_range(1000, 2000);
3873         }
3874
3875         memcpy(response, admin_queues->oq_element_array +
3876                 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3877
3878         oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3879         admin_queues->oq_ci_copy = oq_ci;
3880         writel(oq_ci, admin_queues->oq_ci);
3881
3882         return 0;
3883 }
3884
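/*
 * Submit I/O on one path of a queue group.  A NULL io_request just
 * retries whatever is already queued on the request list.  Each IU is
 * copied into the inbound queue, using a second memcpy when it spans
 * the end of the element array, and the producer index doorbell is
 * written once for the entire batch.  Requests that do not fit are left
 * on the list for a later invocation.
 */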
3885 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3886         struct pqi_queue_group *queue_group, enum pqi_io_path path,
3887         struct pqi_io_request *io_request)
3888 {
3889         struct pqi_io_request *next;
3890         void *next_element;
3891         pqi_index_t iq_pi;
3892         pqi_index_t iq_ci;
3893         size_t iu_length;
3894         unsigned long flags;
3895         unsigned int num_elements_needed;
3896         unsigned int num_elements_to_end_of_queue;
3897         size_t copy_count;
3898         struct pqi_iu_header *request;
3899
3900         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3901
3902         if (io_request) {
3903                 io_request->queue_group = queue_group;
3904                 list_add_tail(&io_request->request_list_entry,
3905                         &queue_group->request_list[path]);
3906         }
3907
3908         iq_pi = queue_group->iq_pi_copy[path];
3909
3910         list_for_each_entry_safe(io_request, next,
3911                 &queue_group->request_list[path], request_list_entry) {
3912
3913                 request = io_request->iu;
3914
3915                 iu_length = get_unaligned_le16(&request->iu_length) +
3916                         PQI_REQUEST_HEADER_LENGTH;
3917                 num_elements_needed =
3918                         DIV_ROUND_UP(iu_length,
3919                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3920
3921                 iq_ci = readl(queue_group->iq_ci[path]);
3922
3923                 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3924                         ctrl_info->num_elements_per_iq))
3925                         break;
3926
3927                 put_unaligned_le16(queue_group->oq_id,
3928                         &request->response_queue_id);
3929
3930                 next_element = queue_group->iq_element_array[path] +
3931                         (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3932
3933                 num_elements_to_end_of_queue =
3934                         ctrl_info->num_elements_per_iq - iq_pi;
3935
3936                 if (num_elements_needed <= num_elements_to_end_of_queue) {
3937                         memcpy(next_element, request, iu_length);
3938                 } else {
3939                         copy_count = num_elements_to_end_of_queue *
3940                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3941                         memcpy(next_element, request, copy_count);
3942                         memcpy(queue_group->iq_element_array[path],
3943                                 (u8 *)request + copy_count,
3944                                 iu_length - copy_count);
3945                 }
3946
3947                 iq_pi = (iq_pi + num_elements_needed) %
3948                         ctrl_info->num_elements_per_iq;
3949
3950                 list_del(&io_request->request_list_entry);
3951         }
3952
3953         if (iq_pi != queue_group->iq_pi_copy[path]) {
3954                 queue_group->iq_pi_copy[path] = iq_pi;
3955                 /*
3956                  * This write notifies the controller that one or more IUs are
3957                  * available to be processed.
3958                  */
3959                 writel(iq_pi, queue_group->iq_pi[path]);
3960         }
3961
3962         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3963 }
3964
3965 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS         10
3966
3967 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3968         struct completion *wait)
3969 {
3970         int rc;
3971
3972         while (1) {
3973                 if (wait_for_completion_io_timeout(wait,
3974                         PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
3975                         rc = 0;
3976                         break;
3977                 }
3978
3979                 pqi_check_ctrl_health(ctrl_info);
3980                 if (pqi_ctrl_offline(ctrl_info)) {
3981                         rc = -ENXIO;
3982                         break;
3983                 }
3984         }
3985
3986         return rc;
3987 }
3988
3989 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3990         void *context)
3991 {
3992         struct completion *waiting = context;
3993
3994         complete(waiting);
3995 }
3996
3997 static int pqi_process_raid_io_error_synchronous(
3998         struct pqi_raid_error_info *error_info)
3999 {
4000         int rc = -EIO;
4001
4002         switch (error_info->data_out_result) {
4003         case PQI_DATA_IN_OUT_GOOD:
4004                 if (error_info->status == SAM_STAT_GOOD)
4005                         rc = 0;
4006                 break;
4007         case PQI_DATA_IN_OUT_UNDERFLOW:
4008                 if (error_info->status == SAM_STAT_GOOD ||
4009                         error_info->status == SAM_STAT_CHECK_CONDITION)
4010                         rc = 0;
4011                 break;
4012         case PQI_DATA_IN_OUT_ABORTED:
4013                 rc = PQI_CMD_STATUS_ABORTED;
4014                 break;
4015         }
4016
4017         return rc;
4018 }
4019
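/*
 * Submit a RAID path request and wait for it to complete.  Submissions
 * are serialized on sync_request_sem (taken interruptibly or with a
 * timeout, per the flags), time spent blocked by the controller is
 * charged against the caller's timeout, and on completion the raw error
 * info is either copied back to the caller or folded into the return
 * code.  A typical caller fills in a request structure and does, e.g.:
 *
 *      rc = pqi_submit_raid_request_synchronous(ctrl_info,
 *              &request.header, 0, NULL, NO_TIMEOUT);
 */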
4020 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4021         struct pqi_iu_header *request, unsigned int flags,
4022         struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
4023 {
4024         int rc = 0;
4025         struct pqi_io_request *io_request;
4026         unsigned long start_jiffies;
4027         unsigned long msecs_blocked;
4028         size_t iu_length;
4029         DECLARE_COMPLETION_ONSTACK(wait);
4030
4031         /*
4032          * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value are
4033          * mutually exclusive.
4034          */
4035
4036         if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4037                 if (down_interruptible(&ctrl_info->sync_request_sem))
4038                         return -ERESTARTSYS;
4039         } else {
4040                 if (timeout_msecs == NO_TIMEOUT) {
4041                         down(&ctrl_info->sync_request_sem);
4042                 } else {
4043                         start_jiffies = jiffies;
4044                         if (down_timeout(&ctrl_info->sync_request_sem,
4045                                 msecs_to_jiffies(timeout_msecs)))
4046                                 return -ETIMEDOUT;
4047                         msecs_blocked =
4048                                 jiffies_to_msecs(jiffies - start_jiffies);
4049                         if (msecs_blocked >= timeout_msecs)
4050                                 return -ETIMEDOUT;
4051                         timeout_msecs -= msecs_blocked;
4052                 }
4053         }
4054
4055         pqi_ctrl_busy(ctrl_info);
4056         timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
4057         if (timeout_msecs == 0) {
4058                 pqi_ctrl_unbusy(ctrl_info);
4059                 rc = -ETIMEDOUT;
4060                 goto out;
4061         }
4062
4063         if (pqi_ctrl_offline(ctrl_info)) {
4064                 pqi_ctrl_unbusy(ctrl_info);
4065                 rc = -ENXIO;
4066                 goto out;
4067         }
4068
4069         io_request = pqi_alloc_io_request(ctrl_info);
4070
4071         put_unaligned_le16(io_request->index,
4072                 &(((struct pqi_raid_path_request *)request)->request_id));
4073
4074         if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4075                 ((struct pqi_raid_path_request *)request)->error_index =
4076                         ((struct pqi_raid_path_request *)request)->request_id;
4077
4078         iu_length = get_unaligned_le16(&request->iu_length) +
4079                 PQI_REQUEST_HEADER_LENGTH;
4080         memcpy(io_request->iu, request, iu_length);
4081
4082         io_request->io_complete_callback = pqi_raid_synchronous_complete;
4083         io_request->context = &wait;
4084
4085         pqi_start_io(ctrl_info,
4086                 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4087                 io_request);
4088
4089         pqi_ctrl_unbusy(ctrl_info);
4090
4091         if (timeout_msecs == NO_TIMEOUT) {
4092                 pqi_wait_for_completion_io(ctrl_info, &wait);
4093         } else {
4094                 if (!wait_for_completion_io_timeout(&wait,
4095                         msecs_to_jiffies(timeout_msecs))) {
4096                         dev_warn(&ctrl_info->pci_dev->dev,
4097                                 "command timed out\n");
4098                         rc = -ETIMEDOUT;
4099                 }
4100         }
4101
4102         if (error_info) {
4103                 if (io_request->error_info)
4104                         memcpy(error_info, io_request->error_info,
4105                                 sizeof(*error_info));
4106                 else
4107                         memset(error_info, 0, sizeof(*error_info));
4108         } else if (rc == 0 && io_request->error_info) {
4109                 rc = pqi_process_raid_io_error_synchronous(
4110                         io_request->error_info);
4111         }
4112
4113         pqi_free_io_request(io_request);
4114
4115 out:
4116         up(&ctrl_info->sync_request_sem);
4117
4118         return rc;
4119 }
4120
4121 static int pqi_validate_admin_response(
4122         struct pqi_general_admin_response *response, u8 expected_function_code)
4123 {
4124         if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4125                 return -EINVAL;
4126
4127         if (get_unaligned_le16(&response->header.iu_length) !=
4128                 PQI_GENERAL_ADMIN_IU_LENGTH)
4129                 return -EINVAL;
4130
4131         if (response->function_code != expected_function_code)
4132                 return -EINVAL;
4133
4134         if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4135                 return -EINVAL;
4136
4137         return 0;
4138 }
4139
4140 static int pqi_submit_admin_request_synchronous(
4141         struct pqi_ctrl_info *ctrl_info,
4142         struct pqi_general_admin_request *request,
4143         struct pqi_general_admin_response *response)
4144 {
4145         int rc;
4146
4147         pqi_submit_admin_request(ctrl_info, request);
4148
4149         rc = pqi_poll_for_admin_response(ctrl_info, response);
4150
4151         if (rc == 0)
4152                 rc = pqi_validate_admin_response(response,
4153                         request->function_code);
4154
4155         return rc;
4156 }
4157
4158 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4159 {
4160         int rc;
4161         struct pqi_general_admin_request request;
4162         struct pqi_general_admin_response response;
4163         struct pqi_device_capability *capability;
4164         struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4165
4166         capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4167         if (!capability)
4168                 return -ENOMEM;
4169
4170         memset(&request, 0, sizeof(request));
4171
4172         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4173         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4174                 &request.header.iu_length);
4175         request.function_code =
4176                 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4177         put_unaligned_le32(sizeof(*capability),
4178                 &request.data.report_device_capability.buffer_length);
4179
4180         rc = pqi_map_single(ctrl_info->pci_dev,
4181                 &request.data.report_device_capability.sg_descriptor,
4182                 capability, sizeof(*capability),
4183                 DMA_FROM_DEVICE);
4184         if (rc)
4185                 goto out;
4186
4187         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4188                 &response);
4189
4190         pqi_pci_unmap(ctrl_info->pci_dev,
4191                 &request.data.report_device_capability.sg_descriptor, 1,
4192                 DMA_FROM_DEVICE);
4193
4194         if (rc)
4195                 goto out;
4196
4197         if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4198                 rc = -EIO;
4199                 goto out;
4200         }
4201
4202         ctrl_info->max_inbound_queues =
4203                 get_unaligned_le16(&capability->max_inbound_queues);
4204         ctrl_info->max_elements_per_iq =
4205                 get_unaligned_le16(&capability->max_elements_per_iq);
4206         ctrl_info->max_iq_element_length =
4207                 get_unaligned_le16(&capability->max_iq_element_length)
4208                 * 16;
4209         ctrl_info->max_outbound_queues =
4210                 get_unaligned_le16(&capability->max_outbound_queues);
4211         ctrl_info->max_elements_per_oq =
4212                 get_unaligned_le16(&capability->max_elements_per_oq);
4213         ctrl_info->max_oq_element_length =
4214                 get_unaligned_le16(&capability->max_oq_element_length)
4215                 * 16;
4216
4217         sop_iu_layer_descriptor =
4218                 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4219
4220         ctrl_info->max_inbound_iu_length_per_firmware =
4221                 get_unaligned_le16(
4222                         &sop_iu_layer_descriptor->max_inbound_iu_length);
4223         ctrl_info->inbound_spanning_supported =
4224                 sop_iu_layer_descriptor->inbound_spanning_supported;
4225         ctrl_info->outbound_spanning_supported =
4226                 sop_iu_layer_descriptor->outbound_spanning_supported;
4227
4228 out:
4229         kfree(capability);
4230
4231         return rc;
4232 }
4233
4234 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4235 {
4236         if (ctrl_info->max_iq_element_length <
4237                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4238                 dev_err(&ctrl_info->pci_dev->dev,
4239                         "max. inbound queue element length of %d is less than the required length of %d\n",
4240                         ctrl_info->max_iq_element_length,
4241                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4242                 return -EINVAL;
4243         }
4244
4245         if (ctrl_info->max_oq_element_length <
4246                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4247                 dev_err(&ctrl_info->pci_dev->dev,
4248                         "max. outbound queue element length of %d is less than the required length of %d\n",
4249                         ctrl_info->max_oq_element_length,
4250                         PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4251                 return -EINVAL;
4252         }
4253
4254         if (ctrl_info->max_inbound_iu_length_per_firmware <
4255                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4256                 dev_err(&ctrl_info->pci_dev->dev,
4257                         "max. inbound IU length of %u is less than the min. required length of %d\n",
4258                         ctrl_info->max_inbound_iu_length_per_firmware,
4259                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4260                 return -EINVAL;
4261         }
4262
4263         if (!ctrl_info->inbound_spanning_supported) {
4264                 dev_err(&ctrl_info->pci_dev->dev,
4265                         "the controller does not support inbound spanning\n");
4266                 return -EINVAL;
4267         }
4268
4269         if (ctrl_info->outbound_spanning_supported) {
4270                 dev_err(&ctrl_info->pci_dev->dev,
4271                         "the controller supports outbound spanning but this driver does not\n");
4272                 return -EINVAL;
4273         }
4274
4275         return 0;
4276 }
4277
4278 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4279 {
4280         int rc;
4281         struct pqi_event_queue *event_queue;
4282         struct pqi_general_admin_request request;
4283         struct pqi_general_admin_response response;
4284
4285         event_queue = &ctrl_info->event_queue;
4286
4287         /*
4288          * Create an OQ (Outbound Queue - device to host queue) dedicated
4289          * to event notifications.
4290          */
4291         memset(&request, 0, sizeof(request));
4292         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4293         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4294                 &request.header.iu_length);
4295         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4296         put_unaligned_le16(event_queue->oq_id,
4297                 &request.data.create_operational_oq.queue_id);
4298         put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4299                 &request.data.create_operational_oq.element_array_addr);
4300         put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4301                 &request.data.create_operational_oq.pi_addr);
4302         put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4303                 &request.data.create_operational_oq.num_elements);
4304         put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4305                 &request.data.create_operational_oq.element_length);
4306         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4307         put_unaligned_le16(event_queue->int_msg_num,
4308                 &request.data.create_operational_oq.int_msg_num);
4309
4310         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4311                 &response);
4312         if (rc)
4313                 return rc;
4314
4315         event_queue->oq_ci = ctrl_info->iomem_base +
4316                 PQI_DEVICE_REGISTERS_OFFSET +
4317                 get_unaligned_le64(
4318                         &response.data.create_operational_oq.oq_ci_offset);
4319
4320         return 0;
4321 }
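
/*
 * The CREATE OQ response carries the byte offset of the queue's consumer
 * index register relative to the device registers region, so the oq_ci
 * doorbell pointer computed above is simply iomem_base +
 * PQI_DEVICE_REGISTERS_OFFSET + offset.  pqi_create_queue_group() below
 * uses the same pattern for the IQ producer index registers.
 */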
4322
4323 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4324         unsigned int group_number)
4325 {
4326         int rc;
4327         struct pqi_queue_group *queue_group;
4328         struct pqi_general_admin_request request;
4329         struct pqi_general_admin_response response;
4330
4331         queue_group = &ctrl_info->queue_groups[group_number];
4332
4333         /*
4334          * Create IQ (Inbound Queue - host to device queue) for
4335          * RAID path.
4336          */
4337         memset(&request, 0, sizeof(request));
4338         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4339         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4340                 &request.header.iu_length);
4341         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4342         put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4343                 &request.data.create_operational_iq.queue_id);
4344         put_unaligned_le64(
4345                 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4346                 &request.data.create_operational_iq.element_array_addr);
4347         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4348                 &request.data.create_operational_iq.ci_addr);
4349         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4350                 &request.data.create_operational_iq.num_elements);
4351         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4352                 &request.data.create_operational_iq.element_length);
4353         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4354
4355         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4356                 &response);
4357         if (rc) {
4358                 dev_err(&ctrl_info->pci_dev->dev,
4359                         "error creating inbound RAID queue\n");
4360                 return rc;
4361         }
4362
4363         queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4364                 PQI_DEVICE_REGISTERS_OFFSET +
4365                 get_unaligned_le64(
4366                         &response.data.create_operational_iq.iq_pi_offset);
4367
4368         /*
4369          * Create IQ (Inbound Queue - host to device queue) for
4370          * Advanced I/O (AIO) path.
4371          */
4372         memset(&request, 0, sizeof(request));
4373         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4374         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4375                 &request.header.iu_length);
4376         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4377         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4378                 &request.data.create_operational_iq.queue_id);
4379         put_unaligned_le64(
4380                 (u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
4381                 &request.data.create_operational_iq.element_array_addr);
4382         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4383                 &request.data.create_operational_iq.ci_addr);
4384         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4385                 &request.data.create_operational_iq.num_elements);
4386         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4387                 &request.data.create_operational_iq.element_length);
4388         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4389
4390         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4391                 &response);
4392         if (rc) {
4393                 dev_err(&ctrl_info->pci_dev->dev,
4394                         "error creating inbound AIO queue\n");
4395                 return rc;
4396         }
4397
4398         queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4399                 PQI_DEVICE_REGISTERS_OFFSET +
4400                 get_unaligned_le64(
4401                         &response.data.create_operational_iq.iq_pi_offset);
4402
4403         /*
4404          * Designate the 2nd IQ as the AIO path.  By default, all IQs are
4405          * assumed to be for RAID path I/O unless we change the queue's
4406          * property.
4407          */
4408         memset(&request, 0, sizeof(request));
4409         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4410         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4411                 &request.header.iu_length);
4412         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4413         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4414                 &request.data.change_operational_iq_properties.queue_id);
4415         put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4416                 &request.data.change_operational_iq_properties.vendor_specific);
4417
4418         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4419                 &response);
4420         if (rc) {
4421                 dev_err(&ctrl_info->pci_dev->dev,
4422                         "error changing queue property\n");
4423                 return rc;
4424         }
4425
4426         /*
4427          * Create OQ (Outbound Queue - device to host queue).
4428          */
4429         memset(&request, 0, sizeof(request));
4430         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4431         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4432                 &request.header.iu_length);
4433         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4434         put_unaligned_le16(queue_group->oq_id,
4435                 &request.data.create_operational_oq.queue_id);
4436         put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4437                 &request.data.create_operational_oq.element_array_addr);
4438         put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4439                 &request.data.create_operational_oq.pi_addr);
4440         put_unaligned_le16(ctrl_info->num_elements_per_oq,
4441                 &request.data.create_operational_oq.num_elements);
4442         put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4443                 &request.data.create_operational_oq.element_length);
4444         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4445         put_unaligned_le16(queue_group->int_msg_num,
4446                 &request.data.create_operational_oq.int_msg_num);
4447
4448         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4449                 &response);
4450         if (rc) {
4451                 dev_err(&ctrl_info->pci_dev->dev,
4452                         "error creating outbound queue\n");
4453                 return rc;
4454         }
4455
4456         queue_group->oq_ci = ctrl_info->iomem_base +
4457                 PQI_DEVICE_REGISTERS_OFFSET +
4458                 get_unaligned_le64(
4459                         &response.data.create_operational_oq.oq_ci_offset);
4460
4461         return 0;
4462 }
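
/*
 * To summarize, each queue group created above consists of:
 *   - an inbound queue for RAID path requests,
 *   - an inbound queue re-designated for the AIO path via the CHANGE IQ
 *     PROPERTY admin function (all IQs default to the RAID path), and
 *   - one outbound queue, shared by both paths, whose completions are
 *     signalled on the group's MSI-X vector (int_msg_num).
 */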
4463
4464 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4465 {
4466         int rc;
4467         unsigned int i;
4468
4469         rc = pqi_create_event_queue(ctrl_info);
4470         if (rc) {
4471                 dev_err(&ctrl_info->pci_dev->dev,
4472                         "error creating event queue\n");
4473                 return rc;
4474         }
4475
4476         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4477                 rc = pqi_create_queue_group(ctrl_info, i);
4478                 if (rc) {
4479                         dev_err(&ctrl_info->pci_dev->dev,
4480                                 "error creating queue group number %u/%u\n",
4481                                 i, ctrl_info->num_queue_groups);
4482                         return rc;
4483                 }
4484         }
4485
4486         return 0;
4487 }
4488
4489 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH   \
4490         (offsetof(struct pqi_event_config, descriptors) + \
4491         (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4492
4493 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4494         bool enable_events)
4495 {
4496         int rc;
4497         unsigned int i;
4498         struct pqi_event_config *event_config;
4499         struct pqi_event_descriptor *event_descriptor;
4500         struct pqi_general_management_request request;
4501
4502         event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4503                 GFP_KERNEL);
4504         if (!event_config)
4505                 return -ENOMEM;
4506
4507         memset(&request, 0, sizeof(request));
4508
4509         request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4510         put_unaligned_le16(offsetof(struct pqi_general_management_request,
4511                 data.report_event_configuration.sg_descriptors[1]) -
4512                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4513         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4514                 &request.data.report_event_configuration.buffer_length);
4515
4516         rc = pqi_map_single(ctrl_info->pci_dev,
4517                 request.data.report_event_configuration.sg_descriptors,
4518                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4519                 DMA_FROM_DEVICE);
4520         if (rc)
4521                 goto out;
4522
4523         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4524                 0, NULL, NO_TIMEOUT);
4525
4526         pqi_pci_unmap(ctrl_info->pci_dev,
4527                 request.data.report_event_configuration.sg_descriptors, 1,
4528                 DMA_FROM_DEVICE);
4529
4530         if (rc)
4531                 goto out;
4532
4533         for (i = 0; i < event_config->num_event_descriptors; i++) {
4534                 event_descriptor = &event_config->descriptors[i];
4535                 if (enable_events &&
4536                         pqi_is_supported_event(event_descriptor->event_type))
4537                         put_unaligned_le16(ctrl_info->event_queue.oq_id,
4538                                         &event_descriptor->oq_id);
4539                 else
4540                         put_unaligned_le16(0, &event_descriptor->oq_id);
4541         }
4542
4543         memset(&request, 0, sizeof(request));
4544
4545         request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4546         put_unaligned_le16(offsetof(struct pqi_general_management_request,
4547                 data.report_event_configuration.sg_descriptors[1]) -
4548                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4549         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4550                 &request.data.report_event_configuration.buffer_length);
4551
4552         rc = pqi_map_single(ctrl_info->pci_dev,
4553                 request.data.report_event_configuration.sg_descriptors,
4554                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4555                 DMA_TO_DEVICE);
4556         if (rc)
4557                 goto out;
4558
4559         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4560                 NULL, NO_TIMEOUT);
4561
4562         pqi_pci_unmap(ctrl_info->pci_dev,
4563                 request.data.report_event_configuration.sg_descriptors, 1,
4564                 DMA_TO_DEVICE);
4565
4566 out:
4567         kfree(event_config);
4568
4569         return rc;
4570 }
4571
4572 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4573 {
4574         return pqi_configure_events(ctrl_info, true);
4575 }
4576
4577 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4578 {
4579         return pqi_configure_events(ctrl_info, false);
4580 }
4581
4582 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4583 {
4584         unsigned int i;
4585         struct device *dev;
4586         size_t sg_chain_buffer_length;
4587         struct pqi_io_request *io_request;
4588
4589         if (!ctrl_info->io_request_pool)
4590                 return;
4591
4592         dev = &ctrl_info->pci_dev->dev;
4593         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4594         io_request = ctrl_info->io_request_pool;
4595
4596         for (i = 0; i < ctrl_info->max_io_slots; i++) {
4597                 kfree(io_request->iu);
4598                 if (!io_request->sg_chain_buffer)
4599                         break;
4600                 dma_free_coherent(dev, sg_chain_buffer_length,
4601                         io_request->sg_chain_buffer,
4602                         io_request->sg_chain_buffer_dma_handle);
4603                 io_request++;
4604         }
4605
4606         kfree(ctrl_info->io_request_pool);
4607         ctrl_info->io_request_pool = NULL;
4608 }
4609
4610 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4611 {
4612         ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4613                                                      ctrl_info->error_buffer_length,
4614                                                      &ctrl_info->error_buffer_dma_handle,
4615                                                      GFP_KERNEL);
4616
4617         if (!ctrl_info->error_buffer)
4618                 return -ENOMEM;
4619
4620         return 0;
4621 }
4622
4623 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4624 {
4625         unsigned int i;
4626         void *sg_chain_buffer;
4627         size_t sg_chain_buffer_length;
4628         dma_addr_t sg_chain_buffer_dma_handle;
4629         struct device *dev;
4630         struct pqi_io_request *io_request;
4631
4632         ctrl_info->io_request_pool =
4633                 kcalloc(ctrl_info->max_io_slots,
4634                         sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4635
4636         if (!ctrl_info->io_request_pool) {
4637                 dev_err(&ctrl_info->pci_dev->dev,
4638                         "failed to allocate I/O request pool\n");
4639                 goto error;
4640         }
4641
4642         dev = &ctrl_info->pci_dev->dev;
4643         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4644         io_request = ctrl_info->io_request_pool;
4645
4646         for (i = 0; i < ctrl_info->max_io_slots; i++) {
4647                 io_request->iu =
4648                         kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4649
4650                 if (!io_request->iu) {
4651                         dev_err(&ctrl_info->pci_dev->dev,
4652                                 "failed to allocate IU buffers\n");
4653                         goto error;
4654                 }
4655
4656                 sg_chain_buffer = dma_alloc_coherent(dev,
4657                         sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4658                         GFP_KERNEL);
4659
4660                 if (!sg_chain_buffer) {
4661                         dev_err(&ctrl_info->pci_dev->dev,
4662                                 "failed to allocate PQI scatter-gather chain buffers\n");
4663                         goto error;
4664                 }
4665
4666                 io_request->index = i;
4667                 io_request->sg_chain_buffer = sg_chain_buffer;
4668                 io_request->sg_chain_buffer_dma_handle =
4669                         sg_chain_buffer_dma_handle;
4670                 io_request++;
4671         }
4672
4673         return 0;
4674
4675 error:
4676         pqi_free_all_io_requests(ctrl_info);
4677
4678         return -ENOMEM;
4679 }
4680
4681 /*
4682  * Calculate required resources that are sized based on max. outstanding
4683  * requests and max. transfer size.
4684  */
4685
4686 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4687 {
4688         u32 max_transfer_size;
4689         u32 max_sg_entries;
4690
4691         ctrl_info->scsi_ml_can_queue =
4692                 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4693         ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4694
4695         ctrl_info->error_buffer_length =
4696                 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4697
4698         if (reset_devices)
4699                 max_transfer_size = min(ctrl_info->max_transfer_size,
4700                         PQI_MAX_TRANSFER_SIZE_KDUMP);
4701         else
4702                 max_transfer_size = min(ctrl_info->max_transfer_size,
4703                         PQI_MAX_TRANSFER_SIZE);
4704
4705         max_sg_entries = max_transfer_size / PAGE_SIZE;
4706
4707         /* +1 to cover when the buffer is not page-aligned. */
4708         max_sg_entries++;
4709
4710         max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4711
4712         max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4713
4714         ctrl_info->sg_chain_buffer_length =
4715                 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4716                 PQI_EXTRA_SGL_MEMORY;
4717         ctrl_info->sg_tablesize = max_sg_entries;
4718         ctrl_info->max_sectors = max_transfer_size / 512;
4719 }
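
/*
 * Worked example for the sizing above, with illustrative numbers only
 * (assuming a 4 KiB PAGE_SIZE and a 1 MiB effective max transfer size):
 *
 *	max_sg_entries = (1024 * 1024) / 4096 + 1 = 257
 *
 * After clamping against the controller's own max_sg_entries limit, the
 * usable transfer size is recomputed as (max_sg_entries - 1) * PAGE_SIZE
 * so that a buffer that is not page-aligned still fits, and max_sectors
 * follows by dividing by the 512-byte sector size.
 */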
4720
4721 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4722 {
4723         int num_queue_groups;
4724         u16 num_elements_per_iq;
4725         u16 num_elements_per_oq;
4726
4727         if (reset_devices) {
4728                 num_queue_groups = 1;
4729         } else {
4730                 int num_cpus;
4731                 int max_queue_groups;
4732
4733                 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4734                         ctrl_info->max_outbound_queues - 1);
4735                 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4736
4737                 num_cpus = num_online_cpus();
4738                 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4739                 num_queue_groups = min(num_queue_groups, max_queue_groups);
4740         }
4741
4742         ctrl_info->num_queue_groups = num_queue_groups;
4743         ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4744
4745         /*
4746          * Make sure that the max. inbound IU length is an exact multiple
4747          * of our inbound element length.
4748          */
4749         ctrl_info->max_inbound_iu_length =
4750                 (ctrl_info->max_inbound_iu_length_per_firmware /
4751                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4752                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4753
4754         num_elements_per_iq =
4755                 (ctrl_info->max_inbound_iu_length /
4756                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4757
4758         /* Add one because one element in each queue is unusable. */
4759         num_elements_per_iq++;
4760
4761         num_elements_per_iq = min(num_elements_per_iq,
4762                 ctrl_info->max_elements_per_iq);
4763
4764         num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4765         num_elements_per_oq = min(num_elements_per_oq,
4766                 ctrl_info->max_elements_per_oq);
4767
4768         ctrl_info->num_elements_per_iq = num_elements_per_iq;
4769         ctrl_info->num_elements_per_oq = num_elements_per_oq;
4770
4771         ctrl_info->max_sg_per_iu =
4772                 ((ctrl_info->max_inbound_iu_length -
4773                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4774                 sizeof(struct pqi_sg_descriptor)) +
4775                 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4776 }
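
/*
 * Element-count example for the arithmetic above, again with
 * illustrative numbers: if the firmware reports a max. inbound IU length
 * of 1068 bytes and the IQ element length is 128 bytes, the IU length is
 * first rounded down to 1024 (an exact multiple of the element length),
 * giving 1024 / 128 = 8, plus 1 for the permanently unusable element in
 * each queue = 9 elements per IQ.  The OQ is then sized at
 * ((9 - 1) * 2) + 1 = 17 elements - roughly double the IQ depth, to
 * match the two inbound queues that feed each outbound queue.
 */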
4777
4778 static inline void pqi_set_sg_descriptor(
4779         struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4780 {
4781         u64 address = (u64)sg_dma_address(sg);
4782         unsigned int length = sg_dma_len(sg);
4783
4784         put_unaligned_le64(address, &sg_descriptor->address);
4785         put_unaligned_le32(length, &sg_descriptor->length);
4786         put_unaligned_le32(0, &sg_descriptor->flags);
4787 }
4788
4789 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4790         struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4791         struct pqi_io_request *io_request)
4792 {
4793         int i;
4794         u16 iu_length;
4795         int sg_count;
4796         bool chained;
4797         unsigned int num_sg_in_iu;
4798         unsigned int max_sg_per_iu;
4799         struct scatterlist *sg;
4800         struct pqi_sg_descriptor *sg_descriptor;
4801
4802         sg_count = scsi_dma_map(scmd);
4803         if (sg_count < 0)
4804                 return sg_count;
4805
4806         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4807                 PQI_REQUEST_HEADER_LENGTH;
4808
4809         if (sg_count == 0)
4810                 goto out;
4811
4812         sg = scsi_sglist(scmd);
4813         sg_descriptor = request->sg_descriptors;
4814         max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4815         chained = false;
4816         num_sg_in_iu = 0;
4817         i = 0;
4818
4819         while (1) {
4820                 pqi_set_sg_descriptor(sg_descriptor, sg);
4821                 if (!chained)
4822                         num_sg_in_iu++;
4823                 i++;
4824                 if (i == sg_count)
4825                         break;
4826                 sg_descriptor++;
4827                 if (i == max_sg_per_iu) {
4828                         put_unaligned_le64(
4829                                 (u64)io_request->sg_chain_buffer_dma_handle,
4830                                 &sg_descriptor->address);
4831                         put_unaligned_le32((sg_count - num_sg_in_iu)
4832                                 * sizeof(*sg_descriptor),
4833                                 &sg_descriptor->length);
4834                         put_unaligned_le32(CISS_SG_CHAIN,
4835                                 &sg_descriptor->flags);
4836                         chained = true;
4837                         num_sg_in_iu++;
4838                         sg_descriptor = io_request->sg_chain_buffer;
4839                 }
4840                 sg = sg_next(sg);
4841         }
4842
4843         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4844         request->partial = chained;
4845         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4846
4847 out:
4848         put_unaligned_le16(iu_length, &request->header.iu_length);
4849
4850         return 0;
4851 }
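
/*
 * SG list layout produced above: descriptors are embedded directly in
 * the IU until max_sg_per_iu - 1 of them have been placed.  If more are
 * needed, the next embedded slot becomes a CISS_SG_CHAIN descriptor
 * pointing at the io_request's pre-allocated DMA-coherent chain buffer,
 * and the remaining descriptors continue there.  Only the embedded
 * descriptors (including the chain descriptor itself) count toward the
 * IU length, and the final descriptor is flagged CISS_SG_LAST wherever
 * it lands.  pqi_build_aio_sg_list() below follows the same scheme.
 */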
4852
4853 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4854         struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4855         struct pqi_io_request *io_request)
4856 {
4857         int i;
4858         u16 iu_length;
4859         int sg_count;
4860         bool chained;
4861         unsigned int num_sg_in_iu;
4862         unsigned int max_sg_per_iu;
4863         struct scatterlist *sg;
4864         struct pqi_sg_descriptor *sg_descriptor;
4865
4866         sg_count = scsi_dma_map(scmd);
4867         if (sg_count < 0)
4868                 return sg_count;
4869
4870         iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4871                 PQI_REQUEST_HEADER_LENGTH;
4872         num_sg_in_iu = 0;
4873
4874         if (sg_count == 0)
4875                 goto out;
4876
4877         sg = scsi_sglist(scmd);
4878         sg_descriptor = request->sg_descriptors;
4879         max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4880         chained = false;
4881         i = 0;
4882
4883         while (1) {
4884                 pqi_set_sg_descriptor(sg_descriptor, sg);
4885                 if (!chained)
4886                         num_sg_in_iu++;
4887                 i++;
4888                 if (i == sg_count)
4889                         break;
4890                 sg_descriptor++;
4891                 if (i == max_sg_per_iu) {
4892                         put_unaligned_le64(
4893                                 (u64)io_request->sg_chain_buffer_dma_handle,
4894                                 &sg_descriptor->address);
4895                         put_unaligned_le32((sg_count - num_sg_in_iu)
4896                                 * sizeof(*sg_descriptor),
4897                                 &sg_descriptor->length);
4898                         put_unaligned_le32(CISS_SG_CHAIN,
4899                                 &sg_descriptor->flags);
4900                         chained = true;
4901                         num_sg_in_iu++;
4902                         sg_descriptor = io_request->sg_chain_buffer;
4903                 }
4904                 sg = sg_next(sg);
4905         }
4906
4907         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4908         request->partial = chained;
4909         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4910
4911 out:
4912         put_unaligned_le16(iu_length, &request->header.iu_length);
4913         request->num_sg_descriptors = num_sg_in_iu;
4914
4915         return 0;
4916 }
4917
4918 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4919         void *context)
4920 {
4921         struct scsi_cmnd *scmd;
4922
4923         scmd = io_request->scmd;
4924         pqi_free_io_request(io_request);
4925         scsi_dma_unmap(scmd);
4926         pqi_scsi_done(scmd);
4927 }
4928
4929 static int pqi_raid_submit_scsi_cmd_with_io_request(
4930         struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
4931         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4932         struct pqi_queue_group *queue_group)
4933 {
4934         int rc;
4935         size_t cdb_length;
4936         struct pqi_raid_path_request *request;
4937
4938         io_request->io_complete_callback = pqi_raid_io_complete;
4939         io_request->scmd = scmd;
4940
4941         request = io_request->iu;
4942         memset(request, 0,
4943                 offsetof(struct pqi_raid_path_request, sg_descriptors));
4944
4945         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4946         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4947         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4948         put_unaligned_le16(io_request->index, &request->request_id);
4949         request->error_index = request->request_id;
4950         memcpy(request->lun_number, device->scsi3addr,
4951                 sizeof(request->lun_number));
4952
4953         cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4954         memcpy(request->cdb, scmd->cmnd, cdb_length);
4955
4956         switch (cdb_length) {
4957         case 6:
4958         case 10:
4959         case 12:
4960         case 16:
4961                 /* No bytes in the Additional CDB bytes field */
4962                 request->additional_cdb_bytes_usage =
4963                         SOP_ADDITIONAL_CDB_BYTES_0;
4964                 break;
4965         case 20:
4966                 /* 4 bytes in the Additional CDB bytes field */
4967                 request->additional_cdb_bytes_usage =
4968                         SOP_ADDITIONAL_CDB_BYTES_4;
4969                 break;
4970         case 24:
4971                 /* 8 bytes in the Additional CDB bytes field */
4972                 request->additional_cdb_bytes_usage =
4973                         SOP_ADDITIONAL_CDB_BYTES_8;
4974                 break;
4975         case 28:
4976                 /* 12 bytes in the Additional CDB bytes field */
4977                 request->additional_cdb_bytes_usage =
4978                         SOP_ADDITIONAL_CDB_BYTES_12;
4979                 break;
4980         case 32:
4981         default:
4982                 /* 16 bytes in the Additional CDB bytes field */
4983                 request->additional_cdb_bytes_usage =
4984                         SOP_ADDITIONAL_CDB_BYTES_16;
4985                 break;
4986         }
4987
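        /*
         * The SOP data-direction flags are named from the controller's
         * point of view: a host write (DMA_TO_DEVICE) is a controller
         * read from host memory, and a host read (DMA_FROM_DEVICE) is a
         * controller write to host memory.
         */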
4988         switch (scmd->sc_data_direction) {
4989         case DMA_TO_DEVICE:
4990                 request->data_direction = SOP_READ_FLAG;
4991                 break;
4992         case DMA_FROM_DEVICE:
4993                 request->data_direction = SOP_WRITE_FLAG;
4994                 break;
4995         case DMA_NONE:
4996                 request->data_direction = SOP_NO_DIRECTION_FLAG;
4997                 break;
4998         case DMA_BIDIRECTIONAL:
4999                 request->data_direction = SOP_BIDIRECTIONAL;
5000                 break;
5001         default:
5002                 dev_err(&ctrl_info->pci_dev->dev,
5003                         "unknown data direction: %d\n",
5004                         scmd->sc_data_direction);
5005                 break;
5006         }
5007
5008         rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5009         if (rc) {
5010                 pqi_free_io_request(io_request);
5011                 return SCSI_MLQUEUE_HOST_BUSY;
5012         }
5013
5014         pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5015
5016         return 0;
5017 }
5018
5019 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5020         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5021         struct pqi_queue_group *queue_group)
5022 {
5023         struct pqi_io_request *io_request;
5024
5025         io_request = pqi_alloc_io_request(ctrl_info);
5026
5027         return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5028                 device, scmd, queue_group);
5029 }
5030
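/*
 * RAID bypass retry machinery: a bypass request is an AIO path shortcut
 * for a command that would normally go down the RAID path.  When such a
 * request fails in a retryable way (see pqi_raid_bypass_retry_needed()),
 * it is not completed to the SCSI midlayer; instead it is parked on
 * ctrl_info->raid_bypass_retry_list and re-issued down the ordinary RAID
 * path from pqi_raid_bypass_retry_worker().
 */
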
5031 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
5032 {
5033         if (!pqi_ctrl_blocked(ctrl_info))
5034                 schedule_work(&ctrl_info->raid_bypass_retry_work);
5035 }
5036
5037 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5038 {
5039         struct scsi_cmnd *scmd;
5040         struct pqi_scsi_dev *device;
5041         struct pqi_ctrl_info *ctrl_info;
5042
5043         if (!io_request->raid_bypass)
5044                 return false;
5045
5046         scmd = io_request->scmd;
5047         if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5048                 return false;
5049         if (host_byte(scmd->result) == DID_NO_CONNECT)
5050                 return false;
5051
5052         device = scmd->device->hostdata;
5053         if (pqi_device_offline(device))
5054                 return false;
5055
5056         ctrl_info = shost_to_hba(scmd->device->host);
5057         if (pqi_ctrl_offline(ctrl_info))
5058                 return false;
5059
5060         return true;
5061 }
5062
5063 static inline void pqi_add_to_raid_bypass_retry_list(
5064         struct pqi_ctrl_info *ctrl_info,
5065         struct pqi_io_request *io_request, bool at_head)
5066 {
5067         unsigned long flags;
5068
5069         spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5070         if (at_head)
5071                 list_add(&io_request->request_list_entry,
5072                         &ctrl_info->raid_bypass_retry_list);
5073         else
5074                 list_add_tail(&io_request->request_list_entry,
5075                         &ctrl_info->raid_bypass_retry_list);
5076         spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5077 }
5078
5079 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
5080         void *context)
5081 {
5082         struct scsi_cmnd *scmd;
5083
5084         scmd = io_request->scmd;
5085         pqi_free_io_request(io_request);
5086         pqi_scsi_done(scmd);
5087 }
5088
5089 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
5090 {
5091         struct scsi_cmnd *scmd;
5092         struct pqi_ctrl_info *ctrl_info;
5093
5094         io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
5095         scmd = io_request->scmd;
5096         scmd->result = 0;
5097         ctrl_info = shost_to_hba(scmd->device->host);
5098
5099         pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
5100         pqi_schedule_bypass_retry(ctrl_info);
5101 }
5102
5103 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
5104 {
5105         struct scsi_cmnd *scmd;
5106         struct pqi_scsi_dev *device;
5107         struct pqi_ctrl_info *ctrl_info;
5108         struct pqi_queue_group *queue_group;
5109
5110         scmd = io_request->scmd;
5111         device = scmd->device->hostdata;
5112         if (pqi_device_in_reset(device)) {
5113                 pqi_free_io_request(io_request);
5114                 set_host_byte(scmd, DID_RESET);
5115                 pqi_scsi_done(scmd);
5116                 return 0;
5117         }
5118
5119         ctrl_info = shost_to_hba(scmd->device->host);
5120         queue_group = io_request->queue_group;
5121
5122         pqi_reinit_io_request(io_request);
5123
5124         return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5125                 device, scmd, queue_group);
5126 }
5127
5128 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
5129         struct pqi_ctrl_info *ctrl_info)
5130 {
5131         unsigned long flags;
5132         struct pqi_io_request *io_request;
5133
5134         spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5135         io_request = list_first_entry_or_null(
5136                 &ctrl_info->raid_bypass_retry_list,
5137                 struct pqi_io_request, request_list_entry);
5138         if (io_request)
5139                 list_del(&io_request->request_list_entry);
5140         spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5141
5142         return io_request;
5143 }
5144
5145 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
5146 {
5147         int rc;
5148         struct pqi_io_request *io_request;
5149
5150         pqi_ctrl_busy(ctrl_info);
5151
5152         while (1) {
5153                 if (pqi_ctrl_blocked(ctrl_info))
5154                         break;
5155                 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
5156                 if (!io_request)
5157                         break;
5158                 rc = pqi_retry_raid_bypass(io_request);
5159                 if (rc) {
5160                         pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
5161                                 true);
5162                         pqi_schedule_bypass_retry(ctrl_info);
5163                         break;
5164                 }
5165         }
5166
5167         pqi_ctrl_unbusy(ctrl_info);
5168 }
5169
5170 static void pqi_raid_bypass_retry_worker(struct work_struct *work)
5171 {
5172         struct pqi_ctrl_info *ctrl_info;
5173
5174         ctrl_info = container_of(work, struct pqi_ctrl_info,
5175                 raid_bypass_retry_work);
5176         pqi_retry_raid_bypass_requests(ctrl_info);
5177 }
5178
5179 static void pqi_clear_all_queued_raid_bypass_retries(
5180         struct pqi_ctrl_info *ctrl_info)
5181 {
5182         unsigned long flags;
5183
5184         spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5185         INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
5186         spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5187 }
5188
5189 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5190         void *context)
5191 {
5192         struct scsi_cmnd *scmd;
5193
5194         scmd = io_request->scmd;
5195         scsi_dma_unmap(scmd);
5196         if (io_request->status == -EAGAIN)
5197                 set_host_byte(scmd, DID_IMM_RETRY);
5198         else if (pqi_raid_bypass_retry_needed(io_request)) {
5199                 pqi_queue_raid_bypass_retry(io_request);
5200                 return;
5201         }
5202         pqi_free_io_request(io_request);
5203         pqi_scsi_done(scmd);
5204 }
5205
5206 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5207         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5208         struct pqi_queue_group *queue_group)
5209 {
5210         return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5211                 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
5212 }
5213
5214 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5215         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5216         unsigned int cdb_length, struct pqi_queue_group *queue_group,
5217         struct pqi_encryption_info *encryption_info, bool raid_bypass)
5218 {
5219         int rc;
5220         struct pqi_io_request *io_request;
5221         struct pqi_aio_path_request *request;
5222
5223         io_request = pqi_alloc_io_request(ctrl_info);
5224         io_request->io_complete_callback = pqi_aio_io_complete;
5225         io_request->scmd = scmd;
5226         io_request->raid_bypass = raid_bypass;
5227
5228         request = io_request->iu;
5229         memset(request, 0,
5230                 offsetof(struct pqi_aio_path_request, sg_descriptors));
5231
5232         request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5233         put_unaligned_le32(aio_handle, &request->nexus_id);
5234         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5235         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5236         put_unaligned_le16(io_request->index, &request->request_id);
5237         request->error_index = request->request_id;
5238         if (cdb_length > sizeof(request->cdb))
5239                 cdb_length = sizeof(request->cdb);
5240         request->cdb_length = cdb_length;
5241         memcpy(request->cdb, cdb, cdb_length);
5242
5243         switch (scmd->sc_data_direction) {
5244         case DMA_TO_DEVICE:
5245                 request->data_direction = SOP_READ_FLAG;
5246                 break;
5247         case DMA_FROM_DEVICE:
5248                 request->data_direction = SOP_WRITE_FLAG;
5249                 break;
5250         case DMA_NONE:
5251                 request->data_direction = SOP_NO_DIRECTION_FLAG;
5252                 break;
5253         case DMA_BIDIRECTIONAL:
5254                 request->data_direction = SOP_BIDIRECTIONAL;
5255                 break;
5256         default:
5257                 dev_err(&ctrl_info->pci_dev->dev,
5258                         "unknown data direction: %d\n",
5259                         scmd->sc_data_direction);
5260                 break;
5261         }
5262
5263         if (encryption_info) {
5264                 request->encryption_enable = true;
5265                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5266                         &request->data_encryption_key_index);
5267                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5268                         &request->encrypt_tweak_lower);
5269                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5270                         &request->encrypt_tweak_upper);
5271         }
5272
5273         rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5274         if (rc) {
5275                 pqi_free_io_request(io_request);
5276                 return SCSI_MLQUEUE_HOST_BUSY;
5277         }
5278
5279         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5280
5281         return 0;
5282 }
5283
5284 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5285         struct scsi_cmnd *scmd)
5286 {
5287         u16 hw_queue;
5288
5289         hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5290         if (hw_queue > ctrl_info->max_hw_queue_index)
5291                 hw_queue = 0;
5292
5293         return hw_queue;
5294 }
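
/*
 * The hardware queue index comes from the request's blk-mq tag:
 * blk_mq_unique_tag() encodes the hctx number in the upper 16 bits of
 * the tag, and blk_mq_unique_tag_to_hwq() extracts it again.  Anything
 * out of range falls back to queue group 0.
 */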
5295
5296 /*
5297  * This function gets called just before we hand the completed SCSI request
5298  * back to the SML (SCSI midlayer).
5299  */
5300
5301 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5302 {
5303         struct pqi_scsi_dev *device;
5304
5305         if (!scmd->device) {
5306                 set_host_byte(scmd, DID_NO_CONNECT);
5307                 return;
5308         }
5309
5310         device = scmd->device->hostdata;
5311         if (!device) {
5312                 set_host_byte(scmd, DID_NO_CONNECT);
5313                 return;
5314         }
5315
5316         atomic_dec(&device->scsi_cmds_outstanding);
5317 }
5318
5319 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
5320         struct scsi_cmnd *scmd)
5321 {
5322         int rc;
5323         struct pqi_ctrl_info *ctrl_info;
5324         struct pqi_scsi_dev *device;
5325         u16 hw_queue;
5326         struct pqi_queue_group *queue_group;
5327         bool raid_bypassed;
5328
5329         device = scmd->device->hostdata;
5330         ctrl_info = shost_to_hba(shost);
5331
5332         if (!device) {
5333                 set_host_byte(scmd, DID_NO_CONNECT);
5334                 pqi_scsi_done(scmd);
5335                 return 0;
5336         }
5337
5338         atomic_inc(&device->scsi_cmds_outstanding);
5339
5340         if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5341                                                                 device)) {
5342                 set_host_byte(scmd, DID_NO_CONNECT);
5343                 pqi_scsi_done(scmd);
5344                 return 0;
5345         }
5346
5347         pqi_ctrl_busy(ctrl_info);
5348         if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5349             pqi_ctrl_in_ofa(ctrl_info)) {
5350                 rc = SCSI_MLQUEUE_HOST_BUSY;
5351                 goto out;
5352         }
5353
5354         /*
5355          * This is necessary because the SML doesn't zero out this field during
5356          * error recovery.
5357          */
5358         scmd->result = 0;
5359
5360         hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5361         queue_group = &ctrl_info->queue_groups[hw_queue];
5362
5363         if (pqi_is_logical_device(device)) {
5364                 raid_bypassed = false;
5365                 if (device->raid_bypass_enabled &&
5366                                 !blk_rq_is_passthrough(scmd->request)) {
5367                         rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5368                                 scmd, queue_group);
5369                         if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
5370                                 raid_bypassed = true;
5371                 }
5372                 if (!raid_bypassed)
5373                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5374                                 queue_group);
5375         } else {
5376                 if (device->aio_enabled)
5377                         rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5378                                 queue_group);
5379                 else
5380                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5381                                 queue_group);
5382         }
5383
5384 out:
5385         pqi_ctrl_unbusy(ctrl_info);
5386         if (rc)
5387                 atomic_dec(&device->scsi_cmds_outstanding);
5388
5389         return rc;
5390 }
5391
5392 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5393         struct pqi_queue_group *queue_group)
5394 {
5395         unsigned int path;
5396         unsigned long flags;
5397         bool list_is_empty;
5398
5399         for (path = 0; path < 2; path++) {
5400                 while (1) {
5401                         spin_lock_irqsave(
5402                                 &queue_group->submit_lock[path], flags);
5403                         list_is_empty =
5404                                 list_empty(&queue_group->request_list[path]);
5405                         spin_unlock_irqrestore(
5406                                 &queue_group->submit_lock[path], flags);
5407                         if (list_is_empty)
5408                                 break;
5409                         pqi_check_ctrl_health(ctrl_info);
5410                         if (pqi_ctrl_offline(ctrl_info))
5411                                 return -ENXIO;
5412                         usleep_range(1000, 2000);
5413                 }
5414         }
5415
5416         return 0;
5417 }
5418
5419 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5420 {
5421         int rc;
5422         unsigned int i;
5423         unsigned int path;
5424         struct pqi_queue_group *queue_group;
5425         pqi_index_t iq_pi;
5426         pqi_index_t iq_ci;
5427
5428         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5429                 queue_group = &ctrl_info->queue_groups[i];
5430
5431                 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5432                 if (rc)
5433                         return rc;
5434
5435                 for (path = 0; path < 2; path++) {
5436                         iq_pi = queue_group->iq_pi_copy[path];
5437
5438                         while (1) {
5439                                 iq_ci = readl(queue_group->iq_ci[path]);
5440                                 if (iq_ci == iq_pi)
5441                                         break;
5442                                 pqi_check_ctrl_health(ctrl_info);
5443                                 if (pqi_ctrl_offline(ctrl_info))
5444                                         return -ENXIO;
5445                                 usleep_range(1000, 2000);
5446                         }
5447                 }
5448         }
5449
5450         return 0;
5451 }
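
/*
 * In the drain loop above, an inbound queue is considered empty once the
 * consumer index written back by the controller (iq_ci) catches up with
 * the driver's cached producer index (iq_pi_copy); pi == ci means the
 * firmware has consumed every element the host has posted.
 */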
5452
5453 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5454         struct pqi_scsi_dev *device)
5455 {
5456         unsigned int i;
5457         unsigned int path;
5458         struct pqi_queue_group *queue_group;
5459         unsigned long flags;
5460         struct pqi_io_request *io_request;
5461         struct pqi_io_request *next;
5462         struct scsi_cmnd *scmd;
5463         struct pqi_scsi_dev *scsi_device;
5464
5465         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5466                 queue_group = &ctrl_info->queue_groups[i];
5467
5468                 for (path = 0; path < 2; path++) {
5469                         spin_lock_irqsave(
5470                                 &queue_group->submit_lock[path], flags);
5471
5472                         list_for_each_entry_safe(io_request, next,
5473                                 &queue_group->request_list[path],
5474                                 request_list_entry) {
5475                                 scmd = io_request->scmd;
5476                                 if (!scmd)
5477                                         continue;
5478
5479                                 scsi_device = scmd->device->hostdata;
5480                                 if (scsi_device != device)
5481                                         continue;
5482
5483                                 list_del(&io_request->request_list_entry);
5484                                 set_host_byte(scmd, DID_RESET);
5485                                 pqi_scsi_done(scmd);
5486                         }
5487
5488                         spin_unlock_irqrestore(
5489                                 &queue_group->submit_lock[path], flags);
5490                 }
5491         }
5492 }
5493
5494 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5495 {
5496         unsigned int i;
5497         unsigned int path;
5498         struct pqi_queue_group *queue_group;
5499         unsigned long flags;
5500         struct pqi_io_request *io_request;
5501         struct pqi_io_request *next;
5502         struct scsi_cmnd *scmd;
5503
5504         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5505                 queue_group = &ctrl_info->queue_groups[i];
5506
5507                 for (path = 0; path < 2; path++) {
5508                         spin_lock_irqsave(&queue_group->submit_lock[path],
5509                                                 flags);
5510
5511                         list_for_each_entry_safe(io_request, next,
5512                                 &queue_group->request_list[path],
5513                                 request_list_entry) {
5515                                 scmd = io_request->scmd;
5516                                 if (!scmd)
5517                                         continue;
5518
5519                                 list_del(&io_request->request_list_entry);
5520                                 set_host_byte(scmd, DID_RESET);
5521                                 pqi_scsi_done(scmd);
5522                         }
5523
5524                         spin_unlock_irqrestore(
5525                                 &queue_group->submit_lock[path], flags);
5526                 }
5527         }
5528 }
5529
5530 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5531         struct pqi_scsi_dev *device, unsigned long timeout_secs)
5532 {
5533         unsigned long timeout;
5534
5535         timeout = (timeout_secs * PQI_HZ) + jiffies;
5536
5537         while (atomic_read(&device->scsi_cmds_outstanding)) {
5538                 pqi_check_ctrl_health(ctrl_info);
5539                 if (pqi_ctrl_offline(ctrl_info))
5540                         return -ENXIO;
5541                 if (timeout_secs != NO_TIMEOUT) {
5542                         if (time_after(jiffies, timeout)) {
5543                                 dev_err(&ctrl_info->pci_dev->dev,
5544                                         "timed out waiting for pending IO\n");
5545                                 return -ETIMEDOUT;
5546                         }
5547                 }
5548                 usleep_range(1000, 2000);
5549         }
5550
5551         return 0;
5552 }
5553
5554 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5555         unsigned long timeout_secs)
5556 {
5557         bool io_pending;
5558         unsigned long flags;
5559         unsigned long timeout;
5560         struct pqi_scsi_dev *device;
5561
5562         timeout = (timeout_secs * PQI_HZ) + jiffies;
5563         while (1) {
5564                 io_pending = false;
5565
5566                 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5567                 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5568                         scsi_device_list_entry) {
5569                         if (atomic_read(&device->scsi_cmds_outstanding)) {
5570                                 io_pending = true;
5571                                 break;
5572                         }
5573                 }
5574                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5575                                         flags);
5576
5577                 if (!io_pending)
5578                         break;
5579
5580                 pqi_check_ctrl_health(ctrl_info);
5581                 if (pqi_ctrl_offline(ctrl_info))
5582                         return -ENXIO;
5583
5584                 if (timeout_secs != NO_TIMEOUT) {
5585                         if (time_after(jiffies, timeout)) {
5586                                 dev_err(&ctrl_info->pci_dev->dev,
5587                                         "timed out waiting for pending IO\n");
5588                                 return -ETIMEDOUT;
5589                         }
5590                 }
5591                 usleep_range(1000, 2000);
5592         }
5593
5594         return 0;
5595 }
5596
5597 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5598         void *context)
5599 {
5600         struct completion *waiting = context;
5601
5602         complete(waiting);
5603 }
5604
5605 #define PQI_LUN_RESET_TIMEOUT_SECS      10
5606
5607 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5608         struct pqi_scsi_dev *device, struct completion *wait)
5609 {
5610         int rc;
5611
5612         while (1) {
5613                 if (wait_for_completion_io_timeout(wait,
5614                         PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
5615                         rc = 0;
5616                         break;
5617                 }
5618
5619                 pqi_check_ctrl_health(ctrl_info);
5620                 if (pqi_ctrl_offline(ctrl_info)) {
5621                         rc = -ENXIO;
5622                         break;
5623                 }
5624         }
5625
5626         return rc;
5627 }
5628
static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        int rc;
        struct pqi_io_request *io_request;
        DECLARE_COMPLETION_ONSTACK(wait);
        struct pqi_task_management_request *request;

        io_request = pqi_alloc_io_request(ctrl_info);
        io_request->io_complete_callback = pqi_lun_reset_complete;
        io_request->context = &wait;

        request = io_request->iu;
        memset(request, 0, sizeof(*request));

        request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
        put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
                &request->header.iu_length);
        put_unaligned_le16(io_request->index, &request->request_id);
        memcpy(request->lun_number, device->scsi3addr,
                sizeof(request->lun_number));
        request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;

        pqi_start_io(ctrl_info,
                &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
                io_request);

        rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
        if (rc == 0)
                rc = io_request->status;

        pqi_free_io_request(io_request);

        return rc;
}

/* Performs a reset at the LUN level. */

#define PQI_LUN_RESET_RETRIES                   3
#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS      10000
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS   120

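/*
 * Issue the LUN reset, retrying up to PQI_LUN_RESET_RETRIES times (with
 * a 10-second pause between attempts) while the firmware returns
 * -EAGAIN, then wait for any I/O still outstanding on the device to
 * drain.  A failed reset gets a bounded 120-second drain; a successful
 * one waits without a timeout.
 */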
static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        int rc;
        unsigned int retries;
        unsigned long timeout_secs;

        for (retries = 0;;) {
                rc = pqi_lun_reset(ctrl_info, device);
                if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
                        break;
                msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
        }

        timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;

        rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);

        return rc == 0 ? SUCCESS : FAILED;
}

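/*
 * Serialized via lun_reset_mutex: block and quiesce inbound requests,
 * fail any I/O already queued for the device, and only then perform the
 * actual LUN reset with the controller unblocked again.
 */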
static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        int rc;

        mutex_lock(&ctrl_info->lun_reset_mutex);

        pqi_ctrl_block_requests(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
        pqi_fail_io_queued_for_device(ctrl_info, device);
        rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
        pqi_device_reset_start(device);
        pqi_ctrl_unblock_requests(ctrl_info);

        if (rc)
                rc = FAILED;
        else
                rc = _pqi_device_reset(ctrl_info, device);

        pqi_device_reset_done(device);

        mutex_unlock(&ctrl_info->lun_reset_mutex);

        return rc;
}

static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
        int rc;
        struct Scsi_Host *shost;
        struct pqi_ctrl_info *ctrl_info;
        struct pqi_scsi_dev *device;

        shost = scmd->device->host;
        ctrl_info = shost_to_hba(shost);
        device = scmd->device->hostdata;

        dev_err(&ctrl_info->pci_dev->dev,
                "resetting scsi %d:%d:%d:%d\n",
                shost->host_no, device->bus, device->target, device->lun);

        pqi_check_ctrl_health(ctrl_info);
        if (pqi_ctrl_offline(ctrl_info)) {
                dev_err(&ctrl_info->pci_dev->dev,
                        "controller %u offlined - cannot send device reset\n",
                        ctrl_info->ctrl_id);
                rc = FAILED;
                goto out;
        }

        pqi_wait_until_ofa_finished(ctrl_info);

        rc = pqi_device_reset(ctrl_info, device);

out:
        dev_err(&ctrl_info->pci_dev->dev,
                "reset of scsi %d:%d:%d:%d: %s\n",
                shost->host_no, device->bus, device->target, device->lun,
                rc == SUCCESS ? "SUCCESS" : "FAILED");

        return rc;
}

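/*
 * Bind a newly allocated scsi_device to the matching pqi_scsi_dev: look
 * physical devices up through their SAS rphy, logical devices by
 * channel/target/LUN, then propagate queue depth and per-type quirks.
 */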
static int pqi_slave_alloc(struct scsi_device *sdev)
{
        struct pqi_scsi_dev *device;
        unsigned long flags;
        struct pqi_ctrl_info *ctrl_info;
        struct scsi_target *starget;
        struct sas_rphy *rphy;

        ctrl_info = shost_to_hba(sdev->host);

        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

        if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
                starget = scsi_target(sdev);
                rphy = target_to_rphy(starget);
                device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
                if (device) {
                        device->target = sdev_id(sdev);
                        device->lun = sdev->lun;
                        device->target_lun_valid = true;
                }
        } else {
                device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
                        sdev_id(sdev), sdev->lun);
        }

        if (device) {
                sdev->hostdata = device;
                device->sdev = sdev;
                if (device->queue_depth) {
                        device->advertised_queue_depth = device->queue_depth;
                        scsi_change_queue_depth(sdev,
                                device->advertised_queue_depth);
                }
                if (pqi_is_logical_device(device))
                        pqi_disable_write_same(sdev);
                else
                        sdev->allow_restart = 1;
        }

        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

        return 0;
}

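/*
 * Let blk-mq spread the hardware queues across CPUs according to the
 * controller's PCI MSI-X interrupt affinity.
 */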
static int pqi_map_queues(struct Scsi_Host *shost)
{
        struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

        return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
                                        ctrl_info->pci_dev, 0);
}

static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
        void __user *arg)
{
        struct pci_dev *pci_dev;
        u32 subsystem_vendor;
        u32 subsystem_device;
        cciss_pci_info_struct pciinfo;

        if (!arg)
                return -EINVAL;

        pci_dev = ctrl_info->pci_dev;

        pciinfo.domain = pci_domain_nr(pci_dev->bus);
        pciinfo.bus = pci_dev->bus->number;
        pciinfo.dev_fn = pci_dev->devfn;
        subsystem_vendor = pci_dev->subsystem_vendor;
        subsystem_device = pci_dev->subsystem_device;
        pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
                subsystem_vendor;

        if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
                return -EFAULT;

        return 0;
}

static int pqi_getdrivver_ioctl(void __user *arg)
{
        u32 version;

        if (!arg)
                return -EINVAL;

        version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
                (DRIVER_RELEASE << 16) | DRIVER_REVISION;

        if (copy_to_user(arg, &version, sizeof(version)))
                return -EFAULT;

        return 0;
}

struct ciss_error_info {
        u8      scsi_status;
        int     command_status;
        size_t  sense_data_length;
};

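/*
 * Translate a PQI RAID-path error descriptor into the legacy CISS error
 * fields expected by CCISS_PASSTHRU callers: map the data_out_result to
 * a CISS command status and clamp the sense/response data length to the
 * size of the embedded data buffer.
 */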
static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
        struct ciss_error_info *ciss_error_info)
{
        int ciss_cmd_status;
        size_t sense_data_length;

        switch (pqi_error_info->data_out_result) {
        case PQI_DATA_IN_OUT_GOOD:
                ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
                break;
        case PQI_DATA_IN_OUT_UNDERFLOW:
                ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
                break;
        case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
                ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
                break;
        case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
        case PQI_DATA_IN_OUT_BUFFER_ERROR:
        case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
        case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
        case PQI_DATA_IN_OUT_ERROR:
                ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
                break;
        case PQI_DATA_IN_OUT_HARDWARE_ERROR:
        case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
        case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
        case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
        case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
        case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
        case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
        case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
        case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
        case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
                ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
                break;
        case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
                ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
                break;
        case PQI_DATA_IN_OUT_ABORTED:
                ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
                break;
        case PQI_DATA_IN_OUT_TIMEOUT:
                ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
                break;
        default:
                ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
                break;
        }

        sense_data_length =
                get_unaligned_le16(&pqi_error_info->sense_data_length);
        if (sense_data_length == 0)
                sense_data_length =
                get_unaligned_le16(&pqi_error_info->response_data_length);
        if (sense_data_length)
                if (sense_data_length > sizeof(pqi_error_info->data))
                        sense_data_length = sizeof(pqi_error_info->data);

        ciss_error_info->scsi_status = pqi_error_info->status;
        ciss_error_info->command_status = ciss_cmd_status;
        ciss_error_info->sense_data_length = sense_data_length;
}

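/*
 * CCISS_PASSTHRU: validate the user's IOCTL_Command_struct, stage the
 * data buffer in kernel memory, build a single-SG RAID-path IU, submit
 * it synchronously, then copy the translated error info (and, for
 * reads, the data) back to user space.
 */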
static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
        int rc;
        char *kernel_buffer = NULL;
        u16 iu_length;
        size_t sense_data_length;
        IOCTL_Command_struct iocommand;
        struct pqi_raid_path_request request;
        struct pqi_raid_error_info pqi_error_info;
        struct ciss_error_info ciss_error_info;

        if (pqi_ctrl_offline(ctrl_info))
                return -ENXIO;
        if (!arg)
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
                return -EFAULT;
        if (iocommand.buf_size < 1 &&
                iocommand.Request.Type.Direction != XFER_NONE)
                return -EINVAL;
        if (iocommand.Request.CDBLen > sizeof(request.cdb))
                return -EINVAL;
        if (iocommand.Request.Type.Type != TYPE_CMD)
                return -EINVAL;

        switch (iocommand.Request.Type.Direction) {
        case XFER_NONE:
        case XFER_WRITE:
        case XFER_READ:
        case XFER_READ | XFER_WRITE:
                break;
        default:
                return -EINVAL;
        }

        if (iocommand.buf_size > 0) {
                kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
                if (!kernel_buffer)
                        return -ENOMEM;
                if (iocommand.Request.Type.Direction & XFER_WRITE) {
                        if (copy_from_user(kernel_buffer, iocommand.buf,
                                iocommand.buf_size)) {
                                rc = -EFAULT;
                                goto out;
                        }
                } else {
                        memset(kernel_buffer, 0, iocommand.buf_size);
                }
        }

        memset(&request, 0, sizeof(request));

        request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
        iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
                PQI_REQUEST_HEADER_LENGTH;
        memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
                sizeof(request.lun_number));
        memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
        request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

        switch (iocommand.Request.Type.Direction) {
        case XFER_NONE:
                request.data_direction = SOP_NO_DIRECTION_FLAG;
                break;
        case XFER_WRITE:
                request.data_direction = SOP_WRITE_FLAG;
                break;
        case XFER_READ:
                request.data_direction = SOP_READ_FLAG;
                break;
        case XFER_READ | XFER_WRITE:
                request.data_direction = SOP_BIDIRECTIONAL;
                break;
        }

        request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;

        if (iocommand.buf_size > 0) {
                put_unaligned_le32(iocommand.buf_size, &request.buffer_length);

                rc = pqi_map_single(ctrl_info->pci_dev,
                        &request.sg_descriptors[0], kernel_buffer,
                        iocommand.buf_size, DMA_BIDIRECTIONAL);
                if (rc)
                        goto out;

                iu_length += sizeof(request.sg_descriptors[0]);
        }

        put_unaligned_le16(iu_length, &request.header.iu_length);

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
                PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);

        if (iocommand.buf_size > 0)
                pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
                        DMA_BIDIRECTIONAL);

        memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));

        if (rc == 0) {
                pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
                iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
                iocommand.error_info.CommandStatus =
                        ciss_error_info.command_status;
                sense_data_length = ciss_error_info.sense_data_length;
                if (sense_data_length) {
                        if (sense_data_length >
                                sizeof(iocommand.error_info.SenseInfo))
                                sense_data_length =
                                        sizeof(iocommand.error_info.SenseInfo);
                        memcpy(iocommand.error_info.SenseInfo,
                                pqi_error_info.data, sense_data_length);
                        iocommand.error_info.SenseLen = sense_data_length;
                }
        }

        if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
                rc = -EFAULT;
                goto out;
        }

        if (rc == 0 && iocommand.buf_size > 0 &&
                (iocommand.Request.Type.Direction & XFER_READ)) {
                if (copy_to_user(iocommand.buf, kernel_buffer,
                        iocommand.buf_size)) {
                        rc = -EFAULT;
                }
        }

out:
        kfree(kernel_buffer);

        return rc;
}

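/*
 * Dispatcher for the CCISS-compatible ioctls.  All of them are rejected
 * with -EBUSY while an Online Firmware Activation is in progress.
 */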
static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
                     void __user *arg)
{
        int rc;
        struct pqi_ctrl_info *ctrl_info;

        ctrl_info = shost_to_hba(sdev->host);

        if (pqi_ctrl_in_ofa(ctrl_info))
                return -EBUSY;

        switch (cmd) {
        case CCISS_DEREGDISK:
        case CCISS_REGNEWDISK:
        case CCISS_REGNEWD:
                rc = pqi_scan_scsi_devices(ctrl_info);
                break;
        case CCISS_GETPCIINFO:
                rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
                break;
        case CCISS_GETDRIVVER:
                rc = pqi_getdrivver_ioctl(arg);
                break;
        case CCISS_PASSTHRU:
                rc = pqi_passthru_ioctl(ctrl_info, arg);
                break;
        default:
                rc = -EINVAL;
                break;
        }

        return rc;
}

static ssize_t pqi_version_show(struct device *dev,
        struct device_attribute *attr, char *buffer)
{
        ssize_t count = 0;
        struct Scsi_Host *shost;
        struct pqi_ctrl_info *ctrl_info;

        shost = class_to_shost(dev);
        ctrl_info = shost_to_hba(shost);

        count += scnprintf(buffer + count, PAGE_SIZE - count,
                "  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);

        count += scnprintf(buffer + count, PAGE_SIZE - count,
                "firmware: %s\n", ctrl_info->firmware_version);

        return count;
}

static ssize_t pqi_host_rescan_store(struct device *dev,
        struct device_attribute *attr, const char *buffer, size_t count)
{
        struct Scsi_Host *shost = class_to_shost(dev);

        pqi_scan_start(shost);

        return count;
}

static ssize_t pqi_lockup_action_show(struct device *dev,
        struct device_attribute *attr, char *buffer)
{
        int count = 0;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
                if (pqi_lockup_actions[i].action == pqi_lockup_action)
                        count += scnprintf(buffer + count, PAGE_SIZE - count,
                                "[%s] ", pqi_lockup_actions[i].name);
                else
                        count += scnprintf(buffer + count, PAGE_SIZE - count,
                                "%s ", pqi_lockup_actions[i].name);
        }

        count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");

        return count;
}

static ssize_t pqi_lockup_action_store(struct device *dev,
        struct device_attribute *attr, const char *buffer, size_t count)
{
        unsigned int i;
        char *action_name;
        char action_name_buffer[32];

        strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
        action_name = strstrip(action_name_buffer);

        for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
                if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
                        pqi_lockup_action = pqi_lockup_actions[i].action;
                        return count;
                }
        }

        return -EINVAL;
}

static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
static DEVICE_ATTR(lockup_action, 0644,
        pqi_lockup_action_show, pqi_lockup_action_store);

static struct device_attribute *pqi_shost_attrs[] = {
        &dev_attr_version,
        &dev_attr_rescan,
        &dev_attr_lockup_action,
        NULL
};

static ssize_t pqi_unique_id_show(struct device *dev,
        struct device_attribute *attr, char *buffer)
{
        struct pqi_ctrl_info *ctrl_info;
        struct scsi_device *sdev;
        struct pqi_scsi_dev *device;
        unsigned long flags;
        unsigned char uid[16];

        sdev = to_scsi_device(dev);
        ctrl_info = shost_to_hba(sdev->host);

        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

        device = sdev->hostdata;
        if (!device) {
                spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
                        flags);
                return -ENODEV;
        }
        memcpy(uid, device->unique_id, sizeof(uid));

        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

        return snprintf(buffer, PAGE_SIZE,
                "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
                uid[0], uid[1], uid[2], uid[3],
                uid[4], uid[5], uid[6], uid[7],
                uid[8], uid[9], uid[10], uid[11],
                uid[12], uid[13], uid[14], uid[15]);
}

static ssize_t pqi_lunid_show(struct device *dev,
        struct device_attribute *attr, char *buffer)
{
        struct pqi_ctrl_info *ctrl_info;
        struct scsi_device *sdev;
        struct pqi_scsi_dev *device;
        unsigned long flags;
        u8 lunid[8];

        sdev = to_scsi_device(dev);
        ctrl_info = shost_to_hba(sdev->host);

        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

        device = sdev->hostdata;
        if (!device) {
                spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
                        flags);
                return -ENODEV;
        }
        memcpy(lunid, device->scsi3addr, sizeof(lunid));

        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

        return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}

#define MAX_PATHS 8
static ssize_t pqi_path_info_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct pqi_ctrl_info *ctrl_info;
        struct scsi_device *sdev;
        struct pqi_scsi_dev *device;
        unsigned long flags;
        int i;
        int output_len = 0;
        u8 box;
        u8 bay;
        u8 path_map_index = 0;
        char *active;
        unsigned char phys_connector[2];

        sdev = to_scsi_device(dev);
        ctrl_info = shost_to_hba(sdev->host);

        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

        device = sdev->hostdata;
        if (!device) {
                spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
                        flags);
                return -ENODEV;
        }

        bay = device->bay;
        for (i = 0; i < MAX_PATHS; i++) {
                path_map_index = 1 << i;
                if (i == device->active_path_index)
                        active = "Active";
                else if (device->path_map & path_map_index)
                        active = "Inactive";
                else
                        continue;

                output_len += scnprintf(buf + output_len,
                                        PAGE_SIZE - output_len,
                                        "[%d:%d:%d:%d] %20.20s ",
                                        ctrl_info->scsi_host->host_no,
                                        device->bus, device->target,
                                        device->lun,
                                        scsi_device_type(device->devtype));

                if (device->devtype == TYPE_RAID ||
                        pqi_is_logical_device(device))
                        goto end_buffer;

                memcpy(&phys_connector, &device->phys_connector[i],
                        sizeof(phys_connector));
                if (phys_connector[0] < '0')
                        phys_connector[0] = '0';
                if (phys_connector[1] < '0')
                        phys_connector[1] = '0';

                output_len += scnprintf(buf + output_len,
                                        PAGE_SIZE - output_len,
                                        "PORT: %.2s ", phys_connector);

                box = device->box[i];
                if (box != 0 && box != 0xFF)
                        output_len += scnprintf(buf + output_len,
                                                PAGE_SIZE - output_len,
                                                "BOX: %hhu ", box);

                if ((device->devtype == TYPE_DISK ||
                        device->devtype == TYPE_ZBC) &&
                        pqi_expose_device(device))
                        output_len += scnprintf(buf + output_len,
                                                PAGE_SIZE - output_len,
                                                "BAY: %hhu ", bay);

end_buffer:
                output_len += scnprintf(buf + output_len,
                                        PAGE_SIZE - output_len,
                                        "%s\n", active);
        }

        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
        return output_len;
}

static ssize_t pqi_sas_address_show(struct device *dev,
        struct device_attribute *attr, char *buffer)
{
        struct pqi_ctrl_info *ctrl_info;
        struct scsi_device *sdev;
        struct pqi_scsi_dev *device;
        unsigned long flags;
        u64 sas_address;

        sdev = to_scsi_device(dev);
        ctrl_info = shost_to_hba(sdev->host);

        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

        device = sdev->hostdata;
        if (!device || pqi_is_logical_device(device)) {
                spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
                        flags);
                return -ENODEV;
        }
        sas_address = device->sas_address;

        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

        return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}

static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
        struct device_attribute *attr, char *buffer)
{
        struct pqi_ctrl_info *ctrl_info;
        struct scsi_device *sdev;
        struct pqi_scsi_dev *device;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        ctrl_info = shost_to_hba(sdev->host);

        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

        device = sdev->hostdata;
        if (!device) {
                spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
                        flags);
                return -ENODEV;
        }

        buffer[0] = device->raid_bypass_enabled ? '1' : '0';
        buffer[1] = '\n';
        buffer[2] = '\0';

        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

        return 2;
}

static ssize_t pqi_raid_level_show(struct device *dev,
        struct device_attribute *attr, char *buffer)
{
        struct pqi_ctrl_info *ctrl_info;
        struct scsi_device *sdev;
        struct pqi_scsi_dev *device;
        unsigned long flags;
        char *raid_level;

        sdev = to_scsi_device(dev);
        ctrl_info = shost_to_hba(sdev->host);

        spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

        device = sdev->hostdata;
        if (!device) {
                spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
                        flags);
                return -ENODEV;
        }

        if (pqi_is_logical_device(device))
                raid_level = pqi_raid_level_to_string(device->raid_level);
        else
                raid_level = "N/A";

        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

        return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}

static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
        pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);

static struct device_attribute *pqi_sdev_attrs[] = {
        &dev_attr_lunid,
        &dev_attr_unique_id,
        &dev_attr_path_info,
        &dev_attr_sas_address,
        &dev_attr_ssd_smart_path_enabled,
        &dev_attr_raid_level,
        NULL
};

static struct scsi_host_template pqi_driver_template = {
        .module = THIS_MODULE,
        .name = DRIVER_NAME_SHORT,
        .proc_name = DRIVER_NAME_SHORT,
        .queuecommand = pqi_scsi_queue_command,
        .scan_start = pqi_scan_start,
        .scan_finished = pqi_scan_finished,
        .this_id = -1,
        .eh_device_reset_handler = pqi_eh_device_reset_handler,
        .ioctl = pqi_ioctl,
        .slave_alloc = pqi_slave_alloc,
        .map_queues = pqi_map_queues,
        .sdev_attrs = pqi_sdev_attrs,
        .shost_attrs = pqi_shost_attrs,
};

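/*
 * Allocate and register the Scsi_Host for this controller.  Note that
 * scsi_host_alloc() is passed sizeof(ctrl_info) - the size of the
 * pointer, not of the structure - because only the pqi_ctrl_info
 * pointer itself is stashed in shost->hostdata[0].
 */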
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct Scsi_Host *shost;

        shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
        if (!shost) {
                dev_err(&ctrl_info->pci_dev->dev,
                        "scsi_host_alloc failed for controller %u\n",
                        ctrl_info->ctrl_id);
                return -ENOMEM;
        }

        shost->io_port = 0;
        shost->n_io_port = 0;
        shost->this_id = -1;
        shost->max_channel = PQI_MAX_BUS;
        shost->max_cmd_len = MAX_COMMAND_SIZE;
        shost->max_lun = ~0;
        shost->max_id = ~0;
        shost->max_sectors = ctrl_info->max_sectors;
        shost->can_queue = ctrl_info->scsi_ml_can_queue;
        shost->cmd_per_lun = shost->can_queue;
        shost->sg_tablesize = ctrl_info->sg_tablesize;
        shost->transportt = pqi_sas_transport_template;
        shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
        shost->unique_id = shost->irq;
        shost->nr_hw_queues = ctrl_info->num_queue_groups;
        shost->hostdata[0] = (unsigned long)ctrl_info;

        rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
        if (rc) {
                dev_err(&ctrl_info->pci_dev->dev,
                        "scsi_add_host failed for controller %u\n",
                        ctrl_info->ctrl_id);
                goto free_host;
        }

        rc = pqi_add_sas_host(shost, ctrl_info);
        if (rc) {
                dev_err(&ctrl_info->pci_dev->dev,
                        "add SAS host failed for controller %u\n",
                        ctrl_info->ctrl_id);
                goto remove_host;
        }

        ctrl_info->scsi_host = shost;

        return 0;

remove_host:
        scsi_remove_host(shost);
free_host:
        scsi_host_put(shost);

        return rc;
}

static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{
        struct Scsi_Host *shost;

        pqi_delete_sas_host(ctrl_info);

        shost = ctrl_info->scsi_host;
        if (!shost)
                return;

        scsi_remove_host(shost);
        scsi_host_put(shost);
}

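/*
 * Poll the PQI device reset register until the firmware reports
 * PQI_RESET_ACTION_COMPLETED, giving up if the controller goes offline
 * or the controller-advertised maximum reset timeout (in units of
 * 100 ms) expires.
 */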
static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
{
        int rc = 0;
        struct pqi_device_registers __iomem *pqi_registers;
        unsigned long timeout;
        unsigned int timeout_msecs;
        union pqi_reset_register reset_reg;

        pqi_registers = ctrl_info->pqi_registers;
        timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
        timeout = msecs_to_jiffies(timeout_msecs) + jiffies;

        while (1) {
                msleep(PQI_RESET_POLL_INTERVAL_MSECS);
                reset_reg.all_bits = readl(&pqi_registers->device_reset);
                if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
                        break;
                pqi_check_ctrl_health(ctrl_info);
                if (pqi_ctrl_offline(ctrl_info)) {
                        rc = -ENXIO;
                        break;
                }
                if (time_after(jiffies, timeout)) {
                        rc = -ETIMEDOUT;
                        break;
                }
        }

        return rc;
}

static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        union pqi_reset_register reset_reg;

        if (ctrl_info->pqi_reset_quiesce_supported) {
                rc = sis_pqi_reset_quiesce(ctrl_info);
                if (rc) {
                        dev_err(&ctrl_info->pci_dev->dev,
                                "PQI reset failed during quiesce with error %d\n",
                                rc);
                        return rc;
                }
        }

        reset_reg.all_bits = 0;
        reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
        reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;

        writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);

        rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
        if (rc)
                dev_err(&ctrl_info->pci_dev->dev,
                        "PQI reset failed with error %d\n", rc);

        return rc;
}

static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct bmic_identify_controller *identify;

        identify = kmalloc(sizeof(*identify), GFP_KERNEL);
        if (!identify)
                return -ENOMEM;

        rc = pqi_identify_controller(ctrl_info, identify);
        if (rc)
                goto out;

        memcpy(ctrl_info->firmware_version, identify->firmware_version,
                sizeof(identify->firmware_version));
        ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
        snprintf(ctrl_info->firmware_version +
                strlen(ctrl_info->firmware_version),
                sizeof(ctrl_info->firmware_version) -
                strlen(ctrl_info->firmware_version),
                "-%u", get_unaligned_le16(&identify->firmware_build_number));

out:
        kfree(identify);

        return rc;
}

struct pqi_config_table_section_info {
        struct pqi_ctrl_info *ctrl_info;
        void            *section;
        u32             section_offset;
        void __iomem    *section_iomem_addr;
};

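/*
 * The firmware features section contains three parallel byte arrays of
 * num_elements bytes each, laid out consecutively starting at
 * features_supported: the features the firmware supports, the features
 * the host is requesting, and the features the firmware has actually
 * enabled.  The helpers below index into those arrays by feature bit
 * position.
 */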
static inline bool pqi_is_firmware_feature_supported(
        struct pqi_config_table_firmware_features *firmware_features,
        unsigned int bit_position)
{
        unsigned int byte_index;

        byte_index = bit_position / BITS_PER_BYTE;

        if (byte_index >= le16_to_cpu(firmware_features->num_elements))
                return false;

        return firmware_features->features_supported[byte_index] &
                (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}

static inline bool pqi_is_firmware_feature_enabled(
        struct pqi_config_table_firmware_features *firmware_features,
        void __iomem *firmware_features_iomem_addr,
        unsigned int bit_position)
{
        unsigned int byte_index;
        u8 __iomem *features_enabled_iomem_addr;

        byte_index = (bit_position / BITS_PER_BYTE) +
                (le16_to_cpu(firmware_features->num_elements) * 2);

        features_enabled_iomem_addr = firmware_features_iomem_addr +
                offsetof(struct pqi_config_table_firmware_features,
                        features_supported) + byte_index;

        return *((__force u8 *)features_enabled_iomem_addr) &
                (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}

static inline void pqi_request_firmware_feature(
        struct pqi_config_table_firmware_features *firmware_features,
        unsigned int bit_position)
{
        unsigned int byte_index;

        byte_index = (bit_position / BITS_PER_BYTE) +
                le16_to_cpu(firmware_features->num_elements);

        firmware_features->features_supported[byte_index] |=
                (1 << (bit_position % BITS_PER_BYTE));
}

static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
        u16 first_section, u16 last_section)
{
        struct pqi_vendor_general_request request;

        memset(&request, 0, sizeof(request));

        request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
        put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
                &request.header.iu_length);
        put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
                &request.function_code);
        put_unaligned_le16(first_section,
                &request.data.config_table_update.first_section);
        put_unaligned_le16(last_section,
                &request.data.config_table_update.last_section);

        return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
                0, NULL, NO_TIMEOUT);
}

static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
        struct pqi_config_table_firmware_features *firmware_features,
        void __iomem *firmware_features_iomem_addr)
{
        void *features_requested;
        void __iomem *features_requested_iomem_addr;

        features_requested = firmware_features->features_supported +
                le16_to_cpu(firmware_features->num_elements);

        features_requested_iomem_addr = firmware_features_iomem_addr +
                (features_requested - (void *)firmware_features);

        memcpy_toio(features_requested_iomem_addr, features_requested,
                le16_to_cpu(firmware_features->num_elements));

        return pqi_config_table_update(ctrl_info,
                PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
                PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
}

struct pqi_firmware_feature {
        char            *feature_name;
        unsigned int    feature_bit;
        bool            supported;
        bool            enabled;
        void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
                struct pqi_firmware_feature *firmware_feature);
};

static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
        struct pqi_firmware_feature *firmware_feature)
{
        if (!firmware_feature->supported) {
                dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
                        firmware_feature->feature_name);
                return;
        }

        if (firmware_feature->enabled) {
                dev_info(&ctrl_info->pci_dev->dev,
                        "%s enabled\n", firmware_feature->feature_name);
                return;
        }

        dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
                firmware_feature->feature_name);
}

static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
        struct pqi_firmware_feature *firmware_feature)
{
        if (firmware_feature->feature_status)
                firmware_feature->feature_status(ctrl_info, firmware_feature);
}

static DEFINE_MUTEX(pqi_firmware_features_mutex);

static struct pqi_firmware_feature pqi_firmware_features[] = {
        {
                .feature_name = "Online Firmware Activation",
                .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
                .feature_status = pqi_firmware_feature_status,
        },
        {
                .feature_name = "Serial Management Protocol",
                .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
                .feature_status = pqi_firmware_feature_status,
        },
        {
                .feature_name = "New Soft Reset Handshake",
                .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
                .feature_status = pqi_firmware_feature_status,
        },
};

static void pqi_process_firmware_features(
        struct pqi_config_table_section_info *section_info)
{
        int rc;
        struct pqi_ctrl_info *ctrl_info;
        struct pqi_config_table_firmware_features *firmware_features;
        void __iomem *firmware_features_iomem_addr;
        unsigned int i;
        unsigned int num_features_supported;

        ctrl_info = section_info->ctrl_info;
        firmware_features = section_info->section;
        firmware_features_iomem_addr = section_info->section_iomem_addr;

        for (i = 0, num_features_supported = 0;
                i < ARRAY_SIZE(pqi_firmware_features); i++) {
                if (pqi_is_firmware_feature_supported(firmware_features,
                        pqi_firmware_features[i].feature_bit)) {
                        pqi_firmware_features[i].supported = true;
                        num_features_supported++;
                } else {
                        pqi_firmware_feature_update(ctrl_info,
                                &pqi_firmware_features[i]);
                }
        }

        if (num_features_supported == 0)
                return;

        for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
                if (!pqi_firmware_features[i].supported)
                        continue;
                pqi_request_firmware_feature(firmware_features,
                        pqi_firmware_features[i].feature_bit);
        }

        rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
                firmware_features_iomem_addr);
        if (rc) {
                dev_err(&ctrl_info->pci_dev->dev,
                        "failed to enable firmware features in PQI configuration table\n");
                for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
                        if (!pqi_firmware_features[i].supported)
                                continue;
                        pqi_firmware_feature_update(ctrl_info,
                                &pqi_firmware_features[i]);
                }
                return;
        }

        ctrl_info->soft_reset_handshake_supported = false;
        for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
                if (!pqi_firmware_features[i].supported)
                        continue;
                if (pqi_is_firmware_feature_enabled(firmware_features,
                        firmware_features_iomem_addr,
                        pqi_firmware_features[i].feature_bit)) {
                        pqi_firmware_features[i].enabled = true;
                        if (pqi_firmware_features[i].feature_bit ==
                            PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
                                ctrl_info->soft_reset_handshake_supported =
                                                                        true;
                }
                pqi_firmware_feature_update(ctrl_info,
                        &pqi_firmware_features[i]);
        }
}

static void pqi_init_firmware_features(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
                pqi_firmware_features[i].supported = false;
                pqi_firmware_features[i].enabled = false;
        }
}

static void pqi_process_firmware_features_section(
        struct pqi_config_table_section_info *section_info)
{
        mutex_lock(&pqi_firmware_features_mutex);
        pqi_init_firmware_features();
        pqi_process_firmware_features(section_info);
        mutex_unlock(&pqi_firmware_features_mutex);
}

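/*
 * Snapshot the PQI configuration table out of BAR memory and walk its
 * sections, wiring up the firmware features handshake, the heartbeat
 * counter (unless disabled by module parameter), and the soft reset
 * status pointer.
 */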
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
        u32 table_length;
        u32 section_offset;
        void __iomem *table_iomem_addr;
        struct pqi_config_table *config_table;
        struct pqi_config_table_section_header *section;
        struct pqi_config_table_section_info section_info;

        table_length = ctrl_info->config_table_length;
        if (table_length == 0)
                return 0;

        config_table = kmalloc(table_length, GFP_KERNEL);
        if (!config_table) {
                dev_err(&ctrl_info->pci_dev->dev,
                        "failed to allocate memory for PQI configuration table\n");
                return -ENOMEM;
        }

        /*
         * Copy the config table contents from I/O memory space into the
         * temporary buffer.
         */
        table_iomem_addr = ctrl_info->iomem_base +
                ctrl_info->config_table_offset;
        memcpy_fromio(config_table, table_iomem_addr, table_length);

        section_info.ctrl_info = ctrl_info;
        section_offset =
                get_unaligned_le32(&config_table->first_section_offset);

        while (section_offset) {
                section = (void *)config_table + section_offset;

                section_info.section = section;
                section_info.section_offset = section_offset;
                section_info.section_iomem_addr =
                        table_iomem_addr + section_offset;

                switch (get_unaligned_le16(&section->section_id)) {
                case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
                        pqi_process_firmware_features_section(&section_info);
                        break;
                case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
                        if (pqi_disable_heartbeat)
                                dev_warn(&ctrl_info->pci_dev->dev,
                                "heartbeat disabled by module parameter\n");
                        else
                                ctrl_info->heartbeat_counter =
                                        table_iomem_addr +
                                        section_offset +
                                        offsetof(
                                        struct pqi_config_table_heartbeat,
                                                heartbeat_counter);
                        break;
                case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
                        ctrl_info->soft_reset_status =
                                table_iomem_addr +
                                section_offset +
                                offsetof(struct pqi_config_table_soft_reset,
                                                soft_reset_status);
                        break;
                }

                section_offset =
                        get_unaligned_le16(&section->next_section_offset);
        }

        kfree(config_table);

        return 0;
}

/* Switches the controller from PQI mode back into SIS mode. */

static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
        int rc;

        pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
        rc = pqi_reset(ctrl_info);
        if (rc)
                return rc;
        rc = sis_reenable_sis_mode(ctrl_info);
        if (rc) {
                dev_err(&ctrl_info->pci_dev->dev,
                        "re-enabling SIS mode failed with error %d\n", rc);
                return rc;
        }
        pqi_save_ctrl_mode(ctrl_info, SIS_MODE);

        return 0;
}

/*
 * If the controller isn't already in SIS mode, this function forces it into
 * SIS mode.
 */

static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
        if (!sis_is_firmware_running(ctrl_info))
                return -ENXIO;

        if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
                return 0;

        if (sis_is_kernel_up(ctrl_info)) {
                pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
                return 0;
        }

        return pqi_revert_to_sis_mode(ctrl_info);
}

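/*
 * One-time controller bring-up: start from SIS mode, size the I/O
 * resources, transition the controller into PQI mode, create the admin
 * and operational queues, enable MSI-X and events, register with the
 * SCSI midlayer, and kick off the initial device scan.
 */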
6940 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6941 {
6942         int rc;
6943
6944         rc = pqi_force_sis_mode(ctrl_info);
6945         if (rc)
6946                 return rc;
6947
6948         /*
6949          * Wait until the controller is ready to start accepting SIS
6950          * commands.
6951          */
6952         rc = sis_wait_for_ctrl_ready(ctrl_info);
6953         if (rc)
6954                 return rc;
6955
6956         /*
6957          * Get the controller properties.  This allows us to determine
6958          * whether or not it supports PQI mode.
6959          */
6960         rc = sis_get_ctrl_properties(ctrl_info);
6961         if (rc) {
6962                 dev_err(&ctrl_info->pci_dev->dev,
6963                         "error obtaining controller properties\n");
6964                 return rc;
6965         }
6966
6967         rc = sis_get_pqi_capabilities(ctrl_info);
6968         if (rc) {
6969                 dev_err(&ctrl_info->pci_dev->dev,
6970                         "error obtaining controller capabilities\n");
6971                 return rc;
6972         }
6973
6974         if (reset_devices) {
6975                 if (ctrl_info->max_outstanding_requests >
6976                         PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
6977                         ctrl_info->max_outstanding_requests =
6978                                         PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
6979         } else {
6980                 if (ctrl_info->max_outstanding_requests >
6981                         PQI_MAX_OUTSTANDING_REQUESTS)
6982                         ctrl_info->max_outstanding_requests =
6983                                         PQI_MAX_OUTSTANDING_REQUESTS;
6984         }
6985
6986         pqi_calculate_io_resources(ctrl_info);
6987
6988         rc = pqi_alloc_error_buffer(ctrl_info);
6989         if (rc) {
6990                 dev_err(&ctrl_info->pci_dev->dev,
6991                         "failed to allocate PQI error buffer\n");
6992                 return rc;
6993         }
6994
6995         /*
6996          * If the function we are about to call succeeds, the
6997          * controller will transition from legacy SIS mode
6998          * into PQI mode.
6999          */
7000         rc = sis_init_base_struct_addr(ctrl_info);
7001         if (rc) {
7002                 dev_err(&ctrl_info->pci_dev->dev,
7003                         "error initializing PQI mode\n");
7004                 return rc;
7005         }
7006
7007         /* Wait for the controller to complete the SIS -> PQI transition. */
7008         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7009         if (rc) {
7010                 dev_err(&ctrl_info->pci_dev->dev,
7011                         "transition to PQI mode failed\n");
7012                 return rc;
7013         }
7014
7015         /* From here on, we are running in PQI mode. */
7016         ctrl_info->pqi_mode_enabled = true;
7017         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7018
7019         rc = pqi_alloc_admin_queues(ctrl_info);
7020         if (rc) {
7021                 dev_err(&ctrl_info->pci_dev->dev,
7022                         "failed to allocate admin queues\n");
7023                 return rc;
7024         }
7025
7026         rc = pqi_create_admin_queues(ctrl_info);
7027         if (rc) {
7028                 dev_err(&ctrl_info->pci_dev->dev,
7029                         "error creating admin queues\n");
7030                 return rc;
7031         }
7032
7033         rc = pqi_report_device_capability(ctrl_info);
7034         if (rc) {
7035                 dev_err(&ctrl_info->pci_dev->dev,
7036                         "obtaining device capability failed\n");
7037                 return rc;
7038         }
7039
7040         rc = pqi_validate_device_capability(ctrl_info);
7041         if (rc)
7042                 return rc;
7043
7044         pqi_calculate_queue_resources(ctrl_info);
7045
7046         rc = pqi_enable_msix_interrupts(ctrl_info);
7047         if (rc)
7048                 return rc;
7049
7050         if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7051                 ctrl_info->max_msix_vectors =
7052                         ctrl_info->num_msix_vectors_enabled;
7053                 pqi_calculate_queue_resources(ctrl_info);
7054         }
7055
7056         rc = pqi_alloc_io_resources(ctrl_info);
7057         if (rc)
7058                 return rc;
7059
7060         rc = pqi_alloc_operational_queues(ctrl_info);
7061         if (rc) {
7062                 dev_err(&ctrl_info->pci_dev->dev,
7063                         "failed to allocate operational queues\n");
7064                 return rc;
7065         }
7066
7067         pqi_init_operational_queues(ctrl_info);
7068
7069         rc = pqi_request_irqs(ctrl_info);
7070         if (rc)
7071                 return rc;
7072
7073         rc = pqi_create_queues(ctrl_info);
7074         if (rc)
7075                 return rc;
7076
7077         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7078
7079         ctrl_info->controller_online = true;
7080
7081         rc = pqi_process_config_table(ctrl_info);
7082         if (rc)
7083                 return rc;
7084
7085         pqi_start_heartbeat_timer(ctrl_info);
7086
7087         rc = pqi_enable_events(ctrl_info);
7088         if (rc) {
7089                 dev_err(&ctrl_info->pci_dev->dev,
7090                         "error enabling events\n");
7091                 return rc;
7092         }
7093
7094         /* Register with the SCSI subsystem. */
7095         rc = pqi_register_scsi(ctrl_info);
7096         if (rc)
7097                 return rc;
7098
7099         rc = pqi_get_ctrl_firmware_version(ctrl_info);
7100         if (rc) {
7101                 dev_err(&ctrl_info->pci_dev->dev,
7102                         "error obtaining firmware version\n");
7103                 return rc;
7104         }
7105
7106         rc = pqi_set_diag_rescan(ctrl_info);
7107         if (rc) {
7108                 dev_err(&ctrl_info->pci_dev->dev,
7109                         "error enabling multi-lun rescan\n");
7110                 return rc;
7111         }
7112
7113         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7114         if (rc) {
7115                 dev_err(&ctrl_info->pci_dev->dev,
7116                         "error updating host wellness\n");
7117                 return rc;
7118         }
7119
7120         pqi_schedule_update_time_worker(ctrl_info);
7121
7122         pqi_scan_scsi_devices(ctrl_info);
7123
7124         return 0;
7125 }
7126
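/*
 * Reset the driver's cached producer/consumer indexes and zero the
 * corresponding hardware registers for the admin, operational, and event
 * queues so the existing queue memory can be reused (e.g. on resume or
 * after an Online Firmware Activation) without reallocating it.
 */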
7127 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7128 {
7129         unsigned int i;
7130         struct pqi_admin_queues *admin_queues;
7131         struct pqi_event_queue *event_queue;
7132
7133         admin_queues = &ctrl_info->admin_queues;
7134         admin_queues->iq_pi_copy = 0;
7135         admin_queues->oq_ci_copy = 0;
7136         writel(0, admin_queues->oq_pi);
7137
7138         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7139                 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7140                 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7141                 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7142
7143                 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7144                 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7145                 writel(0, ctrl_info->queue_groups[i].oq_pi);
7146         }
7147
7148         event_queue = &ctrl_info->event_queue;
7149         writel(0, event_queue->oq_pi);
7150         event_queue->oq_ci_copy = 0;
7151 }
7152
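/*
 * Resume-path counterpart of pqi_ctrl_init(): force the controller back
 * into SIS mode, redo the SIS handshake, then re-enter PQI mode reusing
 * the previously allocated queue memory via pqi_reinit_queues() rather
 * than allocating everything from scratch.
 */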
7153 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7154 {
7155         int rc;
7156
7157         rc = pqi_force_sis_mode(ctrl_info);
7158         if (rc)
7159                 return rc;
7160
7161         /*
7162          * Wait until the controller is ready to start accepting SIS
7163          * commands.
7164          */
7165         rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7166         if (rc)
7167                 return rc;
7168
7169         /*
7170          * Get the controller properties.  This allows us to determine
7171          * whether or not it supports PQI mode.
7172          */
7173         rc = sis_get_ctrl_properties(ctrl_info);
7174         if (rc) {
7175                 dev_err(&ctrl_info->pci_dev->dev,
7176                         "error obtaining controller properties\n");
7177                 return rc;
7178         }
7179
7180         rc = sis_get_pqi_capabilities(ctrl_info);
7181         if (rc) {
7182                 dev_err(&ctrl_info->pci_dev->dev,
7183                         "error obtaining controller capabilities\n");
7184                 return rc;
7185         }
7186
7187         /*
7188          * If the function we are about to call succeeds, the
7189          * controller will transition from legacy SIS mode
7190          * into PQI mode.
7191          */
7192         rc = sis_init_base_struct_addr(ctrl_info);
7193         if (rc) {
7194                 dev_err(&ctrl_info->pci_dev->dev,
7195                         "error initializing PQI mode\n");
7196                 return rc;
7197         }
7198
7199         /* Wait for the controller to complete the SIS -> PQI transition. */
7200         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7201         if (rc) {
7202                 dev_err(&ctrl_info->pci_dev->dev,
7203                         "transition to PQI mode failed\n");
7204                 return rc;
7205         }
7206
7207         /* From here on, we are running in PQI mode. */
7208         ctrl_info->pqi_mode_enabled = true;
7209         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7210
7211         pqi_reinit_queues(ctrl_info);
7212
7213         rc = pqi_create_admin_queues(ctrl_info);
7214         if (rc) {
7215                 dev_err(&ctrl_info->pci_dev->dev,
7216                         "error creating admin queues\n");
7217                 return rc;
7218         }
7219
7220         rc = pqi_create_queues(ctrl_info);
7221         if (rc)
7222                 return rc;
7223
7224         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7225
7226         ctrl_info->controller_online = true;
7227         pqi_ctrl_unblock_requests(ctrl_info);
7228
7229         rc = pqi_process_config_table(ctrl_info);
7230         if (rc)
7231                 return rc;
7232
7233         pqi_start_heartbeat_timer(ctrl_info);
7234
7235         rc = pqi_enable_events(ctrl_info);
7236         if (rc) {
7237                 dev_err(&ctrl_info->pci_dev->dev,
7238                         "error enabling events\n");
7239                 return rc;
7240         }
7241
7242         rc = pqi_get_ctrl_firmware_version(ctrl_info);
7243         if (rc) {
7244                 dev_err(&ctrl_info->pci_dev->dev,
7245                         "error obtaining firmware version\n");
7246                 return rc;
7247         }
7248
7249         rc = pqi_set_diag_rescan(ctrl_info);
7250         if (rc) {
7251                 dev_err(&ctrl_info->pci_dev->dev,
7252                         "error enabling multi-lun rescan\n");
7253                 return rc;
7254         }
7255
7256         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7257         if (rc) {
7258                 dev_err(&ctrl_info->pci_dev->dev,
7259                         "error updating host wellness\n");
7260                 return rc;
7261         }
7262
7263         pqi_schedule_update_time_worker(ctrl_info);
7264
7265         pqi_scan_scsi_devices(ctrl_info);
7266
7267         return 0;
7268 }
7269
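/*
 * Program the PCIe completion timeout range in the Device Control 2
 * register: clear the PCI_EXP_DEVCTL2_COMP_TIMEOUT field, then set it to
 * the encoded range value passed in, e.g.:
 *
 *	pqi_set_pcie_completion_timeout(pci_dev,
 *		PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
 */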
7270 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
7271         u16 timeout)
7272 {
7273         return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
7274                 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
7275 }
7276
7277 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
7278 {
7279         int rc;
7280         u64 mask;
7281
7282         rc = pci_enable_device(ctrl_info->pci_dev);
7283         if (rc) {
7284                 dev_err(&ctrl_info->pci_dev->dev,
7285                         "failed to enable PCI device\n");
7286                 return rc;
7287         }
7288
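        /* Prefer 64-bit DMA when dma_addr_t is wide enough; else 32-bit. */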
7289         if (sizeof(dma_addr_t) > 4)
7290                 mask = DMA_BIT_MASK(64);
7291         else
7292                 mask = DMA_BIT_MASK(32);
7293
7294         rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
7295         if (rc) {
7296                 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
7297                 goto disable_device;
7298         }
7299
7300         rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
7301         if (rc) {
7302                 dev_err(&ctrl_info->pci_dev->dev,
7303                         "failed to obtain PCI resources\n");
7304                 goto disable_device;
7305         }
7306
7307         ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
7308                 ctrl_info->pci_dev, 0),
7309                 sizeof(struct pqi_ctrl_registers));
7310         if (!ctrl_info->iomem_base) {
7311                 dev_err(&ctrl_info->pci_dev->dev,
7312                         "failed to map memory for controller registers\n");
7313                 rc = -ENOMEM;
7314                 goto release_regions;
7315         }
7316
7317 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS               0x6
7318
7319         /* Increase the PCIe completion timeout. */
7320         rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
7321                 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
7322         if (rc) {
7323                 dev_err(&ctrl_info->pci_dev->dev,
7324                         "failed to set PCIe completion timeout\n");
7325                 goto release_regions;
7326         }
7327
7328         /* Enable bus mastering. */
7329         pci_set_master(ctrl_info->pci_dev);
7330
7331         ctrl_info->registers = ctrl_info->iomem_base;
7332         ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
7333
7334         pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
7335
7336         return 0;
7337
7338 release_regions:
7339         pci_release_regions(ctrl_info->pci_dev);
7340 disable_device:
7341         pci_disable_device(ctrl_info->pci_dev);
7342
7343         return rc;
7344 }
7345
7346 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
7347 {
7348         iounmap(ctrl_info->iomem_base);
7349         pci_release_regions(ctrl_info->pci_dev);
7350         if (pci_is_enabled(ctrl_info->pci_dev))
7351                 pci_disable_device(ctrl_info->pci_dev);
7352         pci_set_drvdata(ctrl_info->pci_dev, NULL);
7353 }
7354
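/*
 * Allocate the per-controller state on the requested NUMA node and
 * initialize its locks, lists, work items, timers, and the semaphore
 * that bounds synchronous requests.
 */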
7355 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
7356 {
7357         struct pqi_ctrl_info *ctrl_info;
7358
7359         ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7360                         GFP_KERNEL, numa_node);
7361         if (!ctrl_info)
7362                 return NULL;
7363
7364         mutex_init(&ctrl_info->scan_mutex);
7365         mutex_init(&ctrl_info->lun_reset_mutex);
7366         mutex_init(&ctrl_info->ofa_mutex);
7367
7368         INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7369         spin_lock_init(&ctrl_info->scsi_device_list_lock);
7370
7371         INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7372         atomic_set(&ctrl_info->num_interrupts, 0);
7373
7374         INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7375         INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7376
7377         timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
7378         INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
7379
7380         sema_init(&ctrl_info->sync_request_sem,
7381                 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7382         init_waitqueue_head(&ctrl_info->block_requests_wait);
7383
7384         INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7385         spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7386         INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7387                 pqi_raid_bypass_retry_worker);
7388
7389         ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
7390         ctrl_info->irq_mode = IRQ_MODE_NONE;
7391         ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7392
7393         return ctrl_info;
7394 }
7395
7396 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7397 {
7398         kfree(ctrl_info);
7399 }
7400
7401 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7402 {
7403         pqi_free_irqs(ctrl_info);
7404         pqi_disable_msix_interrupts(ctrl_info);
7405 }
7406
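/*
 * Release everything controller initialization set up, checking each
 * pointer first so this is safe to call on a partially initialized
 * controller, and finally free the controller info block itself.
 */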
7407 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7408 {
7409         pqi_stop_heartbeat_timer(ctrl_info);
7410         pqi_free_interrupts(ctrl_info);
7411         if (ctrl_info->queue_memory_base)
7412                 dma_free_coherent(&ctrl_info->pci_dev->dev,
7413                         ctrl_info->queue_memory_length,
7414                         ctrl_info->queue_memory_base,
7415                         ctrl_info->queue_memory_base_dma_handle);
7416         if (ctrl_info->admin_queue_memory_base)
7417                 dma_free_coherent(&ctrl_info->pci_dev->dev,
7418                         ctrl_info->admin_queue_memory_length,
7419                         ctrl_info->admin_queue_memory_base,
7420                         ctrl_info->admin_queue_memory_base_dma_handle);
7421         pqi_free_all_io_requests(ctrl_info);
7422         if (ctrl_info->error_buffer)
7423                 dma_free_coherent(&ctrl_info->pci_dev->dev,
7424                         ctrl_info->error_buffer_length,
7425                         ctrl_info->error_buffer,
7426                         ctrl_info->error_buffer_dma_handle);
7427         if (ctrl_info->iomem_base)
7428                 pqi_cleanup_pci_init(ctrl_info);
7429         pqi_free_ctrl_info(ctrl_info);
7430 }
7431
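/*
 * Full teardown for remove or a failed probe: stop the background
 * workers, detach all SCSI devices, drop back to SIS mode if PQI mode
 * was entered, and free all controller resources.
 */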
7432 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7433 {
7434         pqi_cancel_rescan_worker(ctrl_info);
7435         pqi_cancel_update_time_worker(ctrl_info);
7436         pqi_remove_all_scsi_devices(ctrl_info);
7437         pqi_unregister_scsi(ctrl_info);
7438         if (ctrl_info->pqi_mode_enabled)
7439                 pqi_revert_to_sis_mode(ctrl_info);
7440         pqi_free_ctrl_resources(ctrl_info);
7441 }
7442
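/*
 * Quiesce for Online Firmware Activation: stop the background workers,
 * wait for any LUN reset or scan to finish, block and drain outstanding
 * I/O, stop the heartbeat timer, and record that the controller is about
 * to drop back to SIS mode.
 */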
7443 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7444 {
7445         pqi_cancel_update_time_worker(ctrl_info);
7446         pqi_cancel_rescan_worker(ctrl_info);
7447         pqi_wait_until_lun_reset_finished(ctrl_info);
7448         pqi_wait_until_scan_finished(ctrl_info);
7449         pqi_ctrl_ofa_start(ctrl_info);
7450         pqi_ctrl_block_requests(ctrl_info);
7451         pqi_ctrl_wait_until_quiesced(ctrl_info);
7452         pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7453         pqi_fail_io_queued_for_all_devices(ctrl_info);
7454         pqi_wait_until_inbound_queues_empty(ctrl_info);
7455         pqi_stop_heartbeat_timer(ctrl_info);
7456         ctrl_info->pqi_mode_enabled = false;
7457         pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7458 }
7459
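/*
 * Undo pqi_ofa_ctrl_quiesce() after firmware activation: free the OFA
 * host buffer, restore PQI-mode bookkeeping, unblock requests, restart
 * the heartbeat and time-update workers, clear the soft-reset abort
 * status, and rescan for devices.
 */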
7460 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7461 {
7462         pqi_ofa_free_host_buffer(ctrl_info);
7463         ctrl_info->pqi_mode_enabled = true;
7464         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7465         ctrl_info->controller_online = true;
7466         pqi_ctrl_unblock_requests(ctrl_info);
7467         pqi_start_heartbeat_timer(ctrl_info);
7468         pqi_schedule_update_time_worker(ctrl_info);
7469         pqi_clear_soft_reset_status(ctrl_info,
7470                 PQI_SOFT_RESET_ABORT);
7471         pqi_scan_scsi_devices(ctrl_info);
7472 }
7473
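/*
 * Carve the OFA host buffer into DMA-coherent chunks of chunk_size bytes
 * (sg_count = total_size rounded up to a whole number of chunks, so e.g.
 * total_size = 1 MiB with chunk_size = 256 KiB gives sg_count = 4) and
 * describe each chunk with an SG descriptor in the OFA memory header.
 * On any allocation failure, free the chunks already obtained, report
 * zero bytes allocated, and return -ENOMEM.
 */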
7474 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7475         u32 total_size, u32 chunk_size)
7476 {
7477         u32 sg_count;
7478         u32 size;
7479         int i;
7480         struct pqi_sg_descriptor *mem_descriptor = NULL;
7481         struct device *dev;
7482         struct pqi_ofa_memory *ofap;
7483
7484         dev = &ctrl_info->pci_dev->dev;
7485
7486         sg_count = (total_size + chunk_size - 1);
7487         sg_count /= chunk_size;
7488
7489         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7490
7491         if (sg_count * chunk_size < total_size)
7492                 goto out;
7493
7494         ctrl_info->pqi_ofa_chunk_virt_addr =
7495                                 kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
7496         if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7497                 goto out;
7498
7499         for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
7500                 dma_addr_t dma_handle;
7501
7502                 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7503                         dma_alloc_coherent(dev, chunk_size, &dma_handle,
7504                                            GFP_KERNEL);
7505
7506                 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7507                         break;
7508
7509                 mem_descriptor = &ofap->sg_descriptor[i];
7510                 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
7511                 put_unaligned_le32(chunk_size, &mem_descriptor->length);
7512         }
7513
7514         if (!size || size < total_size)
7515                 goto out_free_chunks;
7516
7517         put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
7518         put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
7519         put_unaligned_le32(size, &ofap->bytes_allocated);
7520
7521         return 0;
7522
7523 out_free_chunks:
7524         while (--i >= 0) {
7525                 mem_descriptor = &ofap->sg_descriptor[i];
7526                 dma_free_coherent(dev, chunk_size,
7527                                 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7528                                 get_unaligned_le64(&mem_descriptor->address));
7529         }
7530         kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7531
7532 out:
7533         put_unaligned_le32(0, &ofap->bytes_allocated);
7534         return -ENOMEM;
7535 }
7536
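/*
 * Try progressively smaller chunk sizes, halving from total_size down to
 * the smallest size that still fits in PQI_OFA_MAX_SG_DESCRIPTORS
 * descriptors (clamped to at least one byte so the chunk size can never
 * reach zero), until pqi_ofa_alloc_mem() succeeds.
 */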
7537 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7538 {
7539         u32 total_size;
7540         u32 min_chunk_size;
7541         u32 chunk_sz;
7542
7543         total_size = le32_to_cpu(
7544                         ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7545         min_chunk_size = max(total_size / PQI_OFA_MAX_SG_DESCRIPTORS, 1U);
7546
7547         for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
7548                 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7549                         return 0;
7550
7551         return -ENOMEM;
7552 }
7553
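/*
 * Allocate and initialize the OFA memory descriptor header (signature,
 * version, requested size), then populate it with host buffer chunks.
 * Failure is logged but not propagated; the caller proceeds without a
 * host buffer.
 */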
7554 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7555         u32 bytes_requested)
7556 {
7557         struct pqi_ofa_memory *pqi_ofa_memory;
7558         struct device *dev;
7559
7560         dev = &ctrl_info->pci_dev->dev;
7561         pqi_ofa_memory = dma_alloc_coherent(dev,
7562                                             PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7563                                             &ctrl_info->pqi_ofa_mem_dma_handle,
7564                                             GFP_KERNEL);
7565
7566         if (!pqi_ofa_memory)
7567                 return;
7568
7569         put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
7570         memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
7571                                         sizeof(pqi_ofa_memory->signature));
7572         pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
7573
7574         ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7575
7576         if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0)
7577                 dev_err(dev, "failed to allocate host buffer of size = %u\n",
7578                         bytes_requested);
7580 }
7581
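/*
 * Free the OFA host buffer chunks described by the SG descriptors, the
 * chunk pointer array, and finally the descriptor header itself.
 */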
7582 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7583 {
7584         int i;
7585         struct pqi_sg_descriptor *mem_descriptor;
7586         struct pqi_ofa_memory *ofap;
7587
7588         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7589
7590         if (!ofap)
7591                 return;
7592
7593         if (!ofap->bytes_allocated)
7594                 goto out;
7595
7596         mem_descriptor = ofap->sg_descriptor;
7597
7598         for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
7599                 i++) {
7600                 dma_free_coherent(&ctrl_info->pci_dev->dev,
7601                         get_unaligned_le32(&mem_descriptor[i].length),
7602                         ctrl_info->pqi_ofa_chunk_virt_addr[i],
7603                         get_unaligned_le64(&mem_descriptor[i].address));
7604         }
7605         kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7606
7607 out:
7608         dma_free_coherent(&ctrl_info->pci_dev->dev,
7609                         PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
7610                         ctrl_info->pqi_ofa_mem_dma_handle);
7611         ctrl_info->pqi_ofa_mem_virt_addr = NULL;
7612 }
7613
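/*
 * Tell the firmware where the OFA host buffer lives via a vendor-general
 * HOST_MEMORY_UPDATE request. If no buffer was set up, the request is
 * sent with a zero buffer address and length.
 */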
7614 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7615 {
7616         struct pqi_vendor_general_request request;
7617         size_t size;
7618         struct pqi_ofa_memory *ofap;
7619
7620         memset(&request, 0, sizeof(request));
7621
7622         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7623
7624         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7625         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7626                 &request.header.iu_length);
7627         put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
7628                 &request.function_code);
7629
7630         if (ofap) {
7631                 size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
7632                         get_unaligned_le16(&ofap->num_memory_descriptors) *
7633                         sizeof(struct pqi_sg_descriptor);
7634
7635                 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7636                         &request.data.ofa_memory_allocation.buffer_address);
7637                 put_unaligned_le32(size,
7638                         &request.data.ofa_memory_allocation.buffer_length);
7640         }
7641
7642         return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7643                 0, NULL, NO_TIMEOUT);
7644 }
7645
7646 #define PQI_POST_RESET_DELAY_B4_MSGU_READY      5000    /* msecs */
7647
7648 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7649 {
7650         msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7651         return pqi_ctrl_init_resume(ctrl_info);
7652 }
7653
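/*
 * Carry out the action selected by the lockup_action module parameter
 * when a controller lockup is detected: panic, reboot, or nothing.
 */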
7654 static void pqi_perform_lockup_action(void)
7655 {
7656         switch (pqi_lockup_action) {
7657         case PANIC:
7658                 panic("FATAL: Smart Family Controller lockup detected");
7659                 break;
7660         case REBOOT:
7661                 emergency_restart();
7662                 break;
7663         case NONE:
7664         default:
7665                 break;
7666         }
7667 }
7668
7669 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
7670         .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
7671         .status = SAM_STAT_CHECK_CONDITION,
7672 };
7673
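/*
 * Walk the I/O request pool and complete every request that still holds
 * a reference: SCSI commands are failed with DID_NO_CONNECT, internal
 * requests with -ENXIO and a canned check-condition error record.
 */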
7674 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
7675 {
7676         unsigned int i;
7677         struct pqi_io_request *io_request;
7678         struct scsi_cmnd *scmd;
7679
7680         for (i = 0; i < ctrl_info->max_io_slots; i++) {
7681                 io_request = &ctrl_info->io_request_pool[i];
7682                 if (atomic_read(&io_request->refcount) == 0)
7683                         continue;
7684
7685                 scmd = io_request->scmd;
7686                 if (scmd) {
7687                         set_host_byte(scmd, DID_NO_CONNECT);
7688                 } else {
7689                         io_request->status = -ENXIO;
7690                         io_request->error_info =
7691                                 &pqi_ctrl_offline_raid_error_info;
7692                 }
7693
7694                 io_request->io_complete_callback(io_request,
7695                         io_request->context);
7696         }
7697 }
7698
7699 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
7700 {
7701         pqi_perform_lockup_action();
7702         pqi_stop_heartbeat_timer(ctrl_info);
7703         pqi_free_interrupts(ctrl_info);
7704         pqi_cancel_rescan_worker(ctrl_info);
7705         pqi_cancel_update_time_worker(ctrl_info);
7706         pqi_ctrl_wait_until_quiesced(ctrl_info);
7707         pqi_fail_all_outstanding_requests(ctrl_info);
7708         pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7709         pqi_ctrl_unblock_requests(ctrl_info);
7710 }
7711
7712 static void pqi_ctrl_offline_worker(struct work_struct *work)
7713 {
7714         struct pqi_ctrl_info *ctrl_info;
7715
7716         ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7717         pqi_take_ctrl_offline_deferred(ctrl_info);
7718 }
7719
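/*
 * Take a dead controller offline: block further requests, optionally
 * shut the controller down through SIS, and defer the heavier teardown
 * (failing outstanding I/O, freeing interrupts) to ctrl_offline_work.
 */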
7720 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7721 {
7722         if (!ctrl_info->controller_online)
7723                 return;
7724
7725         ctrl_info->controller_online = false;
7726         ctrl_info->pqi_mode_enabled = false;
7727         pqi_ctrl_block_requests(ctrl_info);
7728         if (!pqi_disable_ctrl_shutdown)
7729                 sis_shutdown_ctrl(ctrl_info);
7730         pci_disable_device(ctrl_info->pci_dev);
7731         dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
7732         schedule_work(&ctrl_info->ctrl_offline_work);
7733 }
7734
7735 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
7736         const struct pci_device_id *id)
7737 {
7738         char *ctrl_description;
7739
7740         if (id->driver_data)
7741                 ctrl_description = (char *)id->driver_data;
7742         else
7743                 ctrl_description = "Microsemi Smart Family Controller";
7744
7745         dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
7746 }
7747
7748 static int pqi_pci_probe(struct pci_dev *pci_dev,
7749         const struct pci_device_id *id)
7750 {
7751         int rc;
7752         int node;
7753         struct pqi_ctrl_info *ctrl_info;
7754
7755         pqi_print_ctrl_info(pci_dev, id);
7756
7757         if (pqi_disable_device_id_wildcards &&
7758                 id->subvendor == PCI_ANY_ID &&
7759                 id->subdevice == PCI_ANY_ID) {
7760                 dev_warn(&pci_dev->dev,
7761                         "controller not probed because device ID wildcards are disabled\n");
7762                 return -ENODEV;
7763         }
7764
7765         if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
7766                 dev_warn(&pci_dev->dev,
7767                         "controller device ID matched using wildcards\n");
7768
7769         node = dev_to_node(&pci_dev->dev);
7770         if (node == NUMA_NO_NODE) {
7771                 node = cpu_to_node(0);
7772                 if (node == NUMA_NO_NODE)
7773                         node = 0;
7774                 set_dev_node(&pci_dev->dev, node);
7775         }
7776
7777         ctrl_info = pqi_alloc_ctrl_info(node);
7778         if (!ctrl_info) {
7779                 dev_err(&pci_dev->dev,
7780                         "failed to allocate controller info block\n");
7781                 return -ENOMEM;
7782         }
7783
7784         ctrl_info->pci_dev = pci_dev;
7785
7786         rc = pqi_pci_init(ctrl_info);
7787         if (rc)
7788                 goto error;
7789
7790         rc = pqi_ctrl_init(ctrl_info);
7791         if (rc)
7792                 goto error;
7793
7794         return 0;
7795
7796 error:
7797         pqi_remove_ctrl(ctrl_info);
7798
7799         return rc;
7800 }
7801
7802 static void pqi_pci_remove(struct pci_dev *pci_dev)
7803 {
7804         struct pqi_ctrl_info *ctrl_info;
7805
7806         ctrl_info = pci_get_drvdata(pci_dev);
7807         if (!ctrl_info)
7808                 return;
7809
7810         ctrl_info->in_shutdown = true;
7811
7812         pqi_remove_ctrl(ctrl_info);
7813 }
7814
7815 static void pqi_shutdown(struct pci_dev *pci_dev)
7816 {
7817         int rc;
7818         struct pqi_ctrl_info *ctrl_info;
7819
7820         ctrl_info = pci_get_drvdata(pci_dev);
7821         if (!ctrl_info)
7822                 goto error;
7823
7824         /*
7825          * Write all data in the controller's battery-backed cache to
7826          * storage.
7827          */
7828         rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
7829         pqi_free_interrupts(ctrl_info);
7830         pqi_reset(ctrl_info);
7831         if (rc == 0)
7832                 return;
7833
7834 error:
7835         dev_warn(&pci_dev->dev,
7836                 "unable to flush controller cache\n");
7837 }
7838
7839 static void pqi_process_lockup_action_param(void)
7840 {
7841         unsigned int i;
7842
7843         if (!pqi_lockup_action_param)
7844                 return;
7845
7846         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
7847                 if (strcmp(pqi_lockup_action_param,
7848                         pqi_lockup_actions[i].name) == 0) {
7849                         pqi_lockup_action = pqi_lockup_actions[i].action;
7850                         return;
7851                 }
7852         }
7853
7854         pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
7855                 DRIVER_NAME_SHORT, pqi_lockup_action_param);
7856 }
7857
7858 static void pqi_process_module_params(void)
7859 {
7860         pqi_process_lockup_action_param();
7861 }
7862
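/*
 * Suspend: quiesce all driver activity, flush the controller cache, and
 * drain outstanding I/O before the device is powered down. For a freeze
 * (hibernation image creation) the device is left in D0.
 */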
7863 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
7864 {
7865         struct pqi_ctrl_info *ctrl_info;
7866
7867         ctrl_info = pci_get_drvdata(pci_dev);
7868
7869         pqi_disable_events(ctrl_info);
7870         pqi_cancel_update_time_worker(ctrl_info);
7871         pqi_cancel_rescan_worker(ctrl_info);
7872         pqi_wait_until_scan_finished(ctrl_info);
7873         pqi_wait_until_lun_reset_finished(ctrl_info);
7874         pqi_wait_until_ofa_finished(ctrl_info);
7875         pqi_flush_cache(ctrl_info, SUSPEND);
7876         pqi_ctrl_block_requests(ctrl_info);
7877         pqi_ctrl_wait_until_quiesced(ctrl_info);
7878         pqi_wait_until_inbound_queues_empty(ctrl_info);
7879         pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
7880         pqi_stop_heartbeat_timer(ctrl_info);
7881
7882         if (state.event == PM_EVENT_FREEZE)
7883                 return 0;
7884
7885         pci_save_state(pci_dev);
7886         pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
7887
7888         ctrl_info->controller_online = false;
7889         ctrl_info->pqi_mode_enabled = false;
7890
7891         return 0;
7892 }
7893
7894 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
7895 {
7896         int rc;
7897         struct pqi_ctrl_info *ctrl_info;
7898
7899         ctrl_info = pci_get_drvdata(pci_dev);
7900
7901         if (pci_dev->current_state != PCI_D0) {
7902                 ctrl_info->max_hw_queue_index = 0;
7903                 pqi_free_interrupts(ctrl_info);
7904                 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
7905                 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
7906                         IRQF_SHARED, DRIVER_NAME_SHORT,
7907                         &ctrl_info->queue_groups[0]);
7908                 if (rc) {
7909                         dev_err(&ctrl_info->pci_dev->dev,
7910                                 "irq %u init failed with error %d\n",
7911                                 pci_dev->irq, rc);
7912                         return rc;
7913                 }
7914                 pqi_start_heartbeat_timer(ctrl_info);
7915                 pqi_ctrl_unblock_requests(ctrl_info);
7916                 return 0;
7917         }
7918
7919         pci_set_power_state(pci_dev, PCI_D0);
7920         pci_restore_state(pci_dev);
7921
7922         return pqi_ctrl_init_resume(ctrl_info);
7923 }
7924
7925 /* Define the PCI IDs for the controllers that we support. */
7926 static const struct pci_device_id pqi_pci_id_table[] = {
7927         {
7928                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7929                                0x105b, 0x1211)
7930         },
7931         {
7932                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7933                                0x105b, 0x1321)
7934         },
7935         {
7936                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7937                                0x152d, 0x8a22)
7938         },
7939         {
7940                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7941                                0x152d, 0x8a23)
7942         },
7943         {
7944                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7945                                0x152d, 0x8a24)
7946         },
7947         {
7948                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7949                                0x152d, 0x8a36)
7950         },
7951         {
7952                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7953                                0x152d, 0x8a37)
7954         },
7955         {
7956                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7957                                0x193d, 0x1104)
7958         },
7959         {
7960                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7961                                0x193d, 0x1105)
7962         },
7963         {
7964                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7965                                0x193d, 0x1106)
7966         },
7967         {
7968                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7969                                0x193d, 0x1107)
7970         },
7971         {
7972                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7973                                0x193d, 0x8460)
7974         },
7975         {
7976                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7977                                0x193d, 0x8461)
7978         },
7979         {
7980                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7981                                0x193d, 0xc460)
7982         },
7983         {
7984                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7985                                0x193d, 0xc461)
7986         },
7987         {
7988                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7989                                0x193d, 0xf460)
7990         },
7991         {
7992                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7993                                0x193d, 0xf461)
7994         },
7995         {
7996                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7997                                0x1bd4, 0x0045)
7998         },
7999         {
8000                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8001                                0x1bd4, 0x0046)
8002         },
8003         {
8004                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8005                                0x1bd4, 0x0047)
8006         },
8007         {
8008                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8009                                0x1bd4, 0x0048)
8010         },
8011         {
8012                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8013                                0x1bd4, 0x004a)
8014         },
8015         {
8016                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8017                                0x1bd4, 0x004b)
8018         },
8019         {
8020                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8021                                0x1bd4, 0x004c)
8022         },
8023         {
8024                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8025                                0x19e5, 0xd227)
8026         },
8027         {
8028                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8029                                0x19e5, 0xd228)
8030         },
8031         {
8032                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8033                                0x19e5, 0xd229)
8034         },
8035         {
8036                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8037                                0x19e5, 0xd22a)
8038         },
8039         {
8040                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8041                                0x19e5, 0xd22b)
8042         },
8043         {
8044                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8045                                0x19e5, 0xd22c)
8046         },
8047         {
8048                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8049                                PCI_VENDOR_ID_ADAPTEC2, 0x0110)
8050         },
8051         {
8052                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8053                                PCI_VENDOR_ID_ADAPTEC2, 0x0608)
8054         },
8055         {
8056                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8057                                PCI_VENDOR_ID_ADAPTEC2, 0x0800)
8058         },
8059         {
8060                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8061                                PCI_VENDOR_ID_ADAPTEC2, 0x0801)
8062         },
8063         {
8064                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8065                                PCI_VENDOR_ID_ADAPTEC2, 0x0802)
8066         },
8067         {
8068                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8069                                PCI_VENDOR_ID_ADAPTEC2, 0x0803)
8070         },
8071         {
8072                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8073                                PCI_VENDOR_ID_ADAPTEC2, 0x0804)
8074         },
8075         {
8076                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8077                                PCI_VENDOR_ID_ADAPTEC2, 0x0805)
8078         },
8079         {
8080                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8081                                PCI_VENDOR_ID_ADAPTEC2, 0x0806)
8082         },
8083         {
8084                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8085                                PCI_VENDOR_ID_ADAPTEC2, 0x0807)
8086         },
8087         {
8088                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8089                                PCI_VENDOR_ID_ADAPTEC2, 0x0900)
8090         },
8091         {
8092                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8093                                PCI_VENDOR_ID_ADAPTEC2, 0x0901)
8094         },
8095         {
8096                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8097                                PCI_VENDOR_ID_ADAPTEC2, 0x0902)
8098         },
8099         {
8100                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8101                                PCI_VENDOR_ID_ADAPTEC2, 0x0903)
8102         },
8103         {
8104                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8105                                PCI_VENDOR_ID_ADAPTEC2, 0x0904)
8106         },
8107         {
8108                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8109                                PCI_VENDOR_ID_ADAPTEC2, 0x0905)
8110         },
8111         {
8112                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8113                                PCI_VENDOR_ID_ADAPTEC2, 0x0906)
8114         },
8115         {
8116                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8117                                PCI_VENDOR_ID_ADAPTEC2, 0x0907)
8118         },
8119         {
8120                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8121                                PCI_VENDOR_ID_ADAPTEC2, 0x0908)
8122         },
8123         {
8124                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8125                                PCI_VENDOR_ID_ADAPTEC2, 0x090a)
8126         },
8127         {
8128                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8129                                PCI_VENDOR_ID_ADAPTEC2, 0x1200)
8130         },
8131         {
8132                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8133                                PCI_VENDOR_ID_ADAPTEC2, 0x1201)
8134         },
8135         {
8136                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8137                                PCI_VENDOR_ID_ADAPTEC2, 0x1202)
8138         },
8139         {
8140                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8141                                PCI_VENDOR_ID_ADAPTEC2, 0x1280)
8142         },
8143         {
8144                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8145                                PCI_VENDOR_ID_ADAPTEC2, 0x1281)
8146         },
8147         {
8148                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8149                                PCI_VENDOR_ID_ADAPTEC2, 0x1282)
8150         },
8151         {
8152                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8153                                PCI_VENDOR_ID_ADAPTEC2, 0x1300)
8154         },
8155         {
8156                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8157                                PCI_VENDOR_ID_ADAPTEC2, 0x1301)
8158         },
8159         {
8160                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8161                                PCI_VENDOR_ID_ADAPTEC2, 0x1302)
8162         },
8163         {
8164                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8165                                PCI_VENDOR_ID_ADAPTEC2, 0x1303)
8166         },
8167         {
8168                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8169                                PCI_VENDOR_ID_ADAPTEC2, 0x1380)
8170         },
8171         {
8172                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8173                                PCI_VENDOR_ID_ADVANTECH, 0x8312)
8174         },
8175         {
8176                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8177                                PCI_VENDOR_ID_DELL, 0x1fe0)
8178         },
8179         {
8180                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8181                                PCI_VENDOR_ID_HP, 0x0600)
8182         },
8183         {
8184                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8185                                PCI_VENDOR_ID_HP, 0x0601)
8186         },
8187         {
8188                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8189                                PCI_VENDOR_ID_HP, 0x0602)
8190         },
8191         {
8192                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8193                                PCI_VENDOR_ID_HP, 0x0603)
8194         },
8195         {
8196                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8197                                PCI_VENDOR_ID_HP, 0x0609)
8198         },
8199         {
8200                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8201                                PCI_VENDOR_ID_HP, 0x0650)
8202         },
8203         {
8204                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8205                                PCI_VENDOR_ID_HP, 0x0651)
8206         },
8207         {
8208                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8209                                PCI_VENDOR_ID_HP, 0x0652)
8210         },
8211         {
8212                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8213                                PCI_VENDOR_ID_HP, 0x0653)
8214         },
8215         {
8216                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8217                                PCI_VENDOR_ID_HP, 0x0654)
8218         },
8219         {
8220                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8221                                PCI_VENDOR_ID_HP, 0x0655)
8222         },
8223         {
8224                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8225                                PCI_VENDOR_ID_HP, 0x0700)
8226         },
8227         {
8228                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8229                                PCI_VENDOR_ID_HP, 0x0701)
8230         },
8231         {
8232                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8233                                PCI_VENDOR_ID_HP, 0x1001)
8234         },
8235         {
8236                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8237                                PCI_VENDOR_ID_HP, 0x1100)
8238         },
8239         {
8240                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8241                                PCI_VENDOR_ID_HP, 0x1101)
8242         },
8243         {
8244                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8245                                PCI_ANY_ID, PCI_ANY_ID)
8246         },
8247         { 0 }
8248 };
8249
8250 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
8251
8252 static struct pci_driver pqi_pci_driver = {
8253         .name = DRIVER_NAME_SHORT,
8254         .id_table = pqi_pci_id_table,
8255         .probe = pqi_pci_probe,
8256         .remove = pqi_pci_remove,
8257         .shutdown = pqi_shutdown,
8258 #if defined(CONFIG_PM)
8259         .suspend = pqi_suspend,
8260         .resume = pqi_resume,
8261 #endif
8262 };
8263
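/*
 * Module entry point: log the driver banner, attach the SAS transport
 * template, apply module parameters, and register the PCI driver,
 * releasing the transport template again if registration fails.
 */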
8264 static int __init pqi_init(void)
8265 {
8266         int rc;
8267
8268         pr_info(DRIVER_NAME "\n");
8269
8270         pqi_sas_transport_template =
8271                 sas_attach_transport(&pqi_sas_transport_functions);
8272         if (!pqi_sas_transport_template)
8273                 return -ENODEV;
8274
8275         pqi_process_module_params();
8276
8277         rc = pci_register_driver(&pqi_pci_driver);
8278         if (rc)
8279                 sas_release_transport(pqi_sas_transport_template);
8280
8281         return rc;
8282 }
8283
8284 static void __exit pqi_cleanup(void)
8285 {
8286         pci_unregister_driver(&pqi_pci_driver);
8287         sas_release_transport(pqi_sas_transport_template);
8288 }
8289
8290 module_init(pqi_init);
8291 module_exit(pqi_cleanup);
8292
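/*
 * Compile-time layout checks: BUILD_BUG_ON() fails the build if any of
 * these structure offsets or sizes drifts from the on-the-wire layout
 * the controller firmware expects. The function is not meant to be
 * called.
 */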
8293 static void __attribute__((unused)) verify_structures(void)
8294 {
8295         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8296                 sis_host_to_ctrl_doorbell) != 0x20);
8297         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8298                 sis_interrupt_mask) != 0x34);
8299         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8300                 sis_ctrl_to_host_doorbell) != 0x9c);
8301         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8302                 sis_ctrl_to_host_doorbell_clear) != 0xa0);
8303         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8304                 sis_driver_scratch) != 0xb0);
8305         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8306                 sis_firmware_status) != 0xbc);
8307         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8308                 sis_mailbox) != 0x1000);
8309         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8310                 pqi_registers) != 0x4000);
8311
8312         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8313                 iu_type) != 0x0);
8314         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8315                 iu_length) != 0x2);
8316         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8317                 response_queue_id) != 0x4);
8318         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8319                 work_area) != 0x6);
8320         BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
8321
8322         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8323                 status) != 0x0);
8324         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8325                 service_response) != 0x1);
8326         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8327                 data_present) != 0x2);
8328         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8329                 reserved) != 0x3);
8330         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8331                 residual_count) != 0x4);
8332         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8333                 data_length) != 0x8);
8334         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8335                 reserved1) != 0xa);
8336         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8337                 data) != 0xc);
8338         BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
8339
8340         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8341                 data_in_result) != 0x0);
8342         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8343                 data_out_result) != 0x1);
8344         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8345                 reserved) != 0x2);
8346         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8347                 status) != 0x5);
8348         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8349                 status_qualifier) != 0x6);
8350         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8351                 sense_data_length) != 0x8);
8352         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8353                 response_data_length) != 0xa);
8354         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8355                 data_in_transferred) != 0xc);
8356         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8357                 data_out_transferred) != 0x10);
8358         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8359                 data) != 0x14);
8360         BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
8361
8362         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8363                 signature) != 0x0);
8364         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8365                 function_and_status_code) != 0x8);
8366         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8367                 max_admin_iq_elements) != 0x10);
8368         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8369                 max_admin_oq_elements) != 0x11);
8370         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8371                 admin_iq_element_length) != 0x12);
8372         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8373                 admin_oq_element_length) != 0x13);
8374         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8375                 max_reset_timeout) != 0x14);
8376         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8377                 legacy_intx_status) != 0x18);
8378         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8379                 legacy_intx_mask_set) != 0x1c);
8380         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8381                 legacy_intx_mask_clear) != 0x20);
8382         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8383                 device_status) != 0x40);
8384         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8385                 admin_iq_pi_offset) != 0x48);
8386         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8387                 admin_oq_ci_offset) != 0x50);
8388         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8389                 admin_iq_element_array_addr) != 0x58);
8390         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8391                 admin_oq_element_array_addr) != 0x60);
8392         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8393                 admin_iq_ci_addr) != 0x68);
8394         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8395                 admin_oq_pi_addr) != 0x70);
8396         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8397                 admin_iq_num_elements) != 0x78);
8398         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8399                 admin_oq_num_elements) != 0x79);
8400         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8401                 admin_queue_int_msg_num) != 0x7a);
8402         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8403                 device_error) != 0x80);
8404         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8405                 error_details) != 0x88);
8406         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8407                 device_reset) != 0x90);
8408         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8409                 power_action) != 0x94);
8410         BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
8411
8412         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8413                 header.iu_type) != 0);
8414         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8415                 header.iu_length) != 2);
8416         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8417                 header.work_area) != 6);
8418         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8419                 request_id) != 8);
8420         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8421                 function_code) != 10);
8422         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8423                 data.report_device_capability.buffer_length) != 44);
8424         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8425                 data.report_device_capability.sg_descriptor) != 48);
8426         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8427                 data.create_operational_iq.queue_id) != 12);
8428         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8429                 data.create_operational_iq.element_array_addr) != 16);
8430         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8431                 data.create_operational_iq.ci_addr) != 24);
8432         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8433                 data.create_operational_iq.num_elements) != 32);
8434         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8435                 data.create_operational_iq.element_length) != 34);
8436         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8437                 data.create_operational_iq.queue_protocol) != 36);
8438         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8439                 data.create_operational_oq.queue_id) != 12);
8440         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8441                 data.create_operational_oq.element_array_addr) != 16);
8442         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8443                 data.create_operational_oq.pi_addr) != 24);
8444         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8445                 data.create_operational_oq.num_elements) != 32);
8446         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8447                 data.create_operational_oq.element_length) != 34);
8448         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8449                 data.create_operational_oq.queue_protocol) != 36);
8450         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8451                 data.create_operational_oq.int_msg_num) != 40);
8452         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8453                 data.create_operational_oq.coalescing_count) != 42);
8454         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8455                 data.create_operational_oq.min_coalescing_time) != 44);
8456         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8457                 data.create_operational_oq.max_coalescing_time) != 48);
8458         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8459                 data.delete_operational_queue.queue_id) != 12);
8460         BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
8461         BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8462                 data.create_operational_iq) != 64 - 11);
8463         BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8464                 data.create_operational_oq) != 64 - 11);
8465         BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
8466                 data.delete_operational_queue) != 64 - 11);
8467
8468         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8469                 header.iu_type) != 0);
8470         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8471                 header.iu_length) != 2);
8472         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8473                 header.work_area) != 6);
8474         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8475                 request_id) != 8);
8476         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8477                 function_code) != 10);
8478         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8479                 status) != 11);
8480         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8481                 data.create_operational_iq.status_descriptor) != 12);
8482         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8483                 data.create_operational_iq.iq_pi_offset) != 16);
8484         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8485                 data.create_operational_oq.status_descriptor) != 12);
8486         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8487                 data.create_operational_oq.oq_ci_offset) != 16);
8488         BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
8489
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                header.iu_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                header.response_queue_id) != 4);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                header.work_area) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                nexus_id) != 10);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                buffer_length) != 12);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                lun_number) != 16);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                protocol_specific) != 24);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                error_index) != 27);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                cdb) != 32);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                sg_descriptors) != 64);
        BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

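        /* pqi_aio_path_request: AIO path request field offsets and IU size */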
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                header.iu_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                header.response_queue_id) != 4);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                header.work_area) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                nexus_id) != 12);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                buffer_length) != 16);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                data_encryption_key_index) != 22);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                encrypt_tweak_lower) != 24);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                encrypt_tweak_upper) != 28);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                cdb) != 32);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                error_index) != 48);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                num_sg_descriptors) != 50);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                cdb_length) != 51);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                lun_number) != 52);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                sg_descriptors) != 64);
        BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

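        /* pqi_io_response: common I/O response field offsets */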
        BUILD_BUG_ON(offsetof(struct pqi_io_response,
                header.iu_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_io_response,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_io_response,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_io_response,
                error_index) != 10);

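        /* pqi_general_management_request: report/set event configuration offsets */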
        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
                header.iu_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
                header.response_queue_id) != 4);
        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
                data.report_event_configuration.buffer_length) != 12);
        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
                data.report_event_configuration.sg_descriptors) != 16);
        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
                data.set_event_configuration.global_event_oq_id) != 10);
        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
                data.set_event_configuration.buffer_length) != 12);
        BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
                data.set_event_configuration.sg_descriptors) != 16);

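        /* pqi_iu_layer_descriptor: field offsets and 16-byte size */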
        BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
                max_inbound_iu_length) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
                max_outbound_iu_length) != 14);
        BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

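        /* pqi_device_capability: field offsets and 576-byte size */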
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                data_length) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                iq_arbitration_priority_support_bitmask) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                maximum_aw_a) != 9);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                maximum_aw_b) != 10);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                maximum_aw_c) != 11);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                max_inbound_queues) != 16);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                max_elements_per_iq) != 18);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                max_iq_element_length) != 24);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                min_iq_element_length) != 26);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                max_outbound_queues) != 30);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                max_elements_per_oq) != 32);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                intr_coalescing_time_granularity) != 34);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                max_oq_element_length) != 36);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                min_oq_element_length) != 38);
        BUILD_BUG_ON(offsetof(struct pqi_device_capability,
                iu_layer_descriptors) != 64);
        BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

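        /* pqi_event_descriptor: field offsets and 4-byte size */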
        BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
                event_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
                oq_id) != 2);
        BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

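        /* pqi_event_config: field offsets */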
        BUILD_BUG_ON(offsetof(struct pqi_event_config,
                num_event_descriptors) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_event_config,
                descriptors) != 4);

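        /* keep PQI_NUM_SUPPORTED_EVENTS in sync with pqi_supported_event_types[] */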
        BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
                ARRAY_SIZE(pqi_supported_event_types));

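        /* pqi_event_response: field offsets and 32-byte IU size */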
        BUILD_BUG_ON(offsetof(struct pqi_event_response,
                header.iu_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_event_response,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_event_response,
                event_type) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_event_response,
                event_id) != 10);
        BUILD_BUG_ON(offsetof(struct pqi_event_response,
                additional_event_id) != 12);
        BUILD_BUG_ON(offsetof(struct pqi_event_response,
                data) != 16);
        BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

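        /* pqi_event_acknowledge_request: field offsets and 16-byte IU size */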
        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
                header.iu_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
                event_type) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
                event_id) != 10);
        BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
                additional_event_id) != 12);
        BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

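        /* pqi_task_management_request: field offsets and 32-byte IU size */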
        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
                header.iu_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
                nexus_id) != 10);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
                lun_number) != 16);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
                protocol_specific) != 24);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
                outbound_queue_id_to_manage) != 26);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
                request_id_to_manage) != 28);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
                task_management_function) != 30);
        BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

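        /* pqi_task_management_response: field offsets and 16-byte IU size */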
        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
                header.iu_type) != 0);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
                nexus_id) != 10);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
                additional_response_info) != 12);
        BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
                response_code) != 15);
        BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

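        /* bmic_identify_controller: selected field offsets */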
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                configured_logical_drive_count) != 0);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                configuration_signature) != 1);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                firmware_version) != 5);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                extended_logical_unit_count) != 154);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                firmware_build_number) != 190);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                controller_mode) != 292);

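        /* bmic_identify_physical_device: selected field offsets and 2560-byte size */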
        BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
                phys_bay_in_box) != 115);
        BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
                device_type) != 120);
        BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
                redundant_path_present_map) != 1736);
        BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
                active_path_number) != 1738);
        BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
                alternate_paths_phys_connector) != 1739);
        BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
                alternate_paths_phys_box_on_port) != 1755);
        BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
                current_queue_depth_limit) != 1796);
        BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

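        /* queue element counts and lengths: range and alignment sanity checks */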
        BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
        BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
        BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
                PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
        BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
                PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
        BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
        BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
                PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
        BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
        BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
                PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

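        /* reserved I/O slots must be fewer than the maximum outstanding requests */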
        BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
        BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
                PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}