/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
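		/* interrupt register offsets */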
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

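	/* The trace buffer is a ring; masking the atomic counter wraps the index without locking */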
	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
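	/* Order the trace entry stores before any later writes */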
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

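	/* Preserve the command's HRR queue assignment across the cmd_pkt reset */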
	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue to pull the command block from
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
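	/* Order the allow_interrupts updates before masking the hardware */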
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

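	/* Keep data parity error recovery and relaxed ordering enabled when the register is restored */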
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

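			/* Fail the op back with an IOA-was-reset IOASC */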
			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

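	/* Drop the host lock while sleeping on the completion */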
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

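/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * Queue 0 is reserved for initialization and error ops, so when more
 * than one HRR queue is configured, commands are spread round-robin
 * across queues 1..hrrq_num-1.
 *
 * Return value:
 *	index of the selected host request/response queue
 **/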
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
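		/* CDB bytes 7-8 carry the HCAM buffer length, big-endian */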
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

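	/* SIS-64 reports resources by resource path and device ID; carve virtual bus and target numbers out of the ID bitmaps */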
	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

1302 /**
1303  * __ipr_format_res_path - Format the resource path for printing.
1304  * @res_path:   resource path
1305  * @buf:        buffer
1306  * @len:        length of buffer provided
1307  *
1308  * Return value:
1309  *      pointer to buffer
1310  **/
1311 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1312 {
1313         int i;
1314         char *p = buffer;
1315
1316         *p = '\0';
1317         p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1318         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1319                 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1320
1321         return buffer;
1322 }
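/*
 * Illustrative sketch: a SIS64 resource path is a byte array terminated
 * by 0xff, rendered above as dash-separated two-digit hex components
 * ("%02X"), with the (i * 3) < len guard accounting for the three bytes
 * each "-XX" component consumes.
 *
 *	u8 path[8] = { 0x00, 0x02, 0x0a, 0xff, 0xff, 0xff, 0xff, 0xff };
 *	char buf[IPR_MAX_RES_PATH_LENGTH];
 *
 *	__ipr_format_res_path(path, buf, sizeof(buf));
 *	// buf now holds "00-02-0A"
 */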
1323
1324 /**
1325  * ipr_format_res_path - Format the resource path for printing.
1326  * @ioa_cfg:    ioa config struct
1327  * @res_path:   resource path
1328  * @buf:        buffer
1329  * @len:        length of buffer provided
1330  *
1331  * Return value:
1332  *      pointer to buffer
1333  **/
1334 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1335                                  u8 *res_path, char *buffer, int len)
1336 {
1337         char *p = buffer;
1338
1339         *p = '\0';
1340         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1341         __ipr_format_res_path(res_path, p, len - (p - buffer));
1342         return buffer;
1343 }
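/*
 * The wrapper above additionally prefixes the SCSI host number, so the
 * same path on Scsi_Host 2 renders as "2/00-02-0A", the form used in the
 * log output throughout this driver.
 */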
1344
1345 /**
1346  * ipr_update_res_entry - Update the resource entry.
1347  * @res:        resource entry struct
1348  * @cfgtew:     config table entry wrapper struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1354                                  struct ipr_config_table_entry_wrapper *cfgtew)
1355 {
1356         char buffer[IPR_MAX_RES_PATH_LENGTH];
1357         unsigned int proto;
1358         int new_path = 0;
1359
1360         if (res->ioa_cfg->sis64) {
1361                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1362                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1363                 res->type = cfgtew->u.cfgte64->res_type;
1364
1365                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1366                         sizeof(struct ipr_std_inq_data));
1367
1368                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1369                 proto = cfgtew->u.cfgte64->proto;
1370                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1371                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1372
1373                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1374                         sizeof(res->dev_lun.scsi_lun));
1375
1376                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1377                                         sizeof(res->res_path))) {
1378                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1379                                 sizeof(res->res_path));
1380                         new_path = 1;
1381                 }
1382
1383                 if (res->sdev && new_path)
1384                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1385                                     ipr_format_res_path(res->ioa_cfg,
1386                                         res->res_path, buffer, sizeof(buffer)));
1387         } else {
1388                 res->flags = cfgtew->u.cfgte->flags;
1389                 if (res->flags & IPR_IS_IOA_RESOURCE)
1390                         res->type = IPR_RES_TYPE_IOAFP;
1391                 else
1392                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1393
1394                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1395                         sizeof(struct ipr_std_inq_data));
1396
1397                 res->qmodel = IPR_QUEUEING_MODEL(res);
1398                 proto = cfgtew->u.cfgte->proto;
1399                 res->res_handle = cfgtew->u.cfgte->res_handle;
1400         }
1401
1402         ipr_update_ata_class(res, proto);
1403 }
1404
1405 /**
1406  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1407  *                        for the resource.
1408  * @res:        resource entry struct
1410  *
1411  * Return value:
1412  *      none
1413  **/
1414 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1415 {
1416         struct ipr_resource_entry *gscsi_res = NULL;
1417         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1418
1419         if (!ioa_cfg->sis64)
1420                 return;
1421
1422         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1423                 clear_bit(res->target, ioa_cfg->array_ids);
1424         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1425                 clear_bit(res->target, ioa_cfg->vset_ids);
1426         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1427                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1428                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1429                                 return;
1430                 clear_bit(res->target, ioa_cfg->target_ids);
1431
1432         } else if (res->bus == 0)
1433                 clear_bit(res->target, ioa_cfg->target_ids);
1434 }
1435
1436 /**
1437  * ipr_handle_config_change - Handle a config change from the adapter
1438  * @ioa_cfg:    ioa config struct
1439  * @hostrcb:    hostrcb
1440  *
1441  * Return value:
1442  *      none
1443  **/
1444 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1445                                      struct ipr_hostrcb *hostrcb)
1446 {
1447         struct ipr_resource_entry *res = NULL;
1448         struct ipr_config_table_entry_wrapper cfgtew;
1449         __be32 cc_res_handle;
1450
1451         u32 is_ndn = 1;
1452
1453         if (ioa_cfg->sis64) {
1454                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1455                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1456         } else {
1457                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1458                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1459         }
1460
1461         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1462                 if (res->res_handle == cc_res_handle) {
1463                         is_ndn = 0;
1464                         break;
1465                 }
1466         }
1467
1468         if (is_ndn) {
1469                 if (list_empty(&ioa_cfg->free_res_q)) {
1470                         ipr_send_hcam(ioa_cfg,
1471                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1472                                       hostrcb);
1473                         return;
1474                 }
1475
1476                 res = list_entry(ioa_cfg->free_res_q.next,
1477                                  struct ipr_resource_entry, queue);
1478
1479                 list_del(&res->queue);
1480                 ipr_init_res_entry(res, &cfgtew);
1481                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1482         }
1483
1484         ipr_update_res_entry(res, &cfgtew);
1485
1486         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1487                 if (res->sdev) {
1488                         res->del_from_ml = 1;
1489                         res->res_handle = IPR_INVALID_RES_HANDLE;
1490                         schedule_work(&ioa_cfg->work_q);
1491                 } else {
1492                         ipr_clear_res_target(res);
1493                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1494                 }
1495         } else if (!res->sdev || res->del_from_ml) {
1496                 res->add_to_ml = 1;
1497                 schedule_work(&ioa_cfg->work_q);
1498         }
1499
1500         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1501 }
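/*
 * In short: a res_handle with no match in used_res_q is a new-device
 * notification, served from free_res_q (or the HCAM is simply requeued if
 * no entries are left); a REM_ENTRY notification either schedules
 * mid-layer removal when an sdev exists or returns the entry to
 * free_res_q immediately; any other notification for a device not yet
 * (or no longer) in the mid-layer is flagged for add. In every case the
 * HCAM is handed back to the adapter so further notifications can flow.
 */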
1502
1503 /**
1504  * ipr_process_ccn - Op done function for a CCN.
1505  * @ipr_cmd:    ipr command struct
1506  *
1507  * This function is the op done function for a configuration
1508  * change notification host controlled async message (HCAM) from the adapter.
1509  *
1510  * Return value:
1511  *      none
1512  **/
1513 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1514 {
1515         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1516         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1517         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1518
1519         list_del_init(&hostrcb->queue);
1520         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1521
1522         if (ioasc) {
1523                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1524                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1525                         dev_err(&ioa_cfg->pdev->dev,
1526                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1527
1528                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1529         } else {
1530                 ipr_handle_config_change(ioa_cfg, hostrcb);
1531         }
1532 }
1533
1534 /**
1535  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1536  * @i:          index into buffer
1537  * @buf:                string to modify
1538  *
1539  * This function will strip all trailing whitespace, pad the end
1540  * of the string with a single space, and NULL terminate the string.
1541  *
1542  * Return value:
1543  *      new length of string
1544  **/
1545 static int strip_and_pad_whitespace(int i, char *buf)
1546 {
1547         while (i && buf[i] == ' ')
1548                 i--;
1549         buf[i+1] = ' ';
1550         buf[i+2] = '\0';
1551         return i + 2;
1552 }
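/*
 * Sketch of the helper above on a fixed-width VPD field: trailing blanks
 * are skipped, a single space is kept as a separator, and the string is
 * terminated, with the returned index pointing at the terminator, i.e.
 * where the next field should be copied.
 *
 *	char buf[32] = "IBM     ";	// 8-byte field, blank padded
 *	int next = strip_and_pad_whitespace(7, buf);
 *	// buf == "IBM ", next == 4
 */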
1553
1554 /**
1555  * ipr_log_vpd_compact - Log the passed VPD compactly.
1556  * @prefix:             string to print at start of printk
1557  * @hostrcb:    hostrcb pointer
1558  * @vpd:                vendor/product id/sn struct
1559  *
1560  * Return value:
1561  *      none
1562  **/
1563 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1564                                 struct ipr_vpd *vpd)
1565 {
1566         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1567         int i = 0;
1568
1569         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1570         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1571
1572         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1573         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1574
1575         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1576         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1577
1578         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1579 }
1580
1581 /**
1582  * ipr_log_vpd - Log the passed VPD to the error log.
1583  * @vpd:                vendor/product id/sn struct
1584  *
1585  * Return value:
1586  *      none
1587  **/
1588 static void ipr_log_vpd(struct ipr_vpd *vpd)
1589 {
1590         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1591                     + IPR_SERIAL_NUM_LEN];
1592
1593         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1594         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1595                IPR_PROD_ID_LEN);
1596         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1597         ipr_err("Vendor/Product ID: %s\n", buffer);
1598
1599         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1600         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1601         ipr_err("    Serial Number: %s\n", buffer);
1602 }
1603
1604 /**
1605  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606  * @prefix:             string to print at start of printk
1607  * @hostrcb:    hostrcb pointer
1608  * @vpd:                vendor/product id/sn/wwn struct
1609  *
1610  * Return value:
1611  *      none
1612  **/
1613 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1614                                     struct ipr_ext_vpd *vpd)
1615 {
1616         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1617         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1618                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1619 }
1620
1621 /**
1622  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623  * @vpd:                vendor/product id/sn/wwn struct
1624  *
1625  * Return value:
1626  *      none
1627  **/
1628 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1629 {
1630         ipr_log_vpd(&vpd->vpd);
1631         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1632                 be32_to_cpu(vpd->wwid[1]));
1633 }
1634
1635 /**
1636  * ipr_log_enhanced_cache_error - Log a cache error.
1637  * @ioa_cfg:    ioa config struct
1638  * @hostrcb:    hostrcb struct
1639  *
1640  * Return value:
1641  *      none
1642  **/
1643 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1644                                          struct ipr_hostrcb *hostrcb)
1645 {
1646         struct ipr_hostrcb_type_12_error *error;
1647
1648         if (ioa_cfg->sis64)
1649                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1650         else
1651                 error = &hostrcb->hcam.u.error.u.type_12_error;
1652
1653         ipr_err("-----Current Configuration-----\n");
1654         ipr_err("Cache Directory Card Information:\n");
1655         ipr_log_ext_vpd(&error->ioa_vpd);
1656         ipr_err("Adapter Card Information:\n");
1657         ipr_log_ext_vpd(&error->cfc_vpd);
1658
1659         ipr_err("-----Expected Configuration-----\n");
1660         ipr_err("Cache Directory Card Information:\n");
1661         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1662         ipr_err("Adapter Card Information:\n");
1663         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1664
1665         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666                      be32_to_cpu(error->ioa_data[0]),
1667                      be32_to_cpu(error->ioa_data[1]),
1668                      be32_to_cpu(error->ioa_data[2]));
1669 }
1670
1671 /**
1672  * ipr_log_cache_error - Log a cache error.
1673  * @ioa_cfg:    ioa config struct
1674  * @hostrcb:    hostrcb struct
1675  *
1676  * Return value:
1677  *      none
1678  **/
1679 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1680                                 struct ipr_hostrcb *hostrcb)
1681 {
1682         struct ipr_hostrcb_type_02_error *error =
1683                 &hostrcb->hcam.u.error.u.type_02_error;
1684
1685         ipr_err("-----Current Configuration-----\n");
1686         ipr_err("Cache Directory Card Information:\n");
1687         ipr_log_vpd(&error->ioa_vpd);
1688         ipr_err("Adapter Card Information:\n");
1689         ipr_log_vpd(&error->cfc_vpd);
1690
1691         ipr_err("-----Expected Configuration-----\n");
1692         ipr_err("Cache Directory Card Information:\n");
1693         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1694         ipr_err("Adapter Card Information:\n");
1695         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1696
1697         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698                      be32_to_cpu(error->ioa_data[0]),
1699                      be32_to_cpu(error->ioa_data[1]),
1700                      be32_to_cpu(error->ioa_data[2]));
1701 }
1702
1703 /**
1704  * ipr_log_enhanced_config_error - Log a configuration error.
1705  * @ioa_cfg:    ioa config struct
1706  * @hostrcb:    hostrcb struct
1707  *
1708  * Return value:
1709  *      none
1710  **/
1711 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1712                                           struct ipr_hostrcb *hostrcb)
1713 {
1714         int errors_logged, i;
1715         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1716         struct ipr_hostrcb_type_13_error *error;
1717
1718         error = &hostrcb->hcam.u.error.u.type_13_error;
1719         errors_logged = be32_to_cpu(error->errors_logged);
1720
1721         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722                 be32_to_cpu(error->errors_detected), errors_logged);
1723
1724         dev_entry = error->dev;
1725
1726         for (i = 0; i < errors_logged; i++, dev_entry++) {
1727                 ipr_err_separator;
1728
1729                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730                 ipr_log_ext_vpd(&dev_entry->vpd);
1731
1732                 ipr_err("-----New Device Information-----\n");
1733                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1734
1735                 ipr_err("Cache Directory Card Information:\n");
1736                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1737
1738                 ipr_err("Adapter Card Information:\n");
1739                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1740         }
1741 }
1742
1743 /**
1744  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1745  * @ioa_cfg:    ioa config struct
1746  * @hostrcb:    hostrcb struct
1747  *
1748  * Return value:
1749  *      none
1750  **/
1751 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1752                                        struct ipr_hostrcb *hostrcb)
1753 {
1754         int errors_logged, i;
1755         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1756         struct ipr_hostrcb_type_23_error *error;
1757         char buffer[IPR_MAX_RES_PATH_LENGTH];
1758
1759         error = &hostrcb->hcam.u.error64.u.type_23_error;
1760         errors_logged = be32_to_cpu(error->errors_logged);
1761
1762         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763                 be32_to_cpu(error->errors_detected), errors_logged);
1764
1765         dev_entry = error->dev;
1766
1767         for (i = 0; i < errors_logged; i++, dev_entry++) {
1768                 ipr_err_separator;
1769
1770                 ipr_err("Device %d : %s", i + 1,
1771                         __ipr_format_res_path(dev_entry->res_path,
1772                                               buffer, sizeof(buffer)));
1773                 ipr_log_ext_vpd(&dev_entry->vpd);
1774
1775                 ipr_err("-----New Device Information-----\n");
1776                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1777
1778                 ipr_err("Cache Directory Card Information:\n");
1779                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1780
1781                 ipr_err("Adapter Card Information:\n");
1782                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1783         }
1784 }
1785
1786 /**
1787  * ipr_log_config_error - Log a configuration error.
1788  * @ioa_cfg:    ioa config struct
1789  * @hostrcb:    hostrcb struct
1790  *
1791  * Return value:
1792  *      none
1793  **/
1794 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1795                                  struct ipr_hostrcb *hostrcb)
1796 {
1797         int errors_logged, i;
1798         struct ipr_hostrcb_device_data_entry *dev_entry;
1799         struct ipr_hostrcb_type_03_error *error;
1800
1801         error = &hostrcb->hcam.u.error.u.type_03_error;
1802         errors_logged = be32_to_cpu(error->errors_logged);
1803
1804         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805                 be32_to_cpu(error->errors_detected), errors_logged);
1806
1807         dev_entry = error->dev;
1808
1809         for (i = 0; i < errors_logged; i++, dev_entry++) {
1810                 ipr_err_separator;
1811
1812                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1813                 ipr_log_vpd(&dev_entry->vpd);
1814
1815                 ipr_err("-----New Device Information-----\n");
1816                 ipr_log_vpd(&dev_entry->new_vpd);
1817
1818                 ipr_err("Cache Directory Card Information:\n");
1819                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1820
1821                 ipr_err("Adapter Card Information:\n");
1822                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1823
1824                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825                         be32_to_cpu(dev_entry->ioa_data[0]),
1826                         be32_to_cpu(dev_entry->ioa_data[1]),
1827                         be32_to_cpu(dev_entry->ioa_data[2]),
1828                         be32_to_cpu(dev_entry->ioa_data[3]),
1829                         be32_to_cpu(dev_entry->ioa_data[4]));
1830         }
1831 }
1832
1833 /**
1834  * ipr_log_enhanced_array_error - Log an array configuration error.
1835  * @ioa_cfg:    ioa config struct
1836  * @hostrcb:    hostrcb struct
1837  *
1838  * Return value:
1839  *      none
1840  **/
1841 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842                                          struct ipr_hostrcb *hostrcb)
1843 {
1844         int i, num_entries;
1845         struct ipr_hostrcb_type_14_error *error;
1846         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1847         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849         error = &hostrcb->hcam.u.error.u.type_14_error;
1850
1851         ipr_err_separator;
1852
1853         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854                 error->protection_level,
1855                 ioa_cfg->host->host_no,
1856                 error->last_func_vset_res_addr.bus,
1857                 error->last_func_vset_res_addr.target,
1858                 error->last_func_vset_res_addr.lun);
1859
1860         ipr_err_separator;
1861
1862         array_entry = error->array_member;
1863         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1864                             ARRAY_SIZE(error->array_member));
1865
1866         for (i = 0; i < num_entries; i++, array_entry++) {
1867                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1868                         continue;
1869
1870                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1871                         ipr_err("Exposed Array Member %d:\n", i);
1872                 else
1873                         ipr_err("Array Member %d:\n", i);
1874
1875                 ipr_log_ext_vpd(&array_entry->vpd);
1876                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878                                  "Expected Location");
1879
1880                 ipr_err_separator;
1881         }
1882 }
1883
1884 /**
1885  * ipr_log_array_error - Log an array configuration error.
1886  * @ioa_cfg:    ioa config struct
1887  * @hostrcb:    hostrcb struct
1888  *
1889  * Return value:
1890  *      none
1891  **/
1892 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1893                                 struct ipr_hostrcb *hostrcb)
1894 {
1895         int i;
1896         struct ipr_hostrcb_type_04_error *error;
1897         struct ipr_hostrcb_array_data_entry *array_entry;
1898         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1899
1900         error = &hostrcb->hcam.u.error.u.type_04_error;
1901
1902         ipr_err_separator;
1903
1904         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905                 error->protection_level,
1906                 ioa_cfg->host->host_no,
1907                 error->last_func_vset_res_addr.bus,
1908                 error->last_func_vset_res_addr.target,
1909                 error->last_func_vset_res_addr.lun);
1910
1911         ipr_err_separator;
1912
1913         array_entry = error->array_member;
1914
1915         for (i = 0; i < 18; i++) {
1916                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1917                         continue;
1918
1919                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1920                         ipr_err("Exposed Array Member %d:\n", i);
1921                 else
1922                         ipr_err("Array Member %d:\n", i);
1923
1924                 ipr_log_vpd(&array_entry->vpd);
1925
1926                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1927                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1928                                  "Expected Location");
1929
1930                 ipr_err_separator;
1931
1932                 if (i == 9)
1933                         array_entry = error->array_member2;
1934                 else
1935                         array_entry++;
1936         }
1937 }
1938
1939 /**
1940  * ipr_log_hex_data - Log additional hex IOA error data.
1941  * @ioa_cfg:    ioa config struct
1942  * @data:               IOA error data
1943  * @len:                data length
1944  *
1945  * Return value:
1946  *      none
1947  **/
1948 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1949 {
1950         int i;
1951
1952         if (len == 0)
1953                 return;
1954
1955         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1956                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1957
1958         for (i = 0; i < len / 4; i += 4) {
1959                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1960                         be32_to_cpu(data[i]),
1961                         be32_to_cpu(data[i+1]),
1962                         be32_to_cpu(data[i+2]),
1963                         be32_to_cpu(data[i+3]));
1964         }
1965 }
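/*
 * Sketch: a 32-byte dump (len = 32, i.e. eight be32 words) produces two
 * rows, each labelled with its starting byte offset (i * 4) and carrying
 * four words:
 *
 *	00000000: 00010203 04050607 08090A0B 0C0D0E0F
 *	00000010: 10111213 14151617 18191A1B 1C1D1E1F
 *
 * i indexes words, not rows, which is why it advances by four per row and
 * why the loop bound is len / 4 (the word count).
 */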
1966
1967 /**
1968  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969  * @ioa_cfg:    ioa config struct
1970  * @hostrcb:    hostrcb struct
1971  *
1972  * Return value:
1973  *      none
1974  **/
1975 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1976                                             struct ipr_hostrcb *hostrcb)
1977 {
1978         struct ipr_hostrcb_type_17_error *error;
1979
1980         if (ioa_cfg->sis64)
1981                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1982         else
1983                 error = &hostrcb->hcam.u.error.u.type_17_error;
1984
1985         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1986         strim(error->failure_reason);
1987
1988         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1989                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1990         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1991         ipr_log_hex_data(ioa_cfg, error->data,
1992                          be32_to_cpu(hostrcb->hcam.length) -
1993                          (offsetof(struct ipr_hostrcb_error, u) +
1994                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1995 }
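/*
 * The hex-data length passed above is "whatever follows the fixed part":
 * the HCAM's total length minus the offset of the type 17 data[] member,
 * computed with nested offsetof() so it tracks the structure layout
 * instead of a hard-coded size. ipr_log_dual_ioa_error() below uses the
 * same construction for the type 07 overlay.
 */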
1996
1997 /**
1998  * ipr_log_dual_ioa_error - Log a dual adapter error.
1999  * @ioa_cfg:    ioa config struct
2000  * @hostrcb:    hostrcb struct
2001  *
2002  * Return value:
2003  *      none
2004  **/
2005 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2006                                    struct ipr_hostrcb *hostrcb)
2007 {
2008         struct ipr_hostrcb_type_07_error *error;
2009
2010         error = &hostrcb->hcam.u.error.u.type_07_error;
2011         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2012         strim(error->failure_reason);
2013
2014         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2015                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2016         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2017         ipr_log_hex_data(ioa_cfg, error->data,
2018                          be32_to_cpu(hostrcb->hcam.length) -
2019                          (offsetof(struct ipr_hostrcb_error, u) +
2020                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2021 }
2022
2023 static const struct {
2024         u8 active;
2025         char *desc;
2026 } path_active_desc[] = {
2027         { IPR_PATH_NO_INFO, "Path" },
2028         { IPR_PATH_ACTIVE, "Active path" },
2029         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2030 };
2031
2032 static const struct {
2033         u8 state;
2034         char *desc;
2035 } path_state_desc[] = {
2036         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2037         { IPR_PATH_HEALTHY, "is healthy" },
2038         { IPR_PATH_DEGRADED, "is degraded" },
2039         { IPR_PATH_FAILED, "is failed" }
2040 };
2041
2042 /**
2043  * ipr_log_fabric_path - Log a fabric path error
2044  * @hostrcb:    hostrcb struct
2045  * @fabric:             fabric descriptor
2046  *
2047  * Return value:
2048  *      none
2049  **/
2050 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2051                                 struct ipr_hostrcb_fabric_desc *fabric)
2052 {
2053         int i, j;
2054         u8 path_state = fabric->path_state;
2055         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2056         u8 state = path_state & IPR_PATH_STATE_MASK;
2057
2058         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059                 if (path_active_desc[i].active != active)
2060                         continue;
2061
2062                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063                         if (path_state_desc[j].state != state)
2064                                 continue;
2065
2066                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2067                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2068                                              path_active_desc[i].desc, path_state_desc[j].desc,
2069                                              fabric->ioa_port);
2070                         } else if (fabric->cascaded_expander == 0xff) {
2071                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2072                                              path_active_desc[i].desc, path_state_desc[j].desc,
2073                                              fabric->ioa_port, fabric->phy);
2074                         } else if (fabric->phy == 0xff) {
2075                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2076                                              path_active_desc[i].desc, path_state_desc[j].desc,
2077                                              fabric->ioa_port, fabric->cascaded_expander);
2078                         } else {
2079                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080                                              path_active_desc[i].desc, path_state_desc[j].desc,
2081                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2082                         }
2083                         return;
2084                 }
2085         }
2086
2087         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2088                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2089 }
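/*
 * A value of 0xff in cascaded_expander or phy means the field is not
 * valid for this path, which is why the message above degrades from
 * Port/Cascade/Phy down to Port only, and why the catch-all at the end
 * prints the raw path_state when no descriptor matches.
 */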
2090
2091 /**
2092  * ipr_log64_fabric_path - Log a fabric path error
2093  * @hostrcb:    hostrcb struct
2094  * @fabric:             fabric descriptor
2095  *
2096  * Return value:
2097  *      none
2098  **/
2099 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2100                                   struct ipr_hostrcb64_fabric_desc *fabric)
2101 {
2102         int i, j;
2103         u8 path_state = fabric->path_state;
2104         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2105         u8 state = path_state & IPR_PATH_STATE_MASK;
2106         char buffer[IPR_MAX_RES_PATH_LENGTH];
2107
2108         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2109                 if (path_active_desc[i].active != active)
2110                         continue;
2111
2112                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2113                         if (path_state_desc[j].state != state)
2114                                 continue;
2115
2116                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2117                                      path_active_desc[i].desc, path_state_desc[j].desc,
2118                                      ipr_format_res_path(hostrcb->ioa_cfg,
2119                                                 fabric->res_path,
2120                                                 buffer, sizeof(buffer)));
2121                         return;
2122                 }
2123         }
2124
2125         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2126                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2127                                     buffer, sizeof(buffer)));
2128 }
2129
2130 static const struct {
2131         u8 type;
2132         char *desc;
2133 } path_type_desc[] = {
2134         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2135         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2136         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2137         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2138 };
2139
2140 static const struct {
2141         u8 status;
2142         char *desc;
2143 } path_status_desc[] = {
2144         { IPR_PATH_CFG_NO_PROB, "Functional" },
2145         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2146         { IPR_PATH_CFG_FAILED, "Failed" },
2147         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2148         { IPR_PATH_NOT_DETECTED, "Missing" },
2149         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2150 };
2151
2152 static const char *link_rate[] = {
2153         "unknown",
2154         "disabled",
2155         "phy reset problem",
2156         "spinup hold",
2157         "port selector",
2158         "unknown",
2159         "unknown",
2160         "unknown",
2161         "1.5Gbps",
2162         "3.0Gbps",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown",
2168         "unknown"
2169 };
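/*
 * The table is indexed with the low four bits of a config element's
 * link_rate field, so every value 0x0-0xf resolves to a string, e.g.:
 *
 *	const char *rate =
 *		link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK];
 *	// 0x8 -> "1.5Gbps", 0x9 -> "3.0Gbps", unassigned codes -> "unknown"
 *
 * (Assuming IPR_PHY_LINK_RATE_MASK is 0x0f to match the 16-entry table.)
 */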
2170
2171 /**
2172  * ipr_log_path_elem - Log a fabric path element.
2173  * @hostrcb:    hostrcb struct
2174  * @cfg:                fabric path element struct
2175  *
2176  * Return value:
2177  *      none
2178  **/
2179 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2180                               struct ipr_hostrcb_config_element *cfg)
2181 {
2182         int i, j;
2183         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2184         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2185
2186         if (type == IPR_PATH_CFG_NOT_EXIST)
2187                 return;
2188
2189         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2190                 if (path_type_desc[i].type != type)
2191                         continue;
2192
2193                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2194                         if (path_status_desc[j].status != status)
2195                                 continue;
2196
2197                         if (type == IPR_PATH_CFG_IOA_PORT) {
2198                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199                                              path_status_desc[j].desc, path_type_desc[i].desc,
2200                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2201                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2202                         } else {
2203                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2204                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2206                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2207                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2208                                 } else if (cfg->cascaded_expander == 0xff) {
2209                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2210                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2211                                                      path_type_desc[i].desc, cfg->phy,
2212                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2213                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2214                                 } else if (cfg->phy == 0xff) {
2215                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2216                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2217                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2218                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220                                 } else {
2221                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2223                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2224                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2225                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2226                                 }
2227                         }
2228                         return;
2229                 }
2230         }
2231
2232         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2234                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2235                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2236 }
2237
2238 /**
2239  * ipr_log64_path_elem - Log a fabric path element.
2240  * @hostrcb:    hostrcb struct
2241  * @cfg:                fabric path element struct
2242  *
2243  * Return value:
2244  *      none
2245  **/
2246 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2247                                 struct ipr_hostrcb64_config_element *cfg)
2248 {
2249         int i, j;
2250         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2251         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2252         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2253         char buffer[IPR_MAX_RES_PATH_LENGTH];
2254
2255         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2256                 return;
2257
2258         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2259                 if (path_type_desc[i].type != type)
2260                         continue;
2261
2262                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2263                         if (path_status_desc[j].status != status)
2264                                 continue;
2265
2266                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267                                      path_status_desc[j].desc, path_type_desc[i].desc,
2268                                      ipr_format_res_path(hostrcb->ioa_cfg,
2269                                         cfg->res_path, buffer, sizeof(buffer)),
2270                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2271                                         be32_to_cpu(cfg->wwid[0]),
2272                                         be32_to_cpu(cfg->wwid[1]));
2273                         return;
2274                 }
2275         }
2276         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277                      "WWN=%08X%08X\n", cfg->type_status,
2278                      ipr_format_res_path(hostrcb->ioa_cfg,
2279                         cfg->res_path, buffer, sizeof(buffer)),
2280                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2281                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2282 }
2283
2284 /**
2285  * ipr_log_fabric_error - Log a fabric error.
2286  * @ioa_cfg:    ioa config struct
2287  * @hostrcb:    hostrcb struct
2288  *
2289  * Return value:
2290  *      none
2291  **/
2292 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2293                                  struct ipr_hostrcb *hostrcb)
2294 {
2295         struct ipr_hostrcb_type_20_error *error;
2296         struct ipr_hostrcb_fabric_desc *fabric;
2297         struct ipr_hostrcb_config_element *cfg;
2298         int i, add_len;
2299
2300         error = &hostrcb->hcam.u.error.u.type_20_error;
2301         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2302         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2303
2304         add_len = be32_to_cpu(hostrcb->hcam.length) -
2305                 (offsetof(struct ipr_hostrcb_error, u) +
2306                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2307
2308         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2309                 ipr_log_fabric_path(hostrcb, fabric);
2310                 for_each_fabric_cfg(fabric, cfg)
2311                         ipr_log_path_elem(hostrcb, cfg);
2312
2313                 add_len -= be16_to_cpu(fabric->length);
2314                 fabric = (struct ipr_hostrcb_fabric_desc *)
2315                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2316         }
2317
2318         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2319 }
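/*
 * Sketch of the variable-length walk above: each fabric descriptor
 * carries its own length, so the next record starts that many bytes past
 * the current one, and whatever remains of the HCAM payload after the
 * last descriptor is dumped as raw hex. log_one()/dump_hex() stand in
 * for ipr_log_fabric_path()/ipr_log_hex_data().
 *
 *	while (i < num_entries) {
 *		log_one(fabric);
 *		add_len -= be16_to_cpu(fabric->length);
 *		fabric = (void *)((char *)fabric +
 *				  be16_to_cpu(fabric->length));
 *		i++;
 *	}
 *	dump_hex(fabric, add_len);	// trailing, undecoded bytes
 */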
2320
2321 /**
2322  * ipr_log_sis64_array_error - Log a sis64 array error.
2323  * @ioa_cfg:    ioa config struct
2324  * @hostrcb:    hostrcb struct
2325  *
2326  * Return value:
2327  *      none
2328  **/
2329 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2330                                       struct ipr_hostrcb *hostrcb)
2331 {
2332         int i, num_entries;
2333         struct ipr_hostrcb_type_24_error *error;
2334         struct ipr_hostrcb64_array_data_entry *array_entry;
2335         char buffer[IPR_MAX_RES_PATH_LENGTH];
2336         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2337
2338         error = &hostrcb->hcam.u.error64.u.type_24_error;
2339
2340         ipr_err_separator;
2341
2342         ipr_err("RAID %s Array Configuration: %s\n",
2343                 error->protection_level,
2344                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2345                         buffer, sizeof(buffer)));
2346
2347         ipr_err_separator;
2348
2349         array_entry = error->array_member;
2350         num_entries = min_t(u32, error->num_entries,
2351                             ARRAY_SIZE(error->array_member));
2352
2353         for (i = 0; i < num_entries; i++, array_entry++) {
2354
2355                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2356                         continue;
2357
2358                 if (error->exposed_mode_adn == i)
2359                         ipr_err("Exposed Array Member %d:\n", i);
2360                 else
2361                         ipr_err("Array Member %d:\n", i);
2362
2364                 ipr_log_ext_vpd(&array_entry->vpd);
2365                 ipr_err("Current Location: %s\n",
2366                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2367                                 buffer, sizeof(buffer)));
2368                 ipr_err("Expected Location: %s\n",
2369                          ipr_format_res_path(ioa_cfg,
2370                                 array_entry->expected_res_path,
2371                                 buffer, sizeof(buffer)));
2372
2373                 ipr_err_separator;
2374         }
2375 }
2376
2377 /**
2378  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379  * @ioa_cfg:    ioa config struct
2380  * @hostrcb:    hostrcb struct
2381  *
2382  * Return value:
2383  *      none
2384  **/
2385 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2386                                        struct ipr_hostrcb *hostrcb)
2387 {
2388         struct ipr_hostrcb_type_30_error *error;
2389         struct ipr_hostrcb64_fabric_desc *fabric;
2390         struct ipr_hostrcb64_config_element *cfg;
2391         int i, add_len;
2392
2393         error = &hostrcb->hcam.u.error64.u.type_30_error;
2394
2395         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2396         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2397
2398         add_len = be32_to_cpu(hostrcb->hcam.length) -
2399                 (offsetof(struct ipr_hostrcb64_error, u) +
2400                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2401
2402         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2403                 ipr_log64_fabric_path(hostrcb, fabric);
2404                 for_each_fabric_cfg(fabric, cfg)
2405                         ipr_log64_path_elem(hostrcb, cfg);
2406
2407                 add_len -= be16_to_cpu(fabric->length);
2408                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2409                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2410         }
2411
2412         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2413 }
2414
2415 /**
2416  * ipr_log_generic_error - Log an adapter error.
2417  * @ioa_cfg:    ioa config struct
2418  * @hostrcb:    hostrcb struct
2419  *
2420  * Return value:
2421  *      none
2422  **/
2423 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2424                                   struct ipr_hostrcb *hostrcb)
2425 {
2426         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2427                          be32_to_cpu(hostrcb->hcam.length));
2428 }
2429
2430 /**
2431  * ipr_log_sis64_device_error - Log a sis64 device error.
2432  * @ioa_cfg:    ioa config struct
2433  * @hostrcb:    hostrcb struct
2434  *
2435  * Return value:
2436  *      none
2437  **/
2438 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2439                                          struct ipr_hostrcb *hostrcb)
2440 {
2441         struct ipr_hostrcb_type_21_error *error;
2442         char buffer[IPR_MAX_RES_PATH_LENGTH];
2443
2444         error = &hostrcb->hcam.u.error64.u.type_21_error;
2445
2446         ipr_err("-----Failing Device Information-----\n");
2447         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2448                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2449                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2450         ipr_err("Device Resource Path: %s\n",
2451                 __ipr_format_res_path(error->res_path,
2452                                       buffer, sizeof(buffer)));
2453         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2454         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2455         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2456         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2457         ipr_err("SCSI Sense Data:\n");
2458         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2459         ipr_err("SCSI Command Descriptor Block:\n");
2460         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2461
2462         ipr_err("Additional IOA Data:\n");
2463         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2464 }
2465
2466 /**
2467  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2468  * @ioasc:      IOASC
2469  *
2470  * This function will return the index into the ipr_error_table
2471  * for the specified IOASC. If the IOASC is not in the table,
2472  * 0 will be returned, which points to the entry used for unknown errors.
2473  *
2474  * Return value:
2475  *      index into the ipr_error_table
2476  **/
2477 static u32 ipr_get_error(u32 ioasc)
2478 {
2479         int i;
2480
2481         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2482                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2483                         return i;
2484
2485         return 0;
2486 }
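/*
 * Usage sketch: the masked IOASC is looked up linearly, and index 0
 * doubles as the catch-all "unknown error" entry, so callers can index
 * ipr_error_table unconditionally:
 *
 *	error_index = ipr_get_error(ioasc);
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 */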
2487
2488 /**
2489  * ipr_handle_log_data - Log an adapter error.
2490  * @ioa_cfg:    ioa config struct
2491  * @hostrcb:    hostrcb struct
2492  *
2493  * This function logs an adapter error to the system.
2494  *
2495  * Return value:
2496  *      none
2497  **/
2498 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2499                                 struct ipr_hostrcb *hostrcb)
2500 {
2501         u32 ioasc;
2502         int error_index;
2503         struct ipr_hostrcb_type_21_error *error;
2504
2505         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2506                 return;
2507
2508         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2509                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2510
2511         if (ioa_cfg->sis64)
2512                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2513         else
2514                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2515
2516         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2517             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2518                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2519                 scsi_report_bus_reset(ioa_cfg->host,
2520                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2521         }
2522
2523         error_index = ipr_get_error(ioasc);
2524
2525         if (!ipr_error_table[error_index].log_hcam)
2526                 return;
2527
2528         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2529             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2530                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2531
2532                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2533                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2534                                 return;
2535         }
2536
2537         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2538
2539         /* Set indication we have logged an error */
2540         ioa_cfg->errors_logged++;
2541
2542         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2543                 return;
2544         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2545                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2546
2547         switch (hostrcb->hcam.overlay_id) {
2548         case IPR_HOST_RCB_OVERLAY_ID_2:
2549                 ipr_log_cache_error(ioa_cfg, hostrcb);
2550                 break;
2551         case IPR_HOST_RCB_OVERLAY_ID_3:
2552                 ipr_log_config_error(ioa_cfg, hostrcb);
2553                 break;
2554         case IPR_HOST_RCB_OVERLAY_ID_4:
2555         case IPR_HOST_RCB_OVERLAY_ID_6:
2556                 ipr_log_array_error(ioa_cfg, hostrcb);
2557                 break;
2558         case IPR_HOST_RCB_OVERLAY_ID_7:
2559                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2560                 break;
2561         case IPR_HOST_RCB_OVERLAY_ID_12:
2562                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2563                 break;
2564         case IPR_HOST_RCB_OVERLAY_ID_13:
2565                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2566                 break;
2567         case IPR_HOST_RCB_OVERLAY_ID_14:
2568         case IPR_HOST_RCB_OVERLAY_ID_16:
2569                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2570                 break;
2571         case IPR_HOST_RCB_OVERLAY_ID_17:
2572                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2573                 break;
2574         case IPR_HOST_RCB_OVERLAY_ID_20:
2575                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2576                 break;
2577         case IPR_HOST_RCB_OVERLAY_ID_21:
2578                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2579                 break;
2580         case IPR_HOST_RCB_OVERLAY_ID_23:
2581                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2582                 break;
2583         case IPR_HOST_RCB_OVERLAY_ID_24:
2584         case IPR_HOST_RCB_OVERLAY_ID_26:
2585                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2586                 break;
2587         case IPR_HOST_RCB_OVERLAY_ID_30:
2588                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2589                 break;
2590         case IPR_HOST_RCB_OVERLAY_ID_1:
2591         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2592         default:
2593                 ipr_log_generic_error(ioa_cfg, hostrcb);
2594                 break;
2595         }
2596 }
2597
2598 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2599 {
2600         struct ipr_hostrcb *hostrcb;
2601
2602         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2603                                         struct ipr_hostrcb, queue);
2604
2605         if (unlikely(!hostrcb)) {
2606                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2607                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2608                                                 struct ipr_hostrcb, queue);
2609         }
2610
2611         list_del_init(&hostrcb->queue);
2612         return hostrcb;
2613 }
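/*
 * If the free queue is exhausted, the oldest buffer still waiting on
 * hostrcb_report_q is reclaimed instead; this assumes at least one of the
 * two queues is non-empty, so hostrcb is never NULL at the
 * list_del_init() above.
 */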
2614
2615 /**
2616  * ipr_process_error - Op done function for an adapter error log.
2617  * @ipr_cmd:    ipr command struct
2618  *
2619  * This function is the op done function for an error log host
2620  * controlled async message (HCAM) from the adapter. It will log the error and
2621  * send the HCAM back to the adapter.
2622  *
2623  * Return value:
2624  *      none
2625  **/
2626 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2627 {
2628         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2629         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2630         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2631         u32 fd_ioasc;
2632
2633         if (ioa_cfg->sis64)
2634                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2635         else
2636                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2637
2638         list_del_init(&hostrcb->queue);
2639         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2640
2641         if (!ioasc) {
2642                 ipr_handle_log_data(ioa_cfg, hostrcb);
2643                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2644                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2645         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2646                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2647                 dev_err(&ioa_cfg->pdev->dev,
2648                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2649         }
2650
2651         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2652         schedule_work(&ioa_cfg->work_q);
2653         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2654
2655         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2656 }
2657
2658 /**
2659  * ipr_timeout -  An internally generated op has timed out.
2660  * @t:          Timer context used to fetch ipr command struct
2661  *
2662  * This function blocks host requests and initiates an
2663  * adapter reset.
2664  *
2665  * Return value:
2666  *      none
2667  **/
2668 static void ipr_timeout(struct timer_list *t)
2669 {
2670         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2671         unsigned long lock_flags = 0;
2672         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2673
2674         ENTER;
2675         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2676
2677         ioa_cfg->errors_logged++;
2678         dev_err(&ioa_cfg->pdev->dev,
2679                 "Adapter being reset due to command timeout.\n");
2680
2681         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2682                 ioa_cfg->sdt_state = GET_DUMP;
2683
2684         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2685                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2686
2687         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2688         LEAVE;
2689 }
2690
2691 /**
2692  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2693  * @t:          Timer context used to fetch ipr command struct
2694  *
2695  * This function blocks host requests and initiates an
2696  * adapter reset.
2697  *
2698  * Return value:
2699  *      none
2700  **/
2701 static void ipr_oper_timeout(struct timer_list *t)
2702 {
2703         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2704         unsigned long lock_flags = 0;
2705         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2706
2707         ENTER;
2708         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2709
2710         ioa_cfg->errors_logged++;
2711         dev_err(&ioa_cfg->pdev->dev,
2712                 "Adapter timed out transitioning to operational.\n");
2713
2714         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2715                 ioa_cfg->sdt_state = GET_DUMP;
2716
2717         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2718                 if (ipr_fastfail)
2719                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2720                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2721         }
2722
2723         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2724         LEAVE;
2725 }
2726
2727 /**
2728  * ipr_find_ses_entry - Find matching SES in SES table
2729  * @res:        resource entry struct of SES
2730  *
2731  * Return value:
2732  *      pointer to SES table entry / NULL on failure
2733  **/
2734 static const struct ipr_ses_table_entry *
2735 ipr_find_ses_entry(struct ipr_resource_entry *res)
2736 {
2737         int i, j, matches;
2738         struct ipr_std_inq_vpids *vpids;
2739         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2740
2741         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2742                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2743                         if (ste->compare_product_id_byte[j] == 'X') {
2744                                 vpids = &res->std_inq_data.vpids;
2745                                 if (vpids->product_id[j] == ste->product_id[j])
2746                                         matches++;
2747                                 else
2748                                         break;
2749                         } else
2750                                 matches++;
2751                 }
2752
2753                 if (matches == IPR_PROD_ID_LEN)
2754                         return ste;
2755         }
2756
2757         return NULL;
2758 }
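
/*
 * Illustrative example (hypothetical entry, not taken from ipr_ses_table):
 * a compare mask of "XXXXX           " paired with a product ID of
 * "HSBP1           " requires the first five bytes of the inquiry
 * product ID to match exactly, while the remaining bytes act as
 * wildcards, since only positions marked 'X' in
 * compare_product_id_byte are compared.
 */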
2759
2760 /**
2761  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2762  * @ioa_cfg:    ioa config struct
2763  * @bus:                SCSI bus
2764  * @bus_width:  bus width
2765  *
2766  * Return value:
2767  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2768  *      For a 2-byte wide SCSI bus, the maximum transfer speed is
2769  *      twice the maximum transfer rate (e.g. for a wide enabled bus,
2770  *      max 160MHz = max 320MB/sec).
2771  **/
2772 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2773 {
2774         struct ipr_resource_entry *res;
2775         const struct ipr_ses_table_entry *ste;
2776         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2777
2778         /* Loop through each config table entry in the config table buffer */
2779         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2780                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2781                         continue;
2782
2783                 if (bus != res->bus)
2784                         continue;
2785
2786                 if (!(ste = ipr_find_ses_entry(res)))
2787                         continue;
2788
2789                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2790         }
2791
2792         return max_xfer_rate;
2793 }
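
/*
 * Worked example (values hypothetical): with a SES entry limiting the
 * bus to max_bus_speed_limit = 160 and a 16-bit wide bus
 * (bus_width = 16), the result is (160 * 10) / (16 / 8) = 800, i.e.
 * 80 MHz in the 100KHz units described above.
 */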
2794
2795 /**
2796  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2797  * @ioa_cfg:            ioa config struct
2798  * @max_delay:          max delay in micro-seconds to wait
2799  *
2800  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2801  *
2802  * Return value:
2803  *      0 on success / other on failure
2804  **/
2805 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2806 {
2807         volatile u32 pcii_reg;
2808         int delay = 1;
2809
2810         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2811         while (delay < max_delay) {
2812                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2813
2814                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2815                         return 0;
2816
2817                 /* udelay cannot be used if delay is more than a few milliseconds */
2818                 if ((delay / 1000) > MAX_UDELAY_MS)
2819                         mdelay(delay / 1000);
2820                 else
2821                         udelay(delay);
2822
2823                 delay += delay;
2824         }
2825         return -EIO;
2826 }
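
/*
 * The loop above backs off exponentially: delay doubles each pass
 * (1, 2, 4, ... microseconds) until max_delay is reached. Once
 * delay / 1000 exceeds MAX_UDELAY_MS, the wait switches from udelay()
 * to mdelay(), since udelay() is not reliable for multi-millisecond
 * waits on all architectures.
 */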
2827
2828 /**
2829  * ipr_get_sis64_dump_data_section - Dump IOA memory
2830  * @ioa_cfg:                    ioa config struct
2831  * @start_addr:                 adapter address to dump
2832  * @dest:                       destination kernel buffer
2833  * @length_in_words:            length to dump in 4 byte words
2834  *
2835  * Return value:
2836  *      0 on success
2837  **/
2838 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2839                                            u32 start_addr,
2840                                            __be32 *dest, u32 length_in_words)
2841 {
2842         int i;
2843
2844         for (i = 0; i < length_in_words; i++) {
2845                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2846                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2847                 dest++;
2848         }
2849
2850         return 0;
2851 }
2852
2853 /**
2854  * ipr_get_ldump_data_section - Dump IOA memory
2855  * @ioa_cfg:                    ioa config struct
2856  * @start_addr:                 adapter address to dump
2857  * @dest:                       destination kernel buffer
2858  * @length_in_words:            length to dump in 4 byte words
2859  *
2860  * Return value:
2861  *      0 on success / -EIO on failure
2862  **/
2863 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2864                                       u32 start_addr,
2865                                       __be32 *dest, u32 length_in_words)
2866 {
2867         volatile u32 temp_pcii_reg;
2868         int i, delay = 0;
2869
2870         if (ioa_cfg->sis64)
2871                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2872                                                        dest, length_in_words);
2873
2874         /* Write IOA interrupt reg starting LDUMP state  */
2875         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2876                ioa_cfg->regs.set_uproc_interrupt_reg32);
2877
2878         /* Wait for IO debug acknowledge */
2879         if (ipr_wait_iodbg_ack(ioa_cfg,
2880                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2881                 dev_err(&ioa_cfg->pdev->dev,
2882                         "IOA dump long data transfer timeout\n");
2883                 return -EIO;
2884         }
2885
2886         /* Signal LDUMP interlocked - clear IO debug ack */
2887         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2888                ioa_cfg->regs.clr_interrupt_reg);
2889
2890         /* Write Mailbox with starting address */
2891         writel(start_addr, ioa_cfg->ioa_mailbox);
2892
2893         /* Signal address valid - clear IOA Reset alert */
2894         writel(IPR_UPROCI_RESET_ALERT,
2895                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2896
2897         for (i = 0; i < length_in_words; i++) {
2898                 /* Wait for IO debug acknowledge */
2899                 if (ipr_wait_iodbg_ack(ioa_cfg,
2900                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2901                         dev_err(&ioa_cfg->pdev->dev,
2902                                 "IOA dump short data transfer timeout\n");
2903                         return -EIO;
2904                 }
2905
2906                 /* Read data from mailbox and increment destination pointer */
2907                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2908                 dest++;
2909
2910                 /* For all but the last word of data, signal data received */
2911                 if (i < (length_in_words - 1)) {
2912                         /* Signal dump data received - Clear IO debug Ack */
2913                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2914                                ioa_cfg->regs.clr_interrupt_reg);
2915                 }
2916         }
2917
2918         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2919         writel(IPR_UPROCI_RESET_ALERT,
2920                ioa_cfg->regs.set_uproc_interrupt_reg32);
2921
2922         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2923                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2924
2925         /* Signal dump data received - Clear IO debug Ack */
2926         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2927                ioa_cfg->regs.clr_interrupt_reg);
2928
2929         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2930         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2931                 temp_pcii_reg =
2932                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2933
2934                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2935                         return 0;
2936
2937                 udelay(10);
2938                 delay += 10;
2939         }
2940
2941         return 0;
2942 }
2943
2944 #ifdef CONFIG_SCSI_IPR_DUMP
2945 /**
2946  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2947  * @ioa_cfg:            ioa config struct
2948  * @pci_address:        adapter address
2949  * @length:                     length of data to copy
2950  *
2951  * Copy data from PCI adapter to kernel buffer.
2952  * Note: length MUST be a 4 byte multiple
2953  * Return value:
2954  *      0 on success / other on failure
2955  **/
2956 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2957                         unsigned long pci_address, u32 length)
2958 {
2959         int bytes_copied = 0;
2960         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2961         __be32 *page;
2962         unsigned long lock_flags = 0;
2963         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2964
2965         if (ioa_cfg->sis64)
2966                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2967         else
2968                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2969
2970         while (bytes_copied < length &&
2971                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2972                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2973                     ioa_dump->page_offset == 0) {
2974                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2975
2976                         if (!page) {
2977                                 ipr_trace;
2978                                 return bytes_copied;
2979                         }
2980
2981                         ioa_dump->page_offset = 0;
2982                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2983                         ioa_dump->next_page_index++;
2984                 } else
2985                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2986
2987                 rem_len = length - bytes_copied;
2988                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2989                 cur_len = min(rem_len, rem_page_len);
2990
2991                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2992                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2993                         rc = -EIO;
2994                 } else {
2995                         rc = ipr_get_ldump_data_section(ioa_cfg,
2996                                                         pci_address + bytes_copied,
2997                                                         &page[ioa_dump->page_offset / 4],
2998                                                         (cur_len / sizeof(u32)));
2999                 }
3000                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3001
3002                 if (!rc) {
3003                         ioa_dump->page_offset += cur_len;
3004                         bytes_copied += cur_len;
3005                 } else {
3006                         ipr_trace;
3007                         break;
3008                 }
3009                 schedule();
3010         }
3011
3012         return bytes_copied;
3013 }
3014
3015 /**
3016  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3017  * @hdr:        dump entry header struct
3018  *
3019  * Return value:
3020  *      nothing
3021  **/
3022 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3023 {
3024         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3025         hdr->num_elems = 1;
3026         hdr->offset = sizeof(*hdr);
3027         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3028 }
3029
3030 /**
3031  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3032  * @ioa_cfg:    ioa config struct
3033  * @driver_dump:        driver dump struct
3034  *
3035  * Return value:
3036  *      nothing
3037  **/
3038 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3039                                    struct ipr_driver_dump *driver_dump)
3040 {
3041         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3042
3043         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3044         driver_dump->ioa_type_entry.hdr.len =
3045                 sizeof(struct ipr_dump_ioa_type_entry) -
3046                 sizeof(struct ipr_dump_entry_header);
3047         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3048         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3049         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3050         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3051                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3052                 ucode_vpd->minor_release[1];
3053         driver_dump->hdr.num_entries++;
3054 }
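
/*
 * The packed fw_version word mirrors the fw_version sysfs attribute
 * below: for example (hypothetical VPD values), major_release 0x02,
 * card_type 0x5E and minor_release bytes 0x00 0x1E pack to 0x025E001E,
 * which ipr_show_fw_version would render as "025E001E".
 */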
3055
3056 /**
3057  * ipr_dump_version_data - Fill in the driver version in the dump.
3058  * @ioa_cfg:    ioa config struct
3059  * @driver_dump:        driver dump struct
3060  *
3061  * Return value:
3062  *      nothing
3063  **/
3064 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3065                                   struct ipr_driver_dump *driver_dump)
3066 {
3067         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3068         driver_dump->version_entry.hdr.len =
3069                 sizeof(struct ipr_dump_version_entry) -
3070                 sizeof(struct ipr_dump_entry_header);
3071         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3072         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3073         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3074         driver_dump->hdr.num_entries++;
3075 }
3076
3077 /**
3078  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3079  * @ioa_cfg:    ioa config struct
3080  * @driver_dump:        driver dump struct
3081  *
3082  * Return value:
3083  *      nothing
3084  **/
3085 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3086                                    struct ipr_driver_dump *driver_dump)
3087 {
3088         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3089         driver_dump->trace_entry.hdr.len =
3090                 sizeof(struct ipr_dump_trace_entry) -
3091                 sizeof(struct ipr_dump_entry_header);
3092         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3093         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3094         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3095         driver_dump->hdr.num_entries++;
3096 }
3097
3098 /**
3099  * ipr_dump_location_data - Fill in the IOA location in the dump.
3100  * @ioa_cfg:    ioa config struct
3101  * @driver_dump:        driver dump struct
3102  *
3103  * Return value:
3104  *      nothing
3105  **/
3106 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3107                                    struct ipr_driver_dump *driver_dump)
3108 {
3109         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3110         driver_dump->location_entry.hdr.len =
3111                 sizeof(struct ipr_dump_location_entry) -
3112                 sizeof(struct ipr_dump_entry_header);
3113         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3114         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3115         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3116         driver_dump->hdr.num_entries++;
3117 }
3118
3119 /**
3120  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3121  * @ioa_cfg:    ioa config struct
3122  * @dump:               dump struct
3123  *
3124  * Return value:
3125  *      nothing
3126  **/
3127 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3128 {
3129         unsigned long start_addr, sdt_word;
3130         unsigned long lock_flags = 0;
3131         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3132         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3133         u32 num_entries, max_num_entries, start_off, end_off;
3134         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3135         struct ipr_sdt *sdt;
3136         int valid = 1;
3137         int i;
3138
3139         ENTER;
3140
3141         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3142
3143         if (ioa_cfg->sdt_state != READ_DUMP) {
3144                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3145                 return;
3146         }
3147
3148         if (ioa_cfg->sis64) {
3149                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150                 ssleep(IPR_DUMP_DELAY_SECONDS);
3151                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3152         }
3153
3154         start_addr = readl(ioa_cfg->ioa_mailbox);
3155
3156         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3157                 dev_err(&ioa_cfg->pdev->dev,
3158                         "Invalid dump table format: %lx\n", start_addr);
3159                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160                 return;
3161         }
3162
3163         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3164
3165         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3166
3167         /* Initialize the overall dump header */
3168         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3169         driver_dump->hdr.num_entries = 1;
3170         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3171         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3172         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3173         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3174
3175         ipr_dump_version_data(ioa_cfg, driver_dump);
3176         ipr_dump_location_data(ioa_cfg, driver_dump);
3177         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3178         ipr_dump_trace_data(ioa_cfg, driver_dump);
3179
3180         /* Update dump_header */
3181         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3182
3183         /* IOA Dump entry */
3184         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3185         ioa_dump->hdr.len = 0;
3186         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3187         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3188
3189         /* First entries in sdt are actually a list of dump addresses and
3190          * lengths to gather the real dump data.  sdt represents the pointer
3191          * to the ioa generated dump table.  Dump data will be extracted based
3192          * on entries in this table */
3193         sdt = &ioa_dump->sdt;
3194
3195         if (ioa_cfg->sis64) {
3196                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3197                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3198         } else {
3199                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3200                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3201         }
3202
3203         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3204                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3205         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3206                                         bytes_to_copy / sizeof(__be32));
3207
3208         /* Smart Dump table is ready to use and the first entry is valid */
3209         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3210             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3211                 dev_err(&ioa_cfg->pdev->dev,
3212                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3213                         rc, be32_to_cpu(sdt->hdr.state));
3214                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3215                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3216                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3217                 return;
3218         }
3219
3220         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3221
3222         if (num_entries > max_num_entries)
3223                 num_entries = max_num_entries;
3224
3225         /* Update dump length to the actual data to be copied */
3226         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3227         if (ioa_cfg->sis64)
3228                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3229         else
3230                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3231
3232         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3233
3234         for (i = 0; i < num_entries; i++) {
3235                 if (ioa_dump->hdr.len > max_dump_size) {
3236                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3237                         break;
3238                 }
3239
3240                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3241                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3242                         if (ioa_cfg->sis64)
3243                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3244                         else {
3245                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3246                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3247
3248                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3249                                         bytes_to_copy = end_off - start_off;
3250                                 else
3251                                         valid = 0;
3252                         }
3253                         if (valid) {
3254                                 if (bytes_to_copy > max_dump_size) {
3255                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3256                                         continue;
3257                                 }
3258
3259                                 /* Copy data from adapter to driver buffers */
3260                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3261                                                             bytes_to_copy);
3262
3263                                 ioa_dump->hdr.len += bytes_copied;
3264
3265                                 if (bytes_copied != bytes_to_copy) {
3266                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3267                                         break;
3268                                 }
3269                         }
3270                 }
3271         }
3272
3273         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3274
3275         /* Update dump_header */
3276         driver_dump->hdr.len += ioa_dump->hdr.len;
3277         wmb();
3278         ioa_cfg->sdt_state = DUMP_OBTAINED;
3279         LEAVE;
3280 }
3281
3282 #else
3283 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3284 #endif
3285
3286 /**
3287  * ipr_release_dump - Free adapter dump memory
3288  * @kref:       kref struct
3289  *
3290  * Return value:
3291  *      nothing
3292  **/
3293 static void ipr_release_dump(struct kref *kref)
3294 {
3295         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3296         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3297         unsigned long lock_flags = 0;
3298         int i;
3299
3300         ENTER;
3301         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3302         ioa_cfg->dump = NULL;
3303         ioa_cfg->sdt_state = INACTIVE;
3304         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305
3306         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3307                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3308
3309         vfree(dump->ioa_dump.ioa_data);
3310         kfree(dump);
3311         LEAVE;
3312 }
3313
3314 /**
3315  * ipr_worker_thread - Worker thread
3316  * @work:               work struct embedded in the ioa config struct
3317  *
3318  * Called at task level from a work thread. This function takes care
3319  * of adding and removing device from the mid-layer as configuration
3320  * of adding and removing devices from the mid-layer as configuration
3321  *
3322  * Return value:
3323  *      nothing
3324  **/
3325 static void ipr_worker_thread(struct work_struct *work)
3326 {
3327         unsigned long lock_flags;
3328         struct ipr_resource_entry *res;
3329         struct scsi_device *sdev;
3330         struct ipr_dump *dump;
3331         struct ipr_ioa_cfg *ioa_cfg =
3332                 container_of(work, struct ipr_ioa_cfg, work_q);
3333         u8 bus, target, lun;
3334         int did_work;
3335
3336         ENTER;
3337         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3338
3339         if (ioa_cfg->sdt_state == READ_DUMP) {
3340                 dump = ioa_cfg->dump;
3341                 if (!dump) {
3342                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3343                         return;
3344                 }
3345                 kref_get(&dump->kref);
3346                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3347                 ipr_get_ioa_dump(ioa_cfg, dump);
3348                 kref_put(&dump->kref, ipr_release_dump);
3349
3350                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3351                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3352                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3353                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3354                 return;
3355         }
3356
3357         if (ioa_cfg->scsi_unblock) {
3358                 ioa_cfg->scsi_unblock = 0;
3359                 ioa_cfg->scsi_blocked = 0;
3360                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3361                 scsi_unblock_requests(ioa_cfg->host);
3362                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3363                 if (ioa_cfg->scsi_blocked)
3364                         scsi_block_requests(ioa_cfg->host);
3365         }
3366
3367         if (!ioa_cfg->scan_enabled) {
3368                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369                 return;
3370         }
3371
3372 restart:
3373         do {
3374                 did_work = 0;
3375                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3376                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377                         return;
3378                 }
3379
3380                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3381                         if (res->del_from_ml && res->sdev) {
3382                                 did_work = 1;
3383                                 sdev = res->sdev;
3384                                 if (!scsi_device_get(sdev)) {
3385                                         if (!res->add_to_ml)
3386                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3387                                         else
3388                                                 res->del_from_ml = 0;
3389                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3390                                         scsi_remove_device(sdev);
3391                                         scsi_device_put(sdev);
3392                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3393                                 }
3394                                 break;
3395                         }
3396                 }
3397         } while (did_work);
3398
3399         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3400                 if (res->add_to_ml) {
3401                         bus = res->bus;
3402                         target = res->target;
3403                         lun = res->lun;
3404                         res->add_to_ml = 0;
3405                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3406                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3407                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3408                         goto restart;
3409                 }
3410         }
3411
3412         ioa_cfg->scan_done = 1;
3413         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3414         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3415         LEAVE;
3416 }
3417
3418 #ifdef CONFIG_SCSI_IPR_TRACE
3419 /**
3420  * ipr_read_trace - Dump the adapter trace
3421  * @filp:               open sysfs file
3422  * @kobj:               kobject struct
3423  * @bin_attr:           bin_attribute struct
3424  * @buf:                buffer
3425  * @off:                offset
3426  * @count:              buffer size
3427  *
3428  * Return value:
3429  *      number of bytes printed to buffer
3430  **/
3431 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3432                               struct bin_attribute *bin_attr,
3433                               char *buf, loff_t off, size_t count)
3434 {
3435         struct device *dev = container_of(kobj, struct device, kobj);
3436         struct Scsi_Host *shost = class_to_shost(dev);
3437         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3438         unsigned long lock_flags = 0;
3439         ssize_t ret;
3440
3441         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3442         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3443                                 IPR_TRACE_SIZE);
3444         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3445
3446         return ret;
3447 }
3448
3449 static struct bin_attribute ipr_trace_attr = {
3450         .attr = {
3451                 .name = "trace",
3452                 .mode = S_IRUGO,
3453         },
3454         .size = 0,
3455         .read = ipr_read_trace,
3456 };
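
/*
 * Minimal user-space sketch (illustrative only; the sysfs path and
 * host number are assumptions) for reading this binary attribute:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	char trace[4096];
 *	int fd = open("/sys/class/scsi_host/host0/trace", O_RDONLY);
 *	ssize_t n = read(fd, trace, sizeof(trace));
 *	close(fd);
 *
 * The read is bounded by IPR_TRACE_SIZE on the kernel side.
 */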
3457 #endif
3458
3459 /**
3460  * ipr_show_fw_version - Show the firmware version
3461  * @dev:        class device struct
3462  * @buf:        buffer
3463  *
3464  * Return value:
3465  *      number of bytes printed to buffer
3466  **/
3467 static ssize_t ipr_show_fw_version(struct device *dev,
3468                                    struct device_attribute *attr, char *buf)
3469 {
3470         struct Scsi_Host *shost = class_to_shost(dev);
3471         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3472         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3473         unsigned long lock_flags = 0;
3474         int len;
3475
3476         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3477         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3478                        ucode_vpd->major_release, ucode_vpd->card_type,
3479                        ucode_vpd->minor_release[0],
3480                        ucode_vpd->minor_release[1]);
3481         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3482         return len;
3483 }
3484
3485 static struct device_attribute ipr_fw_version_attr = {
3486         .attr = {
3487                 .name =         "fw_version",
3488                 .mode =         S_IRUGO,
3489         },
3490         .show = ipr_show_fw_version,
3491 };
3492
3493 /**
3494  * ipr_show_log_level - Show the adapter's error logging level
3495  * @dev:        class device struct
3496  * @buf:        buffer
3497  *
3498  * Return value:
3499  *      number of bytes printed to buffer
3500  **/
3501 static ssize_t ipr_show_log_level(struct device *dev,
3502                                    struct device_attribute *attr, char *buf)
3503 {
3504         struct Scsi_Host *shost = class_to_shost(dev);
3505         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3506         unsigned long lock_flags = 0;
3507         int len;
3508
3509         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3510         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3511         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3512         return len;
3513 }
3514
3515 /**
3516  * ipr_store_log_level - Change the adapter's error logging level
3517  * @dev:        class device struct
3518  * @buf:        buffer
3519  *
3520  * Return value:
3521  *      number of bytes consumed from buffer
3522  **/
3523 static ssize_t ipr_store_log_level(struct device *dev,
3524                                    struct device_attribute *attr,
3525                                    const char *buf, size_t count)
3526 {
3527         struct Scsi_Host *shost = class_to_shost(dev);
3528         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3529         unsigned long lock_flags = 0;
3530
3531         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3532         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3533         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3534         return strlen(buf);
3535 }
3536
3537 static struct device_attribute ipr_log_level_attr = {
3538         .attr = {
3539                 .name =         "log_level",
3540                 .mode =         S_IRUGO | S_IWUSR,
3541         },
3542         .show = ipr_show_log_level,
3543         .store = ipr_store_log_level
3544 };
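
/*
 * Typical usage from user space (host number is an assumption):
 *
 *	# cat /sys/class/scsi_host/host0/log_level
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * The store side parses the value with simple_strtoul(), so any
 * leading decimal number in the written string is accepted.
 */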
3545
3546 /**
3547  * ipr_store_diagnostics - IOA Diagnostics interface
3548  * @dev:        device struct
3549  * @buf:        buffer
3550  * @count:      buffer size
3551  *
3552  * This function will reset the adapter and wait a reasonable
3553  * amount of time for any errors that the adapter might log.
3554  *
3555  * Return value:
3556  *      count on success / other on failure
3557  **/
3558 static ssize_t ipr_store_diagnostics(struct device *dev,
3559                                      struct device_attribute *attr,
3560                                      const char *buf, size_t count)
3561 {
3562         struct Scsi_Host *shost = class_to_shost(dev);
3563         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3564         unsigned long lock_flags = 0;
3565         int rc = count;
3566
3567         if (!capable(CAP_SYS_ADMIN))
3568                 return -EACCES;
3569
3570         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3571         while (ioa_cfg->in_reset_reload) {
3572                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3573                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3574                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3575         }
3576
3577         ioa_cfg->errors_logged = 0;
3578         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3579
3580         if (ioa_cfg->in_reset_reload) {
3581                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3582                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3583
3584                 /* Wait for a second for any errors to be logged */
3585                 msleep(1000);
3586         } else {
3587                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3588                 return -EIO;
3589         }
3590
3591         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3592         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3593                 rc = -EIO;
3594         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3595
3596         return rc;
3597 }
3598
3599 static struct device_attribute ipr_diagnostics_attr = {
3600         .attr = {
3601                 .name =         "run_diagnostics",
3602                 .mode =         S_IWUSR,
3603         },
3604         .store = ipr_store_diagnostics
3605 };
3606
3607 /**
3608  * ipr_show_adapter_state - Show the adapter's state
3609  * @dev:        device struct
3610  * @buf:        buffer
3611  *
3612  * Return value:
3613  *      number of bytes printed to buffer
3614  **/
3615 static ssize_t ipr_show_adapter_state(struct device *dev,
3616                                       struct device_attribute *attr, char *buf)
3617 {
3618         struct Scsi_Host *shost = class_to_shost(dev);
3619         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3620         unsigned long lock_flags = 0;
3621         int len;
3622
3623         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3624         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3625                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3626         else
3627                 len = snprintf(buf, PAGE_SIZE, "online\n");
3628         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3629         return len;
3630 }
3631
3632 /**
3633  * ipr_store_adapter_state - Change adapter state
3634  * @dev:        device struct
3635  * @buf:        buffer
3636  * @count:      buffer size
3637  *
3638  * This function will change the adapter's state.
3639  *
3640  * Return value:
3641  *      count on success / other on failure
3642  **/
3643 static ssize_t ipr_store_adapter_state(struct device *dev,
3644                                        struct device_attribute *attr,
3645                                        const char *buf, size_t count)
3646 {
3647         struct Scsi_Host *shost = class_to_shost(dev);
3648         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3649         unsigned long lock_flags;
3650         int result = count, i;
3651
3652         if (!capable(CAP_SYS_ADMIN))
3653                 return -EACCES;
3654
3655         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3656         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3657             !strncmp(buf, "online", 6)) {
3658                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3659                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3660                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3661                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3662                 }
3663                 wmb();
3664                 ioa_cfg->reset_retries = 0;
3665                 ioa_cfg->in_ioa_bringdown = 0;
3666                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3667         }
3668         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3669         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3670
3671         return result;
3672 }
3673
3674 static struct device_attribute ipr_ioa_state_attr = {
3675         .attr = {
3676                 .name =         "online_state",
3677                 .mode =         S_IRUGO | S_IWUSR,
3678         },
3679         .show = ipr_show_adapter_state,
3680         .store = ipr_store_adapter_state
3681 };
3682
3683 /**
3684  * ipr_store_reset_adapter - Reset the adapter
3685  * @dev:        device struct
3686  * @buf:        buffer
3687  * @count:      buffer size
3688  *
3689  * This function will reset the adapter.
3690  *
3691  * Return value:
3692  *      count on success / other on failure
3693  **/
3694 static ssize_t ipr_store_reset_adapter(struct device *dev,
3695                                        struct device_attribute *attr,
3696                                        const char *buf, size_t count)
3697 {
3698         struct Scsi_Host *shost = class_to_shost(dev);
3699         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3700         unsigned long lock_flags;
3701         int result = count;
3702
3703         if (!capable(CAP_SYS_ADMIN))
3704                 return -EACCES;
3705
3706         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3707         if (!ioa_cfg->in_reset_reload)
3708                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3709         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3710         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3711
3712         return result;
3713 }
3714
3715 static struct device_attribute ipr_ioa_reset_attr = {
3716         .attr = {
3717                 .name =         "reset_host",
3718                 .mode =         S_IWUSR,
3719         },
3720         .store = ipr_store_reset_adapter
3721 };
3722
3723 static int ipr_iopoll(struct irq_poll *iop, int budget);
3724 /**
3725  * ipr_show_iopoll_weight - Show ipr polling mode
3726  * @dev:        class device struct
3727  * @buf:        buffer
3728  *
3729  * Return value:
3730  *      number of bytes printed to buffer
3731  **/
3732 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3733                                    struct device_attribute *attr, char *buf)
3734 {
3735         struct Scsi_Host *shost = class_to_shost(dev);
3736         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3737         unsigned long lock_flags = 0;
3738         int len;
3739
3740         spin_lock_irqsave(shost->host_lock, lock_flags);
3741         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3742         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3743
3744         return len;
3745 }
3746
3747 /**
3748  * ipr_store_iopoll_weight - Change the adapter's polling mode
3749  * @dev:        class device struct
3750  * @buf:        buffer
3751  *
3752  * Return value:
3753  *      number of bytes consumed from buffer / -EINVAL on failure
3754  **/
3755 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3756                                         struct device_attribute *attr,
3757                                         const char *buf, size_t count)
3758 {
3759         struct Scsi_Host *shost = class_to_shost(dev);
3760         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3761         unsigned long user_iopoll_weight;
3762         unsigned long lock_flags = 0;
3763         int i;
3764
3765         if (!ioa_cfg->sis64) {
3766                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3767                 return -EINVAL;
3768         }
3769         if (kstrtoul(buf, 10, &user_iopoll_weight))
3770                 return -EINVAL;
3771
3772         if (user_iopoll_weight > 256) {
3773                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be 256 or less\n");
3774                 return -EINVAL;
3775         }
3776
3777         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3778                 dev_info(&ioa_cfg->pdev->dev, "Requested irq_poll weight is already set\n");
3779                 return strlen(buf);
3780         }
3781
3782         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3783                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3784                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3785         }
3786
3787         spin_lock_irqsave(shost->host_lock, lock_flags);
3788         ioa_cfg->iopoll_weight = user_iopoll_weight;
3789         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3790                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3791                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3792                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3793                 }
3794         }
3795         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3796
3797         return strlen(buf);
3798 }
3799
3800 static struct device_attribute ipr_iopoll_weight_attr = {
3801         .attr = {
3802                 .name =         "iopoll_weight",
3803                 .mode =         S_IRUGO | S_IWUSR,
3804         },
3805         .show = ipr_show_iopoll_weight,
3806         .store = ipr_store_iopoll_weight
3807 };
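
/*
 * Usage sketch (host number is an assumption): writing 0 disables
 * irq_poll, and any value up to 256 enables polled completion handling
 * on SIS-64 adapters with more than one HRRQ vector:
 *
 *	# echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */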
3808
3809 /**
3810  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3811  * @buf_len:            buffer length
3812  *
3813  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3814  * list to use for microcode download
3815  *
3816  * Return value:
3817  *      pointer to sglist / NULL on failure
3818  **/
3819 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3820 {
3821         int sg_size, order;
3822         struct ipr_sglist *sglist;
3823
3824         /* Get the minimum size per scatter/gather element */
3825         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3826
3827         /* Get the actual size per element */
3828         order = get_order(sg_size);
3829
3830         /* Allocate a scatter/gather list for the DMA */
3831         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3832         if (sglist == NULL) {
3833                 ipr_trace;
3834                 return NULL;
3835         }
3836         sglist->order = order;
3837         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3838                                               &sglist->num_sg);
3839         if (!sglist->scatterlist) {
3840                 kfree(sglist);
3841                 return NULL;
3842         }
3843
3844         return sglist;
3845 }
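
/*
 * Sizing example (assuming IPR_MAX_SGLIST is 64 and 4KB pages): a 4MB
 * microcode image gives sg_size = 4194304 / 63 = 66576 bytes, and
 * get_order(66576) rounds that up to order 5, i.e. 128KB per
 * scatter/gather element, so the whole image fits in 32 elements.
 */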
3846
3847 /**
3848  * ipr_free_ucode_buffer - Frees a microcode download buffer
3849  * @sglist:             scatter/gather list pointer
3850  *
3851  * Free a DMA'able ucode download buffer previously allocated with
3852  * ipr_alloc_ucode_buffer
3853  *
3854  * Return value:
3855  *      nothing
3856  **/
3857 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3858 {
3859         sgl_free_order(sglist->scatterlist, sglist->order);
3860         kfree(sglist);
3861 }
3862
3863 /**
3864  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3865  * @sglist:             scatter/gather list pointer
3866  * @buffer:             buffer pointer
3867  * @len:                buffer length
3868  *
3869  * Copy a microcode image from a user buffer into a buffer allocated by
3870  * ipr_alloc_ucode_buffer
3871  *
3872  * Return value:
3873  *      0 on success / other on failure
3874  **/
3875 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3876                                  u8 *buffer, u32 len)
3877 {
3878         int bsize_elem, i, result = 0;
3879         struct scatterlist *scatterlist;
3880         void *kaddr;
3881
3882         /* Determine the actual number of bytes per element */
3883         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3884
3885         scatterlist = sglist->scatterlist;
3886
3887         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3888                 struct page *page = sg_page(&scatterlist[i]);
3889
3890                 kaddr = kmap(page);
3891                 memcpy(kaddr, buffer, bsize_elem);
3892                 kunmap(page);
3893
3894                 scatterlist[i].length = bsize_elem;
3895
3896                 if (result != 0) {
3897                         ipr_trace;
3898                         return result;
3899                 }
3900         }
3901
3902         if (len % bsize_elem) {
3903                 struct page *page = sg_page(&scatterlist[i]);
3904
3905                 kaddr = kmap(page);
3906                 memcpy(kaddr, buffer, len % bsize_elem);
3907                 kunmap(page);
3908
3909                 scatterlist[i].length = len % bsize_elem;
3910         }
3911
3912         sglist->buffer_len = len;
3913         return result;
3914 }
3915
3916 /**
3917  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3918  * @ipr_cmd:            ipr command struct
3919  * @sglist:             scatter/gather list
3920  *
3921  * Builds a microcode download IOA data list (IOADL).
3922  *
3923  **/
3924 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3925                                     struct ipr_sglist *sglist)
3926 {
3927         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3928         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3929         struct scatterlist *scatterlist = sglist->scatterlist;
3930         int i;
3931
3932         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3933         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3934         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3935
3936         ioarcb->ioadl_len =
3937                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3938         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3939                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3940                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3941                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3942         }
3943
3944         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3945 }
3946
3947 /**
3948  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3949  * @ipr_cmd:    ipr command struct
3950  * @sglist:             scatter/gather list
3951  *
3952  * Builds a microcode download IOA data list (IOADL).
3953  *
3954  **/
3955 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3956                                   struct ipr_sglist *sglist)
3957 {
3958         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3959         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3960         struct scatterlist *scatterlist = sglist->scatterlist;
3961         int i;
3962
3963         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3964         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3965         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3966
3967         ioarcb->ioadl_len =
3968                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3969
3970         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3971                 ioadl[i].flags_and_data_len =
3972                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3973                 ioadl[i].address =
3974                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3975         }
3976
3977         ioadl[i-1].flags_and_data_len |=
3978                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3979 }
3980
3981 /**
3982  * ipr_update_ioa_ucode - Update IOA's microcode
3983  * @ioa_cfg:    ioa config struct
3984  * @sglist:             scatter/gather list
3985  *
3986  * Initiate an adapter reset to update the IOA's microcode
3987  *
3988  * Return value:
3989  *      0 on success / -EIO on failure
3990  **/
3991 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3992                                 struct ipr_sglist *sglist)
3993 {
3994         unsigned long lock_flags;
3995
3996         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3997         while (ioa_cfg->in_reset_reload) {
3998                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3999                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4000                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4001         }
4002
4003         if (ioa_cfg->ucode_sglist) {
4004                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4005                 dev_err(&ioa_cfg->pdev->dev,
4006                         "Microcode download already in progress\n");
4007                 return -EIO;
4008         }
4009
4010         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4011                                         sglist->scatterlist, sglist->num_sg,
4012                                         DMA_TO_DEVICE);
4013
4014         if (!sglist->num_dma_sg) {
4015                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4016                 dev_err(&ioa_cfg->pdev->dev,
4017                         "Failed to map microcode download buffer!\n");
4018                 return -EIO;
4019         }
4020
4021         ioa_cfg->ucode_sglist = sglist;
4022         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4023         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4024         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4025
4026         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4027         ioa_cfg->ucode_sglist = NULL;
4028         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4029         return 0;
4030 }
4031
4032 /**
4033  * ipr_store_update_fw - Update the firmware on the adapter
4034  * @dev:        class device struct
4035  * @buf:        buffer
4036  * @count:      buffer size
4037  *
4038  * This function will update the firmware on the adapter.
4039  *
4040  * Return value:
4041  *      count on success / other on failure
4042  **/
4043 static ssize_t ipr_store_update_fw(struct device *dev,
4044                                    struct device_attribute *attr,
4045                                    const char *buf, size_t count)
4046 {
4047         struct Scsi_Host *shost = class_to_shost(dev);
4048         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4049         struct ipr_ucode_image_header *image_hdr;
4050         const struct firmware *fw_entry;
4051         struct ipr_sglist *sglist;
4052         char fname[100];
4053         char *src;
4054         char *endline;
4055         int result, dnld_size;
4056
4057         if (!capable(CAP_SYS_ADMIN))
4058                 return -EACCES;
4059
4060         snprintf(fname, sizeof(fname), "%s", buf);
4061
4062         endline = strchr(fname, '\n');
4063         if (endline)
4064                 *endline = '\0';
4065
4066         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4067                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4068                 return -EIO;
4069         }
4070
4071         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4072
4073         src = (char *)image_hdr + be32_to_cpu(image_hdr->header_length);
4074         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4075         sglist = ipr_alloc_ucode_buffer(dnld_size);
4076
4077         if (!sglist) {
4078                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4079                 release_firmware(fw_entry);
4080                 return -ENOMEM;
4081         }
4082
4083         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4084
4085         if (result) {
4086                 dev_err(&ioa_cfg->pdev->dev,
4087                         "Microcode buffer copy to DMA buffer failed\n");
4088                 goto out;
4089         }
4090
4091         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4092
4093         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4094
4095         if (!result)
4096                 result = count;
4097 out:
4098         ipr_free_ucode_buffer(sglist);
4099         release_firmware(fw_entry);
4100         return result;
4101 }
4102
4103 static struct device_attribute ipr_update_fw_attr = {
4104         .attr = {
4105                 .name =         "update_fw",
4106                 .mode =         S_IWUSR,
4107         },
4108         .store = ipr_store_update_fw
4109 };
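/*
 * Editorial sketch, not part of the driver: update_fw is driven from user
 * space by writing a firmware image name (resolved against the firmware
 * search path, e.g. /lib/firmware) into the sysfs attribute above.  The
 * host number and image name below are assumptions for illustration:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int update_ipr_fw(void)
 *	{
 *		const char *path = "/sys/class/scsi_host/host0/update_fw";
 *		const char *image = "ipr-ucode.img";	// hypothetical name
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		// ipr_store_update_fw() strips a trailing newline itself,
 *		// so a bare image name is fine here.
 *		if (write(fd, image, strlen(image)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */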
4110
4111 /**
4112  * ipr_show_fw_type - Show the adapter's firmware type.
4113  * @dev:        class device struct
4114  * @buf:        buffer
4115  *
4116  * Return value:
4117  *      number of bytes printed to buffer
4118  **/
4119 static ssize_t ipr_show_fw_type(struct device *dev,
4120                                 struct device_attribute *attr, char *buf)
4121 {
4122         struct Scsi_Host *shost = class_to_shost(dev);
4123         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4124         unsigned long lock_flags = 0;
4125         int len;
4126
4127         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4128         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4129         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4130         return len;
4131 }
4132
4133 static struct device_attribute ipr_ioa_fw_type_attr = {
4134         .attr = {
4135                 .name =         "fw_type",
4136                 .mode =         S_IRUGO,
4137         },
4138         .show = ipr_show_fw_type
4139 };
4140
4141 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4142                                 struct bin_attribute *bin_attr, char *buf,
4143                                 loff_t off, size_t count)
4144 {
4145         struct device *cdev = container_of(kobj, struct device, kobj);
4146         struct Scsi_Host *shost = class_to_shost(cdev);
4147         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4148         struct ipr_hostrcb *hostrcb;
4149         unsigned long lock_flags = 0;
4150         int ret;
4151
4152         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4153         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4154                                         struct ipr_hostrcb, queue);
4155         if (!hostrcb) {
4156                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4157                 return 0;
4158         }
4159         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4160                                 sizeof(hostrcb->hcam));
4161         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4162         return ret;
4163 }
4164
4165 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4166                                 struct bin_attribute *bin_attr, char *buf,
4167                                 loff_t off, size_t count)
4168 {
4169         struct device *cdev = container_of(kobj, struct device, kobj);
4170         struct Scsi_Host *shost = class_to_shost(cdev);
4171         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4172         struct ipr_hostrcb *hostrcb;
4173         unsigned long lock_flags = 0;
4174
4175         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4176         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4177                                         struct ipr_hostrcb, queue);
4178         if (!hostrcb) {
4179                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4180                 return count;
4181         }
4182
4183         /* Reclaim hostrcb before exit */
4184         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4185         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4186         return count;
4187 }
4188
4189 static struct bin_attribute ipr_ioa_async_err_log = {
4190         .attr = {
4191                 .name =         "async_err_log",
4192                 .mode =         S_IRUGO | S_IWUSR,
4193         },
4194         .size = 0,
4195         .read = ipr_read_async_err_log,
4196         .write = ipr_next_async_err_log
4197 };
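/*
 * Editorial sketch, not part of the driver: async_err_log exposes the
 * oldest queued HCAM buffer; a read returns it, and a write of anything
 * reclaims it so the next read sees the following entry.  A hypothetical
 * user-space drain loop built on that contract (host number assumed):
 *
 *	char buf[4096];		// at least sizeof(hostrcb->hcam)
 *	int fd = open("/sys/class/scsi_host/host0/async_err_log", O_RDWR);
 *	ssize_t n;
 *
 *	while (fd >= 0 && (n = pread(fd, buf, sizeof(buf), 0)) > 0) {
 *		consume_hcam(buf, n);	// hypothetical consumer
 *		write(fd, "1", 1);	// pop the entry just read
 *	}
 */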
4198
4199 static struct device_attribute *ipr_ioa_attrs[] = {
4200         &ipr_fw_version_attr,
4201         &ipr_log_level_attr,
4202         &ipr_diagnostics_attr,
4203         &ipr_ioa_state_attr,
4204         &ipr_ioa_reset_attr,
4205         &ipr_update_fw_attr,
4206         &ipr_ioa_fw_type_attr,
4207         &ipr_iopoll_weight_attr,
4208         NULL,
4209 };
4210
4211 #ifdef CONFIG_SCSI_IPR_DUMP
4212 /**
4213  * ipr_read_dump - Dump the adapter
4214  * @filp:               open sysfs file
4215  * @kobj:               kobject struct
4216  * @bin_attr:           bin_attribute struct
4217  * @buf:                buffer
4218  * @off:                offset
4219  * @count:              buffer size
4220  *
4221  * Return value:
4222  *      number of bytes printed to buffer
4223  **/
4224 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4225                              struct bin_attribute *bin_attr,
4226                              char *buf, loff_t off, size_t count)
4227 {
4228         struct device *cdev = container_of(kobj, struct device, kobj);
4229         struct Scsi_Host *shost = class_to_shost(cdev);
4230         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4231         struct ipr_dump *dump;
4232         unsigned long lock_flags = 0;
4233         char *src;
4234         int len, sdt_end;
4235         size_t rc = count;
4236
4237         if (!capable(CAP_SYS_ADMIN))
4238                 return -EACCES;
4239
4240         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4241         dump = ioa_cfg->dump;
4242
4243         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4244                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4245                 return 0;
4246         }
4247         kref_get(&dump->kref);
4248         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4249
4250         if (off > dump->driver_dump.hdr.len) {
4251                 kref_put(&dump->kref, ipr_release_dump);
4252                 return 0;
4253         }
4254
4255         if (off + count > dump->driver_dump.hdr.len) {
4256                 count = dump->driver_dump.hdr.len - off;
4257                 rc = count;
4258         }
4259
4260         if (count && off < sizeof(dump->driver_dump)) {
4261                 if (off + count > sizeof(dump->driver_dump))
4262                         len = sizeof(dump->driver_dump) - off;
4263                 else
4264                         len = count;
4265                 src = (u8 *)&dump->driver_dump + off;
4266                 memcpy(buf, src, len);
4267                 buf += len;
4268                 off += len;
4269                 count -= len;
4270         }
4271
4272         off -= sizeof(dump->driver_dump);
4273
4274         if (ioa_cfg->sis64)
4275                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4276                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4277                            sizeof(struct ipr_sdt_entry));
4278         else
4279                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4280                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4281
4282         if (count && off < sdt_end) {
4283                 if (off + count > sdt_end)
4284                         len = sdt_end - off;
4285                 else
4286                         len = count;
4287                 src = (u8 *)&dump->ioa_dump + off;
4288                 memcpy(buf, src, len);
4289                 buf += len;
4290                 off += len;
4291                 count -= len;
4292         }
4293
4294         off -= sdt_end;
4295
4296         while (count) {
4297                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4298                         len = PAGE_ALIGN(off) - off;
4299                 else
4300                         len = count;
4301                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4302                 src += off & ~PAGE_MASK;
4303                 memcpy(buf, src, len);
4304                 buf += len;
4305                 off += len;
4306                 count -= len;
4307         }
4308
4309         kref_put(&dump->kref, ipr_release_dump);
4310         return rc;
4311 }
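/*
 * Editorial note: ipr_read_dump() above presents the dump as one flat
 * file of three back-to-back regions and walks @off through each in turn:
 *
 *	[0, sizeof(driver_dump))   driver dump headers/tables
 *	[.., sdt_end)              SDT header plus entries; sdt_end depends
 *	                           on sis64 vs. FMT2 entry counts
 *	[sdt_end, hdr.len)         IOA data, kept as an array of PAGE_SIZE
 *	                           pages in ioa_dump.ioa_data[]
 *
 * Each copy advances buf/off and decrements count, and the final loop
 * sizes len so no single memcpy ever crosses a page boundary.
 */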
4312
4313 /**
4314  * ipr_alloc_dump - Prepare for adapter dump
4315  * @ioa_cfg:    ioa config struct
4316  *
4317  * Return value:
4318  *      0 on success / other on failure
4319  **/
4320 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4321 {
4322         struct ipr_dump *dump;
4323         __be32 **ioa_data;
4324         unsigned long lock_flags = 0;
4325
4326         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4327
4328         if (!dump) {
4329                 ipr_err("Dump memory allocation failed\n");
4330                 return -ENOMEM;
4331         }
4332
4333         if (ioa_cfg->sis64)
4334                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4335         else
4336                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4337
4338         if (!ioa_data) {
4339                 ipr_err("Dump memory allocation failed\n");
4340                 kfree(dump);
4341                 return -ENOMEM;
4342         }
4343
4344         dump->ioa_dump.ioa_data = ioa_data;
4345
4346         kref_init(&dump->kref);
4347         dump->ioa_cfg = ioa_cfg;
4348
4349         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4350
4351         if (INACTIVE != ioa_cfg->sdt_state) {
4352                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4353                 vfree(dump->ioa_dump.ioa_data);
4354                 kfree(dump);
4355                 return 0;
4356         }
4357
4358         ioa_cfg->dump = dump;
4359         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4360         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4361                 ioa_cfg->dump_taken = 1;
4362                 schedule_work(&ioa_cfg->work_q);
4363         }
4364         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4365
4366         return 0;
4367 }
4368
4369 /**
4370  * ipr_free_dump - Free adapter dump memory
4371  * @ioa_cfg:    ioa config struct
4372  *
4373  * Return value:
4374  *      0 on success / other on failure
4375  **/
4376 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4377 {
4378         struct ipr_dump *dump;
4379         unsigned long lock_flags = 0;
4380
4381         ENTER;
4382
4383         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4384         dump = ioa_cfg->dump;
4385         if (!dump) {
4386                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4387                 return 0;
4388         }
4389
4390         ioa_cfg->dump = NULL;
4391         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4392
4393         kref_put(&dump->kref, ipr_release_dump);
4394
4395         LEAVE;
4396         return 0;
4397 }
4398
4399 /**
4400  * ipr_write_dump - Setup dump state of adapter
4401  * @filp:               open sysfs file
4402  * @kobj:               kobject struct
4403  * @bin_attr:           bin_attribute struct
4404  * @buf:                buffer
4405  * @off:                offset
4406  * @count:              buffer size
4407  *
4408  * Return value:
4409  *      count on success / negative error on failure
4410  **/
4411 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4412                               struct bin_attribute *bin_attr,
4413                               char *buf, loff_t off, size_t count)
4414 {
4415         struct device *cdev = container_of(kobj, struct device, kobj);
4416         struct Scsi_Host *shost = class_to_shost(cdev);
4417         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4418         int rc;
4419
4420         if (!capable(CAP_SYS_ADMIN))
4421                 return -EACCES;
4422
4423         if (buf[0] == '1')
4424                 rc = ipr_alloc_dump(ioa_cfg);
4425         else if (buf[0] == '0')
4426                 rc = ipr_free_dump(ioa_cfg);
4427         else
4428                 return -EINVAL;
4429
4430         if (rc)
4431                 return rc;
4432         else
4433                 return count;
4434 }
4435
4436 static struct bin_attribute ipr_dump_attr = {
4437         .attr = {
4438                 .name = "dump",
4439                 .mode = S_IRUSR | S_IWUSR,
4440         },
4441         .size = 0,
4442         .read = ipr_read_dump,
4443         .write = ipr_write_dump
4444 };
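/*
 * Editorial sketch, not part of the driver: the dump attribute is driven
 * by single-character writes ('1' arms/allocates, '0' frees) and read back
 * as a flat file.  A hypothetical capture sequence (host number assumed):
 *
 *	char buf[4096];
 *	ssize_t n;
 *	int fd = open("/sys/class/scsi_host/host0/dump", O_RDWR);
 *
 *	write(fd, "1", 1);		// arm: ipr_alloc_dump()
 *	// ... adapter failure occurs and the dump is obtained ...
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		save_chunk(buf, n);	// hypothetical sink
 *	write(fd, "0", 1);		// free: ipr_free_dump()
 *
 * ipr_read_dump() returns 0 bytes until sdt_state reaches DUMP_OBTAINED,
 * so the read loop is a no-op if no dump has actually been taken.
 */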
4445 #else
4446 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4447 #endif
4448
4449 /**
4450  * ipr_change_queue_depth - Change the device's queue depth
4451  * @sdev:       scsi device struct
4452  * @qdepth:     depth to set
4454  *
4455  * Return value:
4456  *      actual depth set
4457  **/
4458 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4459 {
4460         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4461         struct ipr_resource_entry *res;
4462         unsigned long lock_flags = 0;
4463
4464         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4465         res = (struct ipr_resource_entry *)sdev->hostdata;
4466
4467         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4468                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4469         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4470
4471         scsi_change_queue_depth(sdev, qdepth);
4472         return sdev->queue_depth;
4473 }
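/*
 * Editorial note: the SCSI midlayer calls the change_queue_depth hook
 * above when user space writes the per-device sysfs queue_depth file
 * (e.g. /sys/block/sdX/device/queue_depth, device name hypothetical).
 * The clamp to IPR_MAX_CMD_PER_ATA_LUN applies only to GATA (SATA)
 * resources; other devices keep whatever depth the midlayer accepts.
 */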
4474
4475 /**
4476  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4477  * @dev:        device struct
4478  * @attr:       device attribute structure
4479  * @buf:        buffer
4480  *
4481  * Return value:
4482  *      number of bytes printed to buffer
4483  **/
4484 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4485 {
4486         struct scsi_device *sdev = to_scsi_device(dev);
4487         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4488         struct ipr_resource_entry *res;
4489         unsigned long lock_flags = 0;
4490         ssize_t len = -ENXIO;
4491
4492         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4493         res = (struct ipr_resource_entry *)sdev->hostdata;
4494         if (res)
4495                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4496         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4497         return len;
4498 }
4499
4500 static struct device_attribute ipr_adapter_handle_attr = {
4501         .attr = {
4502                 .name =         "adapter_handle",
4503                 .mode =         S_IRUSR,
4504         },
4505         .show = ipr_show_adapter_handle
4506 };
4507
4508 /**
4509  * ipr_show_resource_path - Show the resource path or the resource address for
4510  *                          this device.
4511  * @dev:        device struct
4512  * @attr:       device attribute structure
4513  * @buf:        buffer
4514  *
4515  * Return value:
4516  *      number of bytes printed to buffer
4517  **/
4518 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4519 {
4520         struct scsi_device *sdev = to_scsi_device(dev);
4521         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4522         struct ipr_resource_entry *res;
4523         unsigned long lock_flags = 0;
4524         ssize_t len = -ENXIO;
4525         char buffer[IPR_MAX_RES_PATH_LENGTH];
4526
4527         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4528         res = (struct ipr_resource_entry *)sdev->hostdata;
4529         if (res && ioa_cfg->sis64)
4530                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4531                                __ipr_format_res_path(res->res_path, buffer,
4532                                                      sizeof(buffer)));
4533         else if (res)
4534                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4535                                res->bus, res->target, res->lun);
4536
4537         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4538         return len;
4539 }
4540
4541 static struct device_attribute ipr_resource_path_attr = {
4542         .attr = {
4543                 .name =         "resource_path",
4544                 .mode =         S_IRUGO,
4545         },
4546         .show = ipr_show_resource_path
4547 };
4548
4549 /**
4550  * ipr_show_device_id - Show the device_id for this device.
4551  * @dev:        device struct
4552  * @attr:       device attribute structure
4553  * @buf:        buffer
4554  *
4555  * Return value:
4556  *      number of bytes printed to buffer
4557  **/
4558 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4559 {
4560         struct scsi_device *sdev = to_scsi_device(dev);
4561         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4562         struct ipr_resource_entry *res;
4563         unsigned long lock_flags = 0;
4564         ssize_t len = -ENXIO;
4565
4566         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4567         res = (struct ipr_resource_entry *)sdev->hostdata;
4568         if (res && ioa_cfg->sis64)
4569                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4570         else if (res)
4571                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4572
4573         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4574         return len;
4575 }
4576
4577 static struct device_attribute ipr_device_id_attr = {
4578         .attr = {
4579                 .name =         "device_id",
4580                 .mode =         S_IRUGO,
4581         },
4582         .show = ipr_show_device_id
4583 };
4584
4585 /**
4586  * ipr_show_resource_type - Show the resource type for this device.
4587  * @dev:        device struct
4588  * @attr:       device attribute structure
4589  * @buf:        buffer
4590  *
4591  * Return value:
4592  *      number of bytes printed to buffer
4593  **/
4594 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4595 {
4596         struct scsi_device *sdev = to_scsi_device(dev);
4597         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4598         struct ipr_resource_entry *res;
4599         unsigned long lock_flags = 0;
4600         ssize_t len = -ENXIO;
4601
4602         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4603         res = (struct ipr_resource_entry *)sdev->hostdata;
4604
4605         if (res)
4606                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4607
4608         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4609         return len;
4610 }
4611
4612 static struct device_attribute ipr_resource_type_attr = {
4613         .attr = {
4614                 .name =         "resource_type",
4615                 .mode =         S_IRUGO,
4616         },
4617         .show = ipr_show_resource_type
4618 };
4619
4620 /**
4621  * ipr_show_raw_mode - Show the device's raw mode
4622  * @dev:        class device struct
4623  * @buf:        buffer
4624  *
4625  * Return value:
4626  *      number of bytes printed to buffer
4627  **/
4628 static ssize_t ipr_show_raw_mode(struct device *dev,
4629                                  struct device_attribute *attr, char *buf)
4630 {
4631         struct scsi_device *sdev = to_scsi_device(dev);
4632         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4633         struct ipr_resource_entry *res;
4634         unsigned long lock_flags = 0;
4635         ssize_t len;
4636
4637         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4638         res = (struct ipr_resource_entry *)sdev->hostdata;
4639         if (res)
4640                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4641         else
4642                 len = -ENXIO;
4643         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4644         return len;
4645 }
4646
4647 /**
4648  * ipr_store_raw_mode - Change the device's raw mode
4649  * @dev:        class device struct
4650  * @buf:        buffer
4651  *
4652  * Return value:
4653  *      number of bytes consumed on success / negative error on failure
4654  **/
4655 static ssize_t ipr_store_raw_mode(struct device *dev,
4656                                   struct device_attribute *attr,
4657                                   const char *buf, size_t count)
4658 {
4659         struct scsi_device *sdev = to_scsi_device(dev);
4660         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4661         struct ipr_resource_entry *res;
4662         unsigned long lock_flags = 0;
4663         ssize_t len;
4664
4665         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4666         res = (struct ipr_resource_entry *)sdev->hostdata;
4667         if (res) {
4668                 if (ipr_is_af_dasd_device(res)) {
4669                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4670                         len = strlen(buf);
4671                         if (res->sdev)
4672                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4673                                         res->raw_mode ? "enabled" : "disabled");
4674                 } else
4675                         len = -EINVAL;
4676         } else
4677                 len = -ENXIO;
4678         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4679         return len;
4680 }
4681
4682 static struct device_attribute ipr_raw_mode_attr = {
4683         .attr = {
4684                 .name =         "raw_mode",
4685                 .mode =         S_IRUGO | S_IWUSR,
4686         },
4687         .show = ipr_show_raw_mode,
4688         .store = ipr_store_raw_mode
4689 };
4690
4691 static struct device_attribute *ipr_dev_attrs[] = {
4692         &ipr_adapter_handle_attr,
4693         &ipr_resource_path_attr,
4694         &ipr_device_id_attr,
4695         &ipr_resource_type_attr,
4696         &ipr_raw_mode_attr,
4697         NULL,
4698 };
4699
4700 /**
4701  * ipr_biosparam - Return the HSC mapping
4702  * @sdev:                       scsi device struct
4703  * @block_device:       block device pointer
4704  * @capacity:           capacity of the device
4705  * @parm:                       Array containing returned HSC values.
4706  *
4707  * This function generates the HSC (heads/sectors/cylinders) parms that fdisk uses.
4708  * We want to make sure we return something that places partitions
4709  * on 4k boundaries for best performance with the IOA.
4710  *
4711  * Return value:
4712  *      0 on success
4713  **/
4714 static int ipr_biosparam(struct scsi_device *sdev,
4715                          struct block_device *block_device,
4716                          sector_t capacity, int *parm)
4717 {
4718         int heads, sectors;
4719         sector_t cylinders;
4720
4721         heads = 128;
4722         sectors = 32;
4723
4724         cylinders = capacity;
4725         sector_div(cylinders, (128 * 32));
4726
4727         /* return result */
4728         parm[0] = heads;
4729         parm[1] = sectors;
4730         parm[2] = cylinders;
4731
4732         return 0;
4733 }
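/*
 * Editorial note with a worked example: the fixed geometry above makes a
 * "cylinder" 128 * 32 = 4096 blocks, i.e. 2 MiB of 512-byte blocks, so
 * cylinder-aligned partitions are always 4k-aligned as intended.  For a
 * hypothetical 143374744-block device, sector_div() leaves cylinders =
 * 143374744 / 4096 = 35003 (integer division, remainder discarded).
 */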
4734
4735 /**
4736  * ipr_find_starget - Find target based on bus/target.
4737  * @starget:    scsi target struct
4738  *
4739  * Return value:
4740  *      resource entry pointer if found / NULL if not found
4741  **/
4742 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4743 {
4744         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4745         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4746         struct ipr_resource_entry *res;
4747
4748         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4749                 if ((res->bus == starget->channel) &&
4750                     (res->target == starget->id)) {
4751                         return res;
4752                 }
4753         }
4754
4755         return NULL;
4756 }
4757
4758 static struct ata_port_info sata_port_info;
4759
4760 /**
4761  * ipr_target_alloc - Prepare for commands to a SCSI target
4762  * @starget:    scsi target struct
4763  *
4764  * If the device is a SATA device, this function allocates an
4765  * ATA port with libata, else it does nothing.
4766  *
4767  * Return value:
4768  *      0 on success / non-0 on failure
4769  **/
4770 static int ipr_target_alloc(struct scsi_target *starget)
4771 {
4772         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4773         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4774         struct ipr_sata_port *sata_port;
4775         struct ata_port *ap;
4776         struct ipr_resource_entry *res;
4777         unsigned long lock_flags;
4778
4779         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4780         res = ipr_find_starget(starget);
4781         starget->hostdata = NULL;
4782
4783         if (res && ipr_is_gata(res)) {
4784                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4785                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4786                 if (!sata_port)
4787                         return -ENOMEM;
4788
4789                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4790                 if (ap) {
4791                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4792                         sata_port->ioa_cfg = ioa_cfg;
4793                         sata_port->ap = ap;
4794                         sata_port->res = res;
4795
4796                         res->sata_port = sata_port;
4797                         ap->private_data = sata_port;
4798                         starget->hostdata = sata_port;
4799                 } else {
4800                         kfree(sata_port);
4801                         return -ENOMEM;
4802                 }
4803         }
4804         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4805
4806         return 0;
4807 }
4808
4809 /**
4810  * ipr_target_destroy - Destroy a SCSI target
4811  * @starget:    scsi target struct
4812  *
4813  * If the device was a SATA device, this function frees the libata
4814  * ATA port, else it does nothing.
4815  *
4816  **/
4817 static void ipr_target_destroy(struct scsi_target *starget)
4818 {
4819         struct ipr_sata_port *sata_port = starget->hostdata;
4820         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4821         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4822
4823         if (ioa_cfg->sis64) {
4824                 if (!ipr_find_starget(starget)) {
4825                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4826                                 clear_bit(starget->id, ioa_cfg->array_ids);
4827                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4828                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4829                         else if (starget->channel == 0)
4830                                 clear_bit(starget->id, ioa_cfg->target_ids);
4831                 }
4832         }
4833
4834         if (sata_port) {
4835                 starget->hostdata = NULL;
4836                 ata_sas_port_destroy(sata_port->ap);
4837                 kfree(sata_port);
4838         }
4839 }
4840
4841 /**
4842  * ipr_find_sdev - Find device based on bus/target/lun.
4843  * @sdev:       scsi device struct
4844  *
4845  * Return value:
4846  *      resource entry pointer if found / NULL if not found
4847  **/
4848 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4849 {
4850         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4851         struct ipr_resource_entry *res;
4852
4853         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4854                 if ((res->bus == sdev->channel) &&
4855                     (res->target == sdev->id) &&
4856                     (res->lun == sdev->lun))
4857                         return res;
4858         }
4859
4860         return NULL;
4861 }
4862
4863 /**
4864  * ipr_slave_destroy - Unconfigure a SCSI device
4865  * @sdev:       scsi device struct
4866  *
4867  * Return value:
4868  *      nothing
4869  **/
4870 static void ipr_slave_destroy(struct scsi_device *sdev)
4871 {
4872         struct ipr_resource_entry *res;
4873         struct ipr_ioa_cfg *ioa_cfg;
4874         unsigned long lock_flags = 0;
4875
4876         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4877
4878         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4879         res = (struct ipr_resource_entry *) sdev->hostdata;
4880         if (res) {
4881                 if (res->sata_port)
4882                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4883                 sdev->hostdata = NULL;
4884                 res->sdev = NULL;
4885                 res->sata_port = NULL;
4886         }
4887         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4888 }
4889
4890 /**
4891  * ipr_slave_configure - Configure a SCSI device
4892  * @sdev:       scsi device struct
4893  *
4894  * This function configures the specified scsi device.
4895  *
4896  * Return value:
4897  *      0 on success
4898  **/
4899 static int ipr_slave_configure(struct scsi_device *sdev)
4900 {
4901         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4902         struct ipr_resource_entry *res;
4903         struct ata_port *ap = NULL;
4904         unsigned long lock_flags = 0;
4905         char buffer[IPR_MAX_RES_PATH_LENGTH];
4906
4907         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4908         res = sdev->hostdata;
4909         if (res) {
4910                 if (ipr_is_af_dasd_device(res))
4911                         sdev->type = TYPE_RAID;
4912                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4913                         sdev->scsi_level = 4;
4914                         sdev->no_uld_attach = 1;
4915                 }
4916                 if (ipr_is_vset_device(res)) {
4917                         sdev->scsi_level = SCSI_SPC_3;
4918                         sdev->no_report_opcodes = 1;
4919                         blk_queue_rq_timeout(sdev->request_queue,
4920                                              IPR_VSET_RW_TIMEOUT);
4921                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4922                 }
4923                 if (ipr_is_gata(res) && res->sata_port)
4924                         ap = res->sata_port->ap;
4925                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4926
4927                 if (ap) {
4928                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4929                         ata_sas_slave_configure(sdev, ap);
4930                 }
4931
4932                 if (ioa_cfg->sis64)
4933                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4934                                     ipr_format_res_path(ioa_cfg,
4935                                 res->res_path, buffer, sizeof(buffer)));
4936                 return 0;
4937         }
4938         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4939         return 0;
4940 }
4941
4942 /**
4943  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4944  * @sdev:       scsi device struct
4945  *
4946  * This function initializes an ATA port so that future commands
4947  * sent through queuecommand will work.
4948  *
4949  * Return value:
4950  *      0 on success
4951  **/
4952 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4953 {
4954         struct ipr_sata_port *sata_port = NULL;
4955         int rc = -ENXIO;
4956
4957         ENTER;
4958         if (sdev->sdev_target)
4959                 sata_port = sdev->sdev_target->hostdata;
4960         if (sata_port) {
4961                 rc = ata_sas_port_init(sata_port->ap);
4962                 if (rc == 0)
4963                         rc = ata_sas_sync_probe(sata_port->ap);
4964         }
4965
4966         if (rc)
4967                 ipr_slave_destroy(sdev);
4968
4969         LEAVE;
4970         return rc;
4971 }
4972
4973 /**
4974  * ipr_slave_alloc - Prepare for commands to a device.
4975  * @sdev:       scsi device struct
4976  *
4977  * This function saves a pointer to the resource entry
4978  * in the scsi device struct if the device exists. We
4979  * can then use this pointer in ipr_queuecommand when
4980  * handling new commands.
4981  *
4982  * Return value:
4983  *      0 on success / -ENXIO if device does not exist
4984  **/
4985 static int ipr_slave_alloc(struct scsi_device *sdev)
4986 {
4987         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4988         struct ipr_resource_entry *res;
4989         unsigned long lock_flags;
4990         int rc = -ENXIO;
4991
4992         sdev->hostdata = NULL;
4993
4994         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4995
4996         res = ipr_find_sdev(sdev);
4997         if (res) {
4998                 res->sdev = sdev;
4999                 res->add_to_ml = 0;
5000                 res->in_erp = 0;
5001                 sdev->hostdata = res;
5002                 if (!ipr_is_naca_model(res))
5003                         res->needs_sync_complete = 1;
5004                 rc = 0;
5005                 if (ipr_is_gata(res)) {
5006                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5007                         return ipr_ata_slave_alloc(sdev);
5008                 }
5009         }
5010
5011         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5012
5013         return rc;
5014 }
5015
5016 /**
5017  * ipr_match_lun - Match function for specified LUN
5018  * @ipr_cmd:    ipr command struct
5019  * @device:             device to match (sdev)
5020  *
5021  * Returns:
5022  *      1 if command matches sdev / 0 if command does not match sdev
5023  **/
5024 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5025 {
5026         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5027                 return 1;
5028         return 0;
5029 }
5030
5031 /**
5032  * ipr_cmnd_is_free - Check if a command is free or not
5033  * @ipr_cmd:    ipr command struct
5034  *
5035  * Returns:
5036  *      true / false
5037  **/
5038 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5039 {
5040         struct ipr_cmnd *loop_cmd;
5041
5042         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5043                 if (loop_cmd == ipr_cmd)
5044                         return true;
5045         }
5046
5047         return false;
5048 }
5049
5050 /**
5051  * ipr_match_res - Match function for specified resource entry
5052  * @ipr_cmd:    ipr command struct
5053  * @resource:   resource entry to match
5054  *
5055  * Returns:
5056  *      1 if command matches the resource entry / 0 if it does not
5057  **/
5058 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5059 {
5060         struct ipr_resource_entry *res = resource;
5061
5062         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5063                 return 1;
5064         return 0;
5065 }
5066
5067 /**
5068  * ipr_wait_for_ops - Wait for matching commands to complete
5069  * @ioa_cfg:    ioa config struct
5070  * @device:             device to match (sdev)
5071  * @match:              match function to use
5072  *
5073  * Returns:
5074  *      SUCCESS / FAILED
5075  **/
5076 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5077                             int (*match)(struct ipr_cmnd *, void *))
5078 {
5079         struct ipr_cmnd *ipr_cmd;
5080         int wait, i;
5081         unsigned long flags;
5082         struct ipr_hrr_queue *hrrq;
5083         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5084         DECLARE_COMPLETION_ONSTACK(comp);
5085
5086         ENTER;
5087         do {
5088                 wait = 0;
5089
5090                 for_each_hrrq(hrrq, ioa_cfg) {
5091                         spin_lock_irqsave(hrrq->lock, flags);
5092                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5093                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5094                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5095                                         if (match(ipr_cmd, device)) {
5096                                                 ipr_cmd->eh_comp = &comp;
5097                                                 wait++;
5098                                         }
5099                                 }
5100                         }
5101                         spin_unlock_irqrestore(hrrq->lock, flags);
5102                 }
5103
5104                 if (wait) {
5105                         timeout = wait_for_completion_timeout(&comp, timeout);
5106
5107                         if (!timeout) {
5108                                 wait = 0;
5109
5110                                 for_each_hrrq(hrrq, ioa_cfg) {
5111                                         spin_lock_irqsave(hrrq->lock, flags);
5112                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5113                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5114                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5115                                                         if (match(ipr_cmd, device)) {
5116                                                                 ipr_cmd->eh_comp = NULL;
5117                                                                 wait++;
5118                                                         }
5119                                                 }
5120                                         }
5121                                         spin_unlock_irqrestore(hrrq->lock, flags);
5122                                 }
5123
5124                                 if (wait)
5125                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5126                                 LEAVE;
5127                                 return wait ? FAILED : SUCCESS;
5128                         }
5129                 }
5130         } while (wait);
5131
5132         LEAVE;
5133         return SUCCESS;
5134 }
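/*
 * Editorial note on the loop above: each pass arms eh_comp on every
 * outstanding command that matches, then sleeps on the on-stack
 * completion.  The remaining timeout is carried between sleeps, so
 * IPR_ABORT_TASK_TIMEOUT bounds the total wait; on expiry the queues are
 * re-scanned to disarm eh_comp, and FAILED is returned only if matching
 * commands are still outstanding.
 */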
5135
5136 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5137 {
5138         struct ipr_ioa_cfg *ioa_cfg;
5139         unsigned long lock_flags = 0;
5140         int rc = SUCCESS;
5141
5142         ENTER;
5143         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5144         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5145
5146         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5147                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5148                 dev_err(&ioa_cfg->pdev->dev,
5149                         "Adapter being reset as a result of error recovery.\n");
5150
5151                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5152                         ioa_cfg->sdt_state = GET_DUMP;
5153         }
5154
5155         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5156         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5157         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5158
5159         /* If a host reset arrived while the adapter was already being
5160          * reset and that reset failed, fail this host reset too. */
5161         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5162                 ipr_trace;
5163                 rc = FAILED;
5164         }
5165
5166         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5167         LEAVE;
5168         return rc;
5169 }
5170
5171 /**
5172  * ipr_device_reset - Reset the device
5173  * @ioa_cfg:    ioa config struct
5174  * @res:                resource entry struct
5175  *
5176  * This function issues a device reset to the affected device.
5177  * If the device is a SCSI device, a LUN reset will be sent
5178  * to the device first. If that does not work, a target reset
5179  * will be sent. If the device is a SATA device, a PHY reset will
5180  * be sent.
5181  *
5182  * Return value:
5183  *      0 on success / non-zero on failure
5184  **/
5185 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5186                             struct ipr_resource_entry *res)
5187 {
5188         struct ipr_cmnd *ipr_cmd;
5189         struct ipr_ioarcb *ioarcb;
5190         struct ipr_cmd_pkt *cmd_pkt;
5191         struct ipr_ioarcb_ata_regs *regs;
5192         u32 ioasc;
5193
5194         ENTER;
5195         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5196         ioarcb = &ipr_cmd->ioarcb;
5197         cmd_pkt = &ioarcb->cmd_pkt;
5198
5199         if (ipr_cmd->ioa_cfg->sis64) {
5200                 regs = &ipr_cmd->i.ata_ioadl.regs;
5201                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5202         } else
5203                 regs = &ioarcb->u.add_data.u.regs;
5204
5205         ioarcb->res_handle = res->res_handle;
5206         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5207         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5208         if (ipr_is_gata(res)) {
5209                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5210                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5211                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5212         }
5213
5214         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5215         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5216         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5217         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5218                 if (ipr_cmd->ioa_cfg->sis64)
5219                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5220                                sizeof(struct ipr_ioasa_gata));
5221                 else
5222                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5223                                sizeof(struct ipr_ioasa_gata));
5224         }
5225
5226         LEAVE;
5227         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5228 }
5229
5230 /**
5231  * ipr_sata_reset - Reset the SATA port
5232  * @link:       SATA link to reset
5233  * @classes:    class of the attached device
      * @deadline:   unused
5234  *
5235  * This function issues a SATA phy reset to the affected ATA link.
5236  *
5237  * Return value:
5238  *      0 on success / non-zero on failure
5239  **/
5240 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5241                                 unsigned long deadline)
5242 {
5243         struct ipr_sata_port *sata_port = link->ap->private_data;
5244         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5245         struct ipr_resource_entry *res;
5246         unsigned long lock_flags = 0;
5247         int rc = -ENXIO, ret;
5248
5249         ENTER;
5250         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5251         while (ioa_cfg->in_reset_reload) {
5252                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5253                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5254                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5255         }
5256
5257         res = sata_port->res;
5258         if (res) {
5259                 rc = ipr_device_reset(ioa_cfg, res);
5260                 *classes = res->ata_class;
5261                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5262
5263                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5264                 if (ret != SUCCESS) {
5265                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5266                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5267                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5268
5269                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5270                 }
5271         } else
5272                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5273
5274         LEAVE;
5275         return rc;
5276 }
5277
5278 /**
5279  * __ipr_eh_dev_reset - Reset the device
5280  * @scsi_cmd:   scsi command struct
5281  *
5282  * This function issues a device reset to the affected device.
5283  * A LUN reset will be sent to the device first. If that does
5284  * not work, a target reset will be sent.
5285  *
5286  * Return value:
5287  *      SUCCESS / FAILED
5288  **/
5289 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5290 {
5291         struct ipr_cmnd *ipr_cmd;
5292         struct ipr_ioa_cfg *ioa_cfg;
5293         struct ipr_resource_entry *res;
5294         struct ata_port *ap;
5295         int rc = 0, i;
5296         struct ipr_hrr_queue *hrrq;
5297
5298         ENTER;
5299         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5300         res = scsi_cmd->device->hostdata;
5301
5302         /*
5303          * If we are currently going through reset/reload, return failed. This will force the
5304          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5305          * reset to complete
5306          */
5307         if (ioa_cfg->in_reset_reload)
5308                 return FAILED;
5309         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5310                 return FAILED;
5311
5312         for_each_hrrq(hrrq, ioa_cfg) {
5313                 spin_lock(&hrrq->_lock);
5314                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5315                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5316
5317                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5318                                 if (!ipr_cmd->qc)
5319                                         continue;
5320                                 if (ipr_cmnd_is_free(ipr_cmd))
5321                                         continue;
5322
5323                                 ipr_cmd->done = ipr_sata_eh_done;
5324                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5325                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5326                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5327                                 }
5328                         }
5329                 }
5330                 spin_unlock(&hrrq->_lock);
5331         }
5332         res->resetting_device = 1;
5333         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5334
5335         if (ipr_is_gata(res) && res->sata_port) {
5336                 ap = res->sata_port->ap;
5337                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5338                 ata_std_error_handler(ap);
5339                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5340         } else
5341                 rc = ipr_device_reset(ioa_cfg, res);
5342         res->resetting_device = 0;
5343         res->reset_occurred = 1;
5344
5345         LEAVE;
5346         return rc ? FAILED : SUCCESS;
5347 }
5348
5349 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5350 {
5351         int rc;
5352         struct ipr_ioa_cfg *ioa_cfg;
5353         struct ipr_resource_entry *res;
5354
5355         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5356         res = cmd->device->hostdata;
5357
5358         if (!res)
5359                 return FAILED;
5360
5361         spin_lock_irq(cmd->device->host->host_lock);
5362         rc = __ipr_eh_dev_reset(cmd);
5363         spin_unlock_irq(cmd->device->host->host_lock);
5364
5365         if (rc == SUCCESS) {
5366                 if (ipr_is_gata(res) && res->sata_port)
5367                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5368                 else
5369                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5370         }
5371
5372         return rc;
5373 }
5374
5375 /**
5376  * ipr_bus_reset_done - Op done function for bus reset.
5377  * @ipr_cmd:    ipr command struct
5378  *
5379  * This function is the op done function for a bus reset
5380  *
5381  * Return value:
5382  *      none
5383  **/
5384 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5385 {
5386         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5387         struct ipr_resource_entry *res;
5388
5389         ENTER;
5390         if (!ioa_cfg->sis64)
5391                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5392                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5393                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5394                                 break;
5395                         }
5396                 }
5397
5398         /*
5399          * If abort has not completed, indicate the reset has, else call the
5400          * abort's done function to wake the sleeping eh thread
5401          */
5402         if (ipr_cmd->sibling->sibling)
5403                 ipr_cmd->sibling->sibling = NULL;
5404         else
5405                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5406
5407         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5408         LEAVE;
5409 }
5410
5411 /**
5412  * ipr_abort_timeout - An abort task has timed out
5413  * @t:          timer context used to fetch the ipr command struct
5414  *
5415  * This function handles when an abort task times out. If this
5416  * happens we issue a bus reset since we have resources tied
5417  * up that must be freed before returning to the midlayer.
5418  *
5419  * Return value:
5420  *      none
5421  **/
5422 static void ipr_abort_timeout(struct timer_list *t)
5423 {
5424         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5425         struct ipr_cmnd *reset_cmd;
5426         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5427         struct ipr_cmd_pkt *cmd_pkt;
5428         unsigned long lock_flags = 0;
5429
5430         ENTER;
5431         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5432         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5433                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5434                 return;
5435         }
5436
5437         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5438         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5439         ipr_cmd->sibling = reset_cmd;
5440         reset_cmd->sibling = ipr_cmd;
5441         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5442         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5443         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5444         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5445         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5446
5447         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5448         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5449         LEAVE;
5450 }
5451
5452 /**
5453  * ipr_cancel_op - Cancel specified op
5454  * @scsi_cmd:   scsi command struct
5455  *
5456  * This function cancels the specified op.
5457  *
5458  * Return value:
5459  *      SUCCESS / FAILED
5460  **/
5461 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5462 {
5463         struct ipr_cmnd *ipr_cmd;
5464         struct ipr_ioa_cfg *ioa_cfg;
5465         struct ipr_resource_entry *res;
5466         struct ipr_cmd_pkt *cmd_pkt;
5467         u32 ioasc, int_reg;
5468         int i, op_found = 0;
5469         struct ipr_hrr_queue *hrrq;
5470
5471         ENTER;
5472         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5473         res = scsi_cmd->device->hostdata;
5474
5475         /* If we are currently going through reset/reload, return failed.
5476          * This will force the mid-layer to call ipr_eh_host_reset,
5477          * which will then go to sleep and wait for the reset to complete
5478          */
5479         if (ioa_cfg->in_reset_reload ||
5480             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5481                 return FAILED;
5482         if (!res)
5483                 return FAILED;
5484
5485         /*
5486          * If we are aborting a timed out op, chances are the timeout was caused
5487          * by an EEH error that has not yet been detected. In that case, reading
5488          * a register will trigger the EEH recovery infrastructure.
5489          */
5490         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5491
5492         if (!ipr_is_gscsi(res))
5493                 return FAILED;
5494
5495         for_each_hrrq(hrrq, ioa_cfg) {
5496                 spin_lock(&hrrq->_lock);
5497                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5498                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5499                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5500                                         op_found = 1;
5501                                         break;
5502                                 }
5503                         }
5504                 }
5505                 spin_unlock(&hrrq->_lock);
5506         }
5507
5508         if (!op_found)
5509                 return SUCCESS;
5510
5511         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5512         ipr_cmd->ioarcb.res_handle = res->res_handle;
5513         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5514         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5515         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5516         ipr_cmd->u.sdev = scsi_cmd->device;
5517
5518         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5519                     scsi_cmd->cmnd[0]);
5520         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5521         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5522
5523         /*
5524          * If the abort task timed out and we sent a bus reset, we will get
5525          * one of the following responses to the abort
5526          */
5527         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5528                 ioasc = 0;
5529                 ipr_trace;
5530         }
5531
5532         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5533         if (!ipr_is_naca_model(res))
5534                 res->needs_sync_complete = 1;
5535
5536         LEAVE;
5537         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5538 }
5539
5540 /**
5541  * ipr_scan_finished - Report whether the device scan is done
5542  * @shost:      scsi host struct
      * @elapsed_time: elapsed time of the scan in jiffies
5543  *
5544  * Return value:
5545  *      0 if scan in progress / 1 if scan is complete
5546  **/
5547 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5548 {
5549         unsigned long lock_flags;
5550         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5551         int rc = 0;
5552
5553         spin_lock_irqsave(shost->host_lock, lock_flags);
5554         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5555                 rc = 1;
5556         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5557                 rc = 1;
5558         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5559         return rc;
5560 }
5561
5562 /**
5563  * ipr_eh_abort - Abort a single op
5564  * @scsi_cmd:   scsi command struct
5565  *
5566  * Return value:
5567  *      SUCCESS / FAILED
5568  **/
5569 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5570 {
5571         unsigned long flags;
5572         int rc;
5573         struct ipr_ioa_cfg *ioa_cfg;
5574
5575         ENTER;
5576
5577         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5578
5579         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5580         rc = ipr_cancel_op(scsi_cmd);
5581         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5582
5583         if (rc == SUCCESS)
5584                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5585         LEAVE;
5586         return rc;
5587 }
5588
5589 /**
5590  * ipr_handle_other_interrupt - Handle "other" interrupts
5591  * @ioa_cfg:    ioa config struct
5592  * @int_reg:    interrupt register
5593  *
5594  * Return value:
5595  *      IRQ_NONE / IRQ_HANDLED
5596  **/
5597 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5598                                               u32 int_reg)
5599 {
5600         irqreturn_t rc = IRQ_HANDLED;
5601         u32 int_mask_reg;
5602
5603         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5604         int_reg &= ~int_mask_reg;
5605
5606         /* If an interrupt on the adapter did not occur, ignore it;
5607          * on SIS64, first check for a stage change interrupt.
5608          */
5609         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5610                 if (ioa_cfg->sis64) {
5611                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5612                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5613                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5614
5615                                 /* clear stage change */
5616                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5617                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5618                                 list_del(&ioa_cfg->reset_cmd->queue);
5619                                 del_timer(&ioa_cfg->reset_cmd->timer);
5620                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5621                                 return IRQ_HANDLED;
5622                         }
5623                 }
5624
5625                 return IRQ_NONE;
5626         }
5627
5628         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5629                 /* Mask the interrupt */
5630                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5631                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5632
5633                 list_del(&ioa_cfg->reset_cmd->queue);
5634                 del_timer(&ioa_cfg->reset_cmd->timer);
5635                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5636         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5637                 if (ioa_cfg->clear_isr) {
5638                         if (ipr_debug && printk_ratelimit())
5639                                 dev_err(&ioa_cfg->pdev->dev,
5640                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5641                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5642                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5643                         return IRQ_NONE;
5644                 }
5645         } else {
5646                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5647                         ioa_cfg->ioa_unit_checked = 1;
5648                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5649                         dev_err(&ioa_cfg->pdev->dev,
5650                                 "No Host RRQ. 0x%08X\n", int_reg);
5651                 else
5652                         dev_err(&ioa_cfg->pdev->dev,
5653                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5654
5655                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5656                         ioa_cfg->sdt_state = GET_DUMP;
5657
5658                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5659                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5660         }
5661
5662         return rc;
5663 }
5664
5665 /**
5666  * ipr_isr_eh - Interrupt service routine error handler
5667  * @ioa_cfg:    ioa config struct
5668  * @msg:        message to log
      * @number:     number to log with the message
5669  *
5670  * Return value:
5671  *      none
5672  **/
5673 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5674 {
5675         ioa_cfg->errors_logged++;
5676         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5677
5678         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5679                 ioa_cfg->sdt_state = GET_DUMP;
5680
5681         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5682 }
5683
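     /**
      * ipr_process_hrrq - Pull responses off a host request response queue
      * @hrr_queue:  HRR queue to process
      * @budget:     maximum number of responses to process, or -1 for no limit
      * @doneq:      list to which completed commands are moved
      *
      * Return value:
      *      number of responses processed
      **/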
5684 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5685                                                 struct list_head *doneq)
5686 {
5687         u32 ioasc;
5688         u16 cmd_index;
5689         struct ipr_cmnd *ipr_cmd;
5690         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5691         int num_hrrq = 0;
5692
5693         /* If interrupts are disabled, ignore the interrupt */
5694         if (!hrr_queue->allow_interrupts)
5695                 return 0;
5696
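             /*
              * A response entry is valid while its toggle bit matches the
              * queue's toggle bit; the queue's bit is flipped each time the
              * queue wraps, so stale entries from the previous pass are skipped.
              */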
5697         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5698                hrr_queue->toggle_bit) {
5699
5700                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5701                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5702                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5703
5704                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5705                              cmd_index < hrr_queue->min_cmd_id)) {
5706                         ipr_isr_eh(ioa_cfg,
5707                                 "Invalid response handle from IOA: ",
5708                                 cmd_index);
5709                         break;
5710                 }
5711
5712                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5713                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5714
5715                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5716
5717                 list_move_tail(&ipr_cmd->queue, doneq);
5718
5719                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5720                         hrr_queue->hrrq_curr++;
5721                 } else {
5722                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5723                         hrr_queue->toggle_bit ^= 1u;
5724                 }
5725                 num_hrrq++;
5726                 if (budget > 0 && num_hrrq >= budget)
5727                         break;
5728         }
5729
5730         return num_hrrq;
5731 }
5732
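     /**
      * ipr_iopoll - irq_poll callback for deferred HRRQ processing
      * @iop:        irq_poll structure embedded in the HRR queue
      * @budget:     maximum number of completions to process in this poll
      *
      * Return value:
      *      number of operations completed
      **/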
5733 static int ipr_iopoll(struct irq_poll *iop, int budget)
5734 {
5735         struct ipr_ioa_cfg *ioa_cfg;
5736         struct ipr_hrr_queue *hrrq;
5737         struct ipr_cmnd *ipr_cmd, *temp;
5738         unsigned long hrrq_flags;
5739         int completed_ops;
5740         LIST_HEAD(doneq);
5741
5742         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5743         ioa_cfg = hrrq->ioa_cfg;
5744
5745         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5746         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5747
5748         if (completed_ops < budget)
5749                 irq_poll_complete(iop);
5750         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5751
5752         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5753                 list_del(&ipr_cmd->queue);
5754                 del_timer(&ipr_cmd->timer);
5755                 ipr_cmd->fast_done(ipr_cmd);
5756         }
5757
5758         return completed_ops;
5759 }
5760
5761 /**
5762  * ipr_isr - Interrupt service routine
5763  * @irq:        irq number
5764  * @devp:       pointer to ioa config struct
5765  *
5766  * Return value:
5767  *      IRQ_NONE / IRQ_HANDLED
5768  **/
5769 static irqreturn_t ipr_isr(int irq, void *devp)
5770 {
5771         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5772         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5773         unsigned long hrrq_flags = 0;
5774         u32 int_reg = 0;
5775         int num_hrrq = 0;
5776         int irq_none = 0;
5777         struct ipr_cmnd *ipr_cmd, *temp;
5778         irqreturn_t rc = IRQ_NONE;
5779         LIST_HEAD(doneq);
5780
5781         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5782         /* If interrupts are disabled, ignore the interrupt */
5783         if (!hrrq->allow_interrupts) {
5784                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5785                 return IRQ_NONE;
5786         }
5787
5788         while (1) {
5789                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5790                         rc =  IRQ_HANDLED;
5791
5792                         if (!ioa_cfg->clear_isr)
5793                                 break;
5794
5795                         /* Clear the PCI interrupt */
5796                         num_hrrq = 0;
5797                         do {
5798                                 writel(IPR_PCII_HRRQ_UPDATED,
5799                                      ioa_cfg->regs.clr_interrupt_reg32);
5800                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5801                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5802                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5803
5804                 } else if (rc == IRQ_NONE && irq_none == 0) {
5805                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5806                         irq_none++;
5807                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5808                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5809                         ipr_isr_eh(ioa_cfg,
5810                                 "Error clearing HRRQ: ", num_hrrq);
5811                         rc = IRQ_HANDLED;
5812                         break;
5813                 } else
5814                         break;
5815         }
5816
5817         if (unlikely(rc == IRQ_NONE))
5818                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5819
5820         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5821         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5822                 list_del(&ipr_cmd->queue);
5823                 del_timer(&ipr_cmd->timer);
5824                 ipr_cmd->fast_done(ipr_cmd);
5825         }
5826         return rc;
5827 }
5828
5829 /**
5830  * ipr_isr_mhrrq - Interrupt service routine for additional HRR queues
5831  * @irq:        irq number
5832  * @devp:       pointer to ioa config struct
5833  *
5834  * Return value:
5835  *      IRQ_NONE / IRQ_HANDLED
5836  **/
5837 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5838 {
5839         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5840         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5841         unsigned long hrrq_flags = 0;
5842         struct ipr_cmnd *ipr_cmd, *temp;
5843         irqreturn_t rc = IRQ_NONE;
5844         LIST_HEAD(doneq);
5845
5846         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5847
5848         /* If interrupts are disabled, ignore the interrupt */
5849         if (!hrrq->allow_interrupts) {
5850                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5851                 return IRQ_NONE;
5852         }
5853
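             /* When irq_poll is in use (SIS64 with multiple vectors and a
              * nonzero polling weight), defer completion processing to the
              * iopoll handler instead of draining the queue here.
              */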
5854         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5855                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5856                        hrrq->toggle_bit) {
5857                         irq_poll_sched(&hrrq->iopoll);
5858                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5859                         return IRQ_HANDLED;
5860                 }
5861         } else {
5862                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5863                         hrrq->toggle_bit)
5864
5865                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5866                                 rc =  IRQ_HANDLED;
5867         }
5868
5869         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5870
5871         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5872                 list_del(&ipr_cmd->queue);
5873                 del_timer(&ipr_cmd->timer);
5874                 ipr_cmd->fast_done(ipr_cmd);
5875         }
5876         return rc;
5877 }
5878
5879 /**
5880  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5881  * @ioa_cfg:    ioa config struct
5882  * @ipr_cmd:    ipr command struct
5883  *
5884  * Return value:
5885  *      0 on success / -1 on failure
5886  **/
5887 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5888                              struct ipr_cmnd *ipr_cmd)
5889 {
5890         int i, nseg;
5891         struct scatterlist *sg;
5892         u32 length;
5893         u32 ioadl_flags = 0;
5894         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5895         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5896         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5897
5898         length = scsi_bufflen(scsi_cmd);
5899         if (!length)
5900                 return 0;
5901
5902         nseg = scsi_dma_map(scsi_cmd);
5903         if (nseg < 0) {
5904                 if (printk_ratelimit())
5905                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5906                 return -1;
5907         }
5908
5909         ipr_cmd->dma_use_sg = nseg;
5910
5911         ioarcb->data_transfer_length = cpu_to_be32(length);
5912         ioarcb->ioadl_len =
5913                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5914
5915         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5916                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5917                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5918         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5919                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5920
5921         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5922                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5923                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5924                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5925         }
5926
5927         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5928         return 0;
5929 }
5930
5931 /**
5932  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5933  * @ioa_cfg:    ioa config struct
5934  * @ipr_cmd:    ipr command struct
5935  *
5936  * Return value:
5937  *      0 on success / -1 on failure
5938  **/
5939 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5940                            struct ipr_cmnd *ipr_cmd)
5941 {
5942         int i, nseg;
5943         struct scatterlist *sg;
5944         u32 length;
5945         u32 ioadl_flags = 0;
5946         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5947         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5948         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5949
5950         length = scsi_bufflen(scsi_cmd);
5951         if (!length)
5952                 return 0;
5953
5954         nseg = scsi_dma_map(scsi_cmd);
5955         if (nseg < 0) {
5956                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5957                 return -1;
5958         }
5959
5960         ipr_cmd->dma_use_sg = nseg;
5961
5962         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5963                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5964                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5965                 ioarcb->data_transfer_length = cpu_to_be32(length);
5966                 ioarcb->ioadl_len =
5967                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5968         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5969                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5970                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5971                 ioarcb->read_ioadl_len =
5972                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5973         }
5974
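             /* For short S/G lists, use the descriptors embedded in the
              * IOARCB itself rather than the external IOADL.
              */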
5975         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5976                 ioadl = ioarcb->u.add_data.u.ioadl;
5977                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5978                                     offsetof(struct ipr_ioarcb, u.add_data));
5979                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5980         }
5981
5982         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5983                 ioadl[i].flags_and_data_len =
5984                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5985                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5986         }
5987
5988         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5989         return 0;
5990 }
5991
5992 /**
5993  * __ipr_erp_done - Process completion of ERP for a device
5994  * @ipr_cmd:            ipr command struct
5995  *
5996  * This function copies the sense buffer into the scsi_cmd
5997  * struct and calls the scsi_done function.
5998  *
5999  * Return value:
6000  *      nothing
6001  **/
6002 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6003 {
6004         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6005         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6006         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6007
6008         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6009                 scsi_cmd->result |= (DID_ERROR << 16);
6010                 scmd_printk(KERN_ERR, scsi_cmd,
6011                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6012         } else {
6013                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6014                        SCSI_SENSE_BUFFERSIZE);
6015         }
6016
6017         if (res) {
6018                 if (!ipr_is_naca_model(res))
6019                         res->needs_sync_complete = 1;
6020                 res->in_erp = 0;
6021         }
6022         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6023         scsi_cmd->scsi_done(scsi_cmd);
6024         if (ipr_cmd->eh_comp)
6025                 complete(ipr_cmd->eh_comp);
6026         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6027 }
6028
6029 /**
6030  * ipr_erp_done - Process completion of ERP for a device
6031  * @ipr_cmd:            ipr command struct
6032  *
6033  * This function copies the sense buffer into the scsi_cmd
6034  * struct and calls the scsi_done function.
6035  *
6036  * Return value:
6037  *      nothing
6038  **/
6039 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6040 {
6041         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6042         unsigned long hrrq_flags;
6043
6044         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6045         __ipr_erp_done(ipr_cmd);
6046         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6047 }
6048
6049 /**
6050  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6051  * @ipr_cmd:    ipr command struct
6052  *
6053  * Return value:
6054  *      none
6055  **/
6056 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6057 {
6058         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6059         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6060         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6061
6062         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6063         ioarcb->data_transfer_length = 0;
6064         ioarcb->read_data_transfer_length = 0;
6065         ioarcb->ioadl_len = 0;
6066         ioarcb->read_ioadl_len = 0;
6067         ioasa->hdr.ioasc = 0;
6068         ioasa->hdr.residual_data_len = 0;
6069
6070         if (ipr_cmd->ioa_cfg->sis64)
6071                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6072                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6073         else {
6074                 ioarcb->write_ioadl_addr =
6075                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6076                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6077         }
6078 }
6079
6080 /**
6081  * __ipr_erp_request_sense - Send request sense to a device
6082  * @ipr_cmd:    ipr command struct
6083  *
6084  * This function sends a request sense to a device as a result
6085  * of a check condition.
6086  *
6087  * Return value:
6088  *      nothing
6089  **/
6090 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6091 {
6092         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6093         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6094
6095         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6096                 __ipr_erp_done(ipr_cmd);
6097                 return;
6098         }
6099
6100         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6101
6102         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6103         cmd_pkt->cdb[0] = REQUEST_SENSE;
6104         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6105         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6106         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6107         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6108
6109         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6110                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6111
6112         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6113                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6114 }
6115
6116 /**
6117  * ipr_erp_request_sense - Send request sense to a device
6118  * @ipr_cmd:    ipr command struct
6119  *
6120  * This function sends a request sense to a device as a result
6121  * of a check condition.
6122  *
6123  * Return value:
6124  *      nothing
6125  **/
6126 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6127 {
6128         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6129         unsigned long hrrq_flags;
6130
6131         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6132         __ipr_erp_request_sense(ipr_cmd);
6133         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6134 }
6135
6136 /**
6137  * ipr_erp_cancel_all - Send cancel all to a device
6138  * @ipr_cmd:    ipr command struct
6139  *
6140  * This function sends a cancel all to a device to clear the
6141  * queue. If we are running TCQ on the device, QERR is set to 1,
6142  * which means all outstanding ops have been dropped on the floor.
6143  * Cancel all will return them to us.
6144  *
6145  * Return value:
6146  *      nothing
6147  **/
6148 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6149 {
6150         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6151         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6152         struct ipr_cmd_pkt *cmd_pkt;
6153
6154         res->in_erp = 1;
6155
6156         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6157
6158         if (!scsi_cmd->device->simple_tags) {
6159                 __ipr_erp_request_sense(ipr_cmd);
6160                 return;
6161         }
6162
6163         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6164         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6165         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6166
6167         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6168                    IPR_CANCEL_ALL_TIMEOUT);
6169 }
6170
6171 /**
6172  * ipr_dump_ioasa - Dump contents of IOASA
6173  * @ioa_cfg:    ioa config struct
6174  * @ipr_cmd:    ipr command struct
6175  * @res:                resource entry struct
6176  *
6177  * This function is invoked by the interrupt handler when ops
6178  * fail. It will log the IOASA if appropriate. Only called
6179  * for GPDD ops.
6180  *
6181  * Return value:
6182  *      none
6183  **/
6184 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6185                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6186 {
6187         int i;
6188         u16 data_len;
6189         u32 ioasc, fd_ioasc;
6190         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6191         __be32 *ioasa_data = (__be32 *)ioasa;
6192         int error_index;
6193
6194         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6195         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6196
6197         if (0 == ioasc)
6198                 return;
6199
6200         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6201                 return;
6202
6203         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6204                 error_index = ipr_get_error(fd_ioasc);
6205         else
6206                 error_index = ipr_get_error(ioasc);
6207
6208         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6209                 /* Don't log an error if the IOA already logged one */
6210                 if (ioasa->hdr.ilid != 0)
6211                         return;
6212
6213                 if (!ipr_is_gscsi(res))
6214                         return;
6215
6216                 if (ipr_error_table[error_index].log_ioasa == 0)
6217                         return;
6218         }
6219
6220         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6221
6222         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6223         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6224                 data_len = sizeof(struct ipr_ioasa64);
6225         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6226                 data_len = sizeof(struct ipr_ioasa);
6227
6228         ipr_err("IOASA Dump:\n");
6229
6230         for (i = 0; i < data_len / 4; i += 4) {
6231                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6232                         be32_to_cpu(ioasa_data[i]),
6233                         be32_to_cpu(ioasa_data[i+1]),
6234                         be32_to_cpu(ioasa_data[i+2]),
6235                         be32_to_cpu(ioasa_data[i+3]));
6236         }
6237 }
6238
6239 /**
6240  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6241  * @ipr_cmd:    ipr command struct
6243  *
6244  * Return value:
6245  *      none
6246  **/
6247 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6248 {
6249         u32 failing_lba;
6250         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6251         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6252         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6253         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6254
6255         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6256
6257         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6258                 return;
6259
6260         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6261
6262         if (ipr_is_vset_device(res) &&
6263             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6264             ioasa->u.vset.failing_lba_hi != 0) {
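                     /* Use descriptor format sense data (response code 0x72)
                      * so the 64-bit failing LBA fits in an information
                      * descriptor.
                      */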
6265                 sense_buf[0] = 0x72;
6266                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6267                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6268                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6269
6270                 sense_buf[7] = 12;
6271                 sense_buf[8] = 0;
6272                 sense_buf[9] = 0x0A;
6273                 sense_buf[10] = 0x80;
6274
6275                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6276
6277                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6278                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6279                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6280                 sense_buf[15] = failing_lba & 0x000000ff;
6281
6282                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6283
6284                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6285                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6286                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6287                 sense_buf[19] = failing_lba & 0x000000ff;
6288         } else {
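                     /* Fixed format sense data (response code 0x70) */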
6289                 sense_buf[0] = 0x70;
6290                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6291                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6292                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6293
6294                 /* Illegal request */
6295                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6296                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6297                         sense_buf[7] = 10;      /* additional length */
6298
6299                         /* IOARCB was in error */
6300                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6301                                 sense_buf[15] = 0xC0;
6302                         else    /* Parameter data was invalid */
6303                                 sense_buf[15] = 0x80;
6304
6305                         sense_buf[16] =
6306                             ((IPR_FIELD_POINTER_MASK &
6307                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6308                         sense_buf[17] =
6309                             (IPR_FIELD_POINTER_MASK &
6310                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6311                 } else {
6312                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6313                                 if (ipr_is_vset_device(res))
6314                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6315                                 else
6316                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6317
6318                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6319                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6320                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6321                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6322                                 sense_buf[6] = failing_lba & 0x000000ff;
6323                         }
6324
6325                         sense_buf[7] = 6;       /* additional length */
6326                 }
6327         }
6328 }
6329
6330 /**
6331  * ipr_get_autosense - Copy autosense data to sense buffer
6332  * @ipr_cmd:    ipr command struct
6333  *
6334  * This function copies the autosense buffer to the buffer
6335  * in the scsi_cmd, if there is autosense available.
6336  *
6337  * Return value:
6338  *      1 if autosense was available / 0 if not
6339  **/
6340 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6341 {
6342         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6343         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6344
6345         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6346                 return 0;
6347
6348         if (ipr_cmd->ioa_cfg->sis64)
6349                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6350                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6351                            SCSI_SENSE_BUFFERSIZE));
6352         else
6353                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6354                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6355                            SCSI_SENSE_BUFFERSIZE));
6356         return 1;
6357 }
6358
6359 /**
6360  * ipr_erp_start - Process an error response for a SCSI op
6361  * @ioa_cfg:    ioa config struct
6362  * @ipr_cmd:    ipr command struct
6363  *
6364  * This function determines whether or not to initiate ERP
6365  * on the affected device.
6366  *
6367  * Return value:
6368  *      nothing
6369  **/
6370 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6371                               struct ipr_cmnd *ipr_cmd)
6372 {
6373         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6374         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6375         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6376         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6377
6378         if (!res) {
6379                 __ipr_scsi_eh_done(ipr_cmd);
6380                 return;
6381         }
6382
6383         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6384                 ipr_gen_sense(ipr_cmd);
6385
6386         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6387
6388         switch (masked_ioasc) {
6389         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6390                 if (ipr_is_naca_model(res))
6391                         scsi_cmd->result |= (DID_ABORT << 16);
6392                 else
6393                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6394                 break;
6395         case IPR_IOASC_IR_RESOURCE_HANDLE:
6396         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6397                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6398                 break;
6399         case IPR_IOASC_HW_SEL_TIMEOUT:
6400                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6401                 if (!ipr_is_naca_model(res))
6402                         res->needs_sync_complete = 1;
6403                 break;
6404         case IPR_IOASC_SYNC_REQUIRED:
6405                 if (!res->in_erp)
6406                         res->needs_sync_complete = 1;
6407                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6408                 break;
6409         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6410         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6411                 /*
6412                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6413                  * so SCSI mid-layer and upper layers handle it accordingly.
6414                  */
6415                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6416                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6417                 break;
6418         case IPR_IOASC_BUS_WAS_RESET:
6419         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6420                 /*
6421                  * Report the bus reset and ask for a retry. The device
6422                  * will give CC/UA the next command.
6423                  */
6424                 if (!res->resetting_device)
6425                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6426                 scsi_cmd->result |= (DID_ERROR << 16);
6427                 if (!ipr_is_naca_model(res))
6428                         res->needs_sync_complete = 1;
6429                 break;
6430         case IPR_IOASC_HW_DEV_BUS_STATUS:
6431                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6432                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6433                         if (!ipr_get_autosense(ipr_cmd)) {
6434                                 if (!ipr_is_naca_model(res)) {
6435                                         ipr_erp_cancel_all(ipr_cmd);
6436                                         return;
6437                                 }
6438                         }
6439                 }
6440                 if (!ipr_is_naca_model(res))
6441                         res->needs_sync_complete = 1;
6442                 break;
6443         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6444                 break;
6445         case IPR_IOASC_IR_NON_OPTIMIZED:
6446                 if (res->raw_mode) {
6447                         res->raw_mode = 0;
6448                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6449                 } else
6450                         scsi_cmd->result |= (DID_ERROR << 16);
6451                 break;
6452         default:
6453                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6454                         scsi_cmd->result |= (DID_ERROR << 16);
6455                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6456                         res->needs_sync_complete = 1;
6457                 break;
6458         }
6459
6460         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6461         scsi_cmd->scsi_done(scsi_cmd);
6462         if (ipr_cmd->eh_comp)
6463                 complete(ipr_cmd->eh_comp);
6464         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6465 }
6466
6467 /**
6468  * ipr_scsi_done - mid-layer done function
6469  * @ipr_cmd:    ipr command struct
6470  *
6471  * This function is invoked by the interrupt handler for
6472  * ops generated by the SCSI mid-layer
6473  *
6474  * Return value:
6475  *      none
6476  **/
6477 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6478 {
6479         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6480         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6481         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6482         unsigned long lock_flags;
6483
6484         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6485
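             /* Fast path: a zero sense key means the op succeeded, so just
              * complete it under the HRRQ lock. Anything else starts device
              * ERP under the host lock.
              */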
6486         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6487                 scsi_dma_unmap(scsi_cmd);
6488
6489                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6490                 scsi_cmd->scsi_done(scsi_cmd);
6491                 if (ipr_cmd->eh_comp)
6492                         complete(ipr_cmd->eh_comp);
6493                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6494                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6495         } else {
6496                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6497                 spin_lock(&ipr_cmd->hrrq->_lock);
6498                 ipr_erp_start(ioa_cfg, ipr_cmd);
6499                 spin_unlock(&ipr_cmd->hrrq->_lock);
6500                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6501         }
6502 }
6503
6504 /**
6505  * ipr_queuecommand - Queue a mid-layer request
6506  * @shost:              scsi host struct
6507  * @scsi_cmd:   scsi command struct
6508  *
6509  * This function queues a request generated by the mid-layer.
6510  *
6511  * Return value:
6512  *      0 on success
6513  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6514  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6515  **/
6516 static int ipr_queuecommand(struct Scsi_Host *shost,
6517                             struct scsi_cmnd *scsi_cmd)
6518 {
6519         struct ipr_ioa_cfg *ioa_cfg;
6520         struct ipr_resource_entry *res;
6521         struct ipr_ioarcb *ioarcb;
6522         struct ipr_cmnd *ipr_cmd;
6523         unsigned long hrrq_flags, lock_flags;
6524         int rc;
6525         struct ipr_hrr_queue *hrrq;
6526         int hrrq_id;
6527
6528         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6529
6530         scsi_cmd->result = (DID_OK << 16);
6531         res = scsi_cmd->device->hostdata;
6532
6533         if (ipr_is_gata(res) && res->sata_port) {
6534                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6535                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6536                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6537                 return rc;
6538         }
6539
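             /* Pick one of the host request response queues for this op */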
6540         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6541         hrrq = &ioa_cfg->hrrq[hrrq_id];
6542
6543         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6544         /*
6545          * We are currently blocking all devices due to a host reset.
6546          * We have told the host to stop giving us new requests, but
6547          * ERP ops don't count. FIXME
6548          */
6549         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6550                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6551                 return SCSI_MLQUEUE_HOST_BUSY;
6552         }
6553
6554         /*
6555          * FIXME - Create a scsi_set_host_offline interface
6556          *  so that the ioa_is_dead check can be removed
6557          */
6558         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6559                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6560                 goto err_nodev;
6561         }
6562
6563         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6564         if (ipr_cmd == NULL) {
6565                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6566                 return SCSI_MLQUEUE_HOST_BUSY;
6567         }
6568         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6569
6570         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6571         ioarcb = &ipr_cmd->ioarcb;
6572
6573         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6574         ipr_cmd->scsi_cmd = scsi_cmd;
6575         ipr_cmd->done = ipr_scsi_eh_done;
6576
6577         if (ipr_is_gscsi(res)) {
6578                 if (scsi_cmd->underflow == 0)
6579                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6580
6581                 if (res->reset_occurred) {
6582                         res->reset_occurred = 0;
6583                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6584                 }
6585         }
6586
6587         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6588                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6589
6590                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6591                 if (scsi_cmd->flags & SCMD_TAGGED)
6592                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6593                 else
6594                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6595         }
6596
6597         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6598             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6599                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6600         }
6601         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6602                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6603
6604                 if (scsi_cmd->underflow == 0)
6605                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6606         }
6607
6608         if (ioa_cfg->sis64)
6609                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6610         else
6611                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6612
6613         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6614         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6615                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6616                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6617                 if (!rc)
6618                         scsi_dma_unmap(scsi_cmd);
6619                 return SCSI_MLQUEUE_HOST_BUSY;
6620         }
6621
6622         if (unlikely(hrrq->ioa_is_dead)) {
6623                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6624                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6625                 scsi_dma_unmap(scsi_cmd);
6626                 goto err_nodev;
6627         }
6628
6629         ioarcb->res_handle = res->res_handle;
6630         if (res->needs_sync_complete) {
6631                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6632                 res->needs_sync_complete = 0;
6633         }
6634         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6635         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6636         ipr_send_command(ipr_cmd);
6637         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6638         return 0;
6639
6640 err_nodev:
6641         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6642         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6643         scsi_cmd->result = (DID_NO_CONNECT << 16);
6644         scsi_cmd->scsi_done(scsi_cmd);
6645         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6646         return 0;
6647 }
6648
6649 /**
6650  * ipr_ioctl - IOCTL handler
6651  * @sdev:       scsi device struct
6652  * @cmd:        IOCTL cmd
6653  * @arg:        IOCTL arg
6654  *
6655  * Return value:
6656  *      0 on success / other on failure
6657  **/
6658 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6659 {
6660         struct ipr_resource_entry *res;
6661
6662         res = (struct ipr_resource_entry *)sdev->hostdata;
6663         if (res && ipr_is_gata(res)) {
6664                 if (cmd == HDIO_GET_IDENTITY)
6665                         return -ENOTTY;
6666                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6667         }
6668
6669         return -EINVAL;
6670 }
6671
6672 /**
6673  * ipr_info - Get information about the card/driver
6674  * @scsi_host:  scsi host struct
6675  *
6676  * Return value:
6677  *      pointer to buffer with description string
6678  **/
6679 static const char *ipr_ioa_info(struct Scsi_Host *host)
6680 {
6681         static char buffer[512];
6682         struct ipr_ioa_cfg *ioa_cfg;
6683         unsigned long lock_flags = 0;
6684
6685         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6686
6687         spin_lock_irqsave(host->host_lock, lock_flags);
6688         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6689         spin_unlock_irqrestore(host->host_lock, lock_flags);
6690
6691         return buffer;
6692 }
6693
6694 static struct scsi_host_template driver_template = {
6695         .module = THIS_MODULE,
6696         .name = "IPR",
6697         .info = ipr_ioa_info,
6698         .ioctl = ipr_ioctl,
6699         .queuecommand = ipr_queuecommand,
6700         .eh_abort_handler = ipr_eh_abort,
6701         .eh_device_reset_handler = ipr_eh_dev_reset,
6702         .eh_host_reset_handler = ipr_eh_host_reset,
6703         .slave_alloc = ipr_slave_alloc,
6704         .slave_configure = ipr_slave_configure,
6705         .slave_destroy = ipr_slave_destroy,
6706         .scan_finished = ipr_scan_finished,
6707         .target_alloc = ipr_target_alloc,
6708         .target_destroy = ipr_target_destroy,
6709         .change_queue_depth = ipr_change_queue_depth,
6710         .bios_param = ipr_biosparam,
6711         .can_queue = IPR_MAX_COMMANDS,
6712         .this_id = -1,
6713         .sg_tablesize = IPR_MAX_SGLIST,
6714         .max_sectors = IPR_IOA_MAX_SECTORS,
6715         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6716         .use_clustering = ENABLE_CLUSTERING,
6717         .shost_attrs = ipr_ioa_attrs,
6718         .sdev_attrs = ipr_dev_attrs,
6719         .proc_name = IPR_NAME,
6720 };
6721
6722 /**
6723  * ipr_ata_phy_reset - libata phy_reset handler
6724  * @ap:         ata port to reset
6725  *
6726  **/
6727 static void ipr_ata_phy_reset(struct ata_port *ap)
6728 {
6729         unsigned long flags;
6730         struct ipr_sata_port *sata_port = ap->private_data;
6731         struct ipr_resource_entry *res = sata_port->res;
6732         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6733         int rc;
6734
6735         ENTER;
6736         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6737         while (ioa_cfg->in_reset_reload) {
6738                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6739                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6740                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6741         }
6742
6743         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6744                 goto out_unlock;
6745
6746         rc = ipr_device_reset(ioa_cfg, res);
6747
6748         if (rc) {
6749                 ap->link.device[0].class = ATA_DEV_NONE;
6750                 goto out_unlock;
6751         }
6752
6753         ap->link.device[0].class = res->ata_class;
6754         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6755                 ap->link.device[0].class = ATA_DEV_NONE;
6756
6757 out_unlock:
6758         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6759         LEAVE;
6760 }
6761
6762 /**
6763  * ipr_ata_post_internal - Cleanup after an internal command
6764  * @qc: ATA queued command
6765  *
6766  * Return value:
6767  *      none
6768  **/
6769 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6770 {
6771         struct ipr_sata_port *sata_port = qc->ap->private_data;
6772         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6773         struct ipr_cmnd *ipr_cmd;
6774         struct ipr_hrr_queue *hrrq;
6775         unsigned long flags;
6776
6777         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6778         while (ioa_cfg->in_reset_reload) {
6779                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6780                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6781                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6782         }
6783
6784         for_each_hrrq(hrrq, ioa_cfg) {
6785                 spin_lock(&hrrq->_lock);
6786                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6787                         if (ipr_cmd->qc == qc) {
6788                                 ipr_device_reset(ioa_cfg, sata_port->res);
6789                                 break;
6790                         }
6791                 }
6792                 spin_unlock(&hrrq->_lock);
6793         }
6794         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6795 }
6796
6797 /**
6798  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6799  * @regs:       destination
6800  * @tf: source ATA taskfile
6801  *
6802  * Return value:
6803  *      none
6804  **/
6805 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6806                              struct ata_taskfile *tf)
6807 {
6808         regs->feature = tf->feature;
6809         regs->nsect = tf->nsect;
6810         regs->lbal = tf->lbal;
6811         regs->lbam = tf->lbam;
6812         regs->lbah = tf->lbah;
6813         regs->device = tf->device;
6814         regs->command = tf->command;
6815         regs->hob_feature = tf->hob_feature;
6816         regs->hob_nsect = tf->hob_nsect;
6817         regs->hob_lbal = tf->hob_lbal;
6818         regs->hob_lbam = tf->hob_lbam;
6819         regs->hob_lbah = tf->hob_lbah;
6820         regs->ctl = tf->ctl;
6821 }
6822
6823 /**
6824  * ipr_sata_done - done function for SATA commands
6825  * @ipr_cmd:    ipr command struct
6826  *
6827  * This function is invoked by the interrupt handler for
6828  * ops generated by the SCSI mid-layer to SATA devices
6829  *
6830  * Return value:
6831  *      none
6832  **/
6833 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6834 {
6835         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6836         struct ata_queued_cmd *qc = ipr_cmd->qc;
6837         struct ipr_sata_port *sata_port = qc->ap->private_data;
6838         struct ipr_resource_entry *res = sata_port->res;
6839         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6840
6841         spin_lock(&ipr_cmd->hrrq->_lock);
6842         if (ipr_cmd->ioa_cfg->sis64)
6843                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6844                        sizeof(struct ipr_ioasa_gata));
6845         else
6846                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6847                        sizeof(struct ipr_ioasa_gata));
6848         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6849
6850         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6851                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6852
6853         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6854                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6855         else
6856                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6857         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6858         spin_unlock(&ipr_cmd->hrrq->_lock);
6859         ata_qc_complete(qc);
6860 }
6861
6862 /**
6863  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6864  * @ipr_cmd:    ipr command struct
6865  * @qc:         ATA queued command
6866  *
6867  **/
6868 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6869                                   struct ata_queued_cmd *qc)
6870 {
6871         u32 ioadl_flags = 0;
6872         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6873         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6874         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6875         int len = qc->nbytes;
6876         struct scatterlist *sg;
6877         unsigned int si;
6878         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6879
6880         if (len == 0)
6881                 return;
6882
6883         if (qc->dma_dir == DMA_TO_DEVICE) {
6884                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6885                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6886         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6887                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6888
6889         ioarcb->data_transfer_length = cpu_to_be32(len);
6890         ioarcb->ioadl_len =
6891                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6892         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6893                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6894
6895         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6896                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6897                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6898                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6899
6900                 last_ioadl64 = ioadl64;
6901                 ioadl64++;
6902         }
6903
6904         if (likely(last_ioadl64))
6905                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6906 }
6907
6908 /**
6909  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6910  * @ipr_cmd:    ipr command struct
6911  * @qc:         ATA queued command
6912  *
6913  **/
6914 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6915                                 struct ata_queued_cmd *qc)
6916 {
6917         u32 ioadl_flags = 0;
6918         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6919         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6920         struct ipr_ioadl_desc *last_ioadl = NULL;
6921         int len = qc->nbytes;
6922         struct scatterlist *sg;
6923         unsigned int si;
6924
6925         if (len == 0)
6926                 return;
6927
6928         if (qc->dma_dir == DMA_TO_DEVICE) {
6929                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6930                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6931                 ioarcb->data_transfer_length = cpu_to_be32(len);
6932                 ioarcb->ioadl_len =
6933                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6934         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6935                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6936                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6937                 ioarcb->read_ioadl_len =
6938                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6939         }
6940
6941         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6942                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6943                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6944
6945                 last_ioadl = ioadl;
6946                 ioadl++;
6947         }
6948
6949         if (likely(last_ioadl))
6950                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6951 }
6952
6953 /**
6954  * ipr_qc_defer - Get a free ipr_cmd
6955  * @qc: queued command
6956  *
6957  * Return value:
6958  *      0 on success / ATA_DEFER_LINK if the op must be deferred
6959  **/
6960 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6961 {
6962         struct ata_port *ap = qc->ap;
6963         struct ipr_sata_port *sata_port = ap->private_data;
6964         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6965         struct ipr_cmnd *ipr_cmd;
6966         struct ipr_hrr_queue *hrrq;
6967         int hrrq_id;
6968
6969         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6970         hrrq = &ioa_cfg->hrrq[hrrq_id];
6971
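        /* Reserve a command block under the hrrq lock; a dead or quiesced
           adapter must not hand one out */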
6972         qc->lldd_task = NULL;
6973         spin_lock(&hrrq->_lock);
6974         if (unlikely(hrrq->ioa_is_dead)) {
6975                 spin_unlock(&hrrq->_lock);
6976                 return 0;
6977         }
6978
6979         if (unlikely(!hrrq->allow_cmds)) {
6980                 spin_unlock(&hrrq->_lock);
6981                 return ATA_DEFER_LINK;
6982         }
6983
6984         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6985         if (ipr_cmd == NULL) {
6986                 spin_unlock(&hrrq->_lock);
6987                 return ATA_DEFER_LINK;
6988         }
6989
6990         qc->lldd_task = ipr_cmd;
6991         spin_unlock(&hrrq->_lock);
6992         return 0;
6993 }
6994
6995 /**
6996  * ipr_qc_issue - Issue a SATA qc to a device
6997  * @qc: queued command
6998  *
6999  * Return value:
7000  *      0 if success / AC_ERR_* failure code
7001  **/
7002 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7003 {
7004         struct ata_port *ap = qc->ap;
7005         struct ipr_sata_port *sata_port = ap->private_data;
7006         struct ipr_resource_entry *res = sata_port->res;
7007         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7008         struct ipr_cmnd *ipr_cmd;
7009         struct ipr_ioarcb *ioarcb;
7010         struct ipr_ioarcb_ata_regs *regs;
7011
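        /* qc_defer normally reserves the command block; try once more
           here in case it was not called or did not get one */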
7012         if (qc->lldd_task == NULL)
7013                 ipr_qc_defer(qc);
7014
7015         ipr_cmd = qc->lldd_task;
7016         if (ipr_cmd == NULL)
7017                 return AC_ERR_SYSTEM;
7018
7019         qc->lldd_task = NULL;
7020         spin_lock(&ipr_cmd->hrrq->_lock);
7021         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7022                         ipr_cmd->hrrq->ioa_is_dead)) {
7023                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7024                 spin_unlock(&ipr_cmd->hrrq->_lock);
7025                 return AC_ERR_SYSTEM;
7026         }
7027
7028         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7029         ioarcb = &ipr_cmd->ioarcb;
7030
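        /* On SIS64 the ATA registers live beside the IOADL and are referenced
           by offset; legacy adapters embed them directly in the IOARCB */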
7031         if (ioa_cfg->sis64) {
7032                 regs = &ipr_cmd->i.ata_ioadl.regs;
7033                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7034         } else
7035                 regs = &ioarcb->u.add_data.u.regs;
7036
7037         memset(regs, 0, sizeof(*regs));
7038         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7039
7040         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7041         ipr_cmd->qc = qc;
7042         ipr_cmd->done = ipr_sata_done;
7043         ipr_cmd->ioarcb.res_handle = res->res_handle;
7044         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7045         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7046         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7047         ipr_cmd->dma_use_sg = qc->n_elem;
7048
7049         if (ioa_cfg->sis64)
7050                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7051         else
7052                 ipr_build_ata_ioadl(ipr_cmd, qc);
7053
7054         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7055         ipr_copy_sata_tf(regs, &qc->tf);
7056         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7057         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7058
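        /* Translate the libata taskfile protocol into adapter DMA/packet flags */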
7059         switch (qc->tf.protocol) {
7060         case ATA_PROT_NODATA:
7061         case ATA_PROT_PIO:
7062                 break;
7063
7064         case ATA_PROT_DMA:
7065                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7066                 break;
7067
7068         case ATAPI_PROT_PIO:
7069         case ATAPI_PROT_NODATA:
7070                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7071                 break;
7072
7073         case ATAPI_PROT_DMA:
7074                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7075                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7076                 break;
7077
7078         default:
7079                 WARN_ON(1);
7080                 spin_unlock(&ipr_cmd->hrrq->_lock);
7081                 return AC_ERR_INVALID;
7082         }
7083
7084         ipr_send_command(ipr_cmd);
7085         spin_unlock(&ipr_cmd->hrrq->_lock);
7086
7087         return 0;
7088 }
7089
7090 /**
7091  * ipr_qc_fill_rtf - Read result TF
7092  * @qc: ATA queued command
7093  *
7094  * Return value:
7095  *      true
7096  **/
7097 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7098 {
7099         struct ipr_sata_port *sata_port = qc->ap->private_data;
7100         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7101         struct ata_taskfile *tf = &qc->result_tf;
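        /* Copy the ATA status the adapter returned into the libata result taskfile */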
7102
7103         tf->feature = g->error;
7104         tf->nsect = g->nsect;
7105         tf->lbal = g->lbal;
7106         tf->lbam = g->lbam;
7107         tf->lbah = g->lbah;
7108         tf->device = g->device;
7109         tf->command = g->status;
7110         tf->hob_nsect = g->hob_nsect;
7111         tf->hob_lbal = g->hob_lbal;
7112         tf->hob_lbam = g->hob_lbam;
7113         tf->hob_lbah = g->hob_lbah;
7114
7115         return true;
7116 }
7117
7118 static struct ata_port_operations ipr_sata_ops = {
7119         .phy_reset = ipr_ata_phy_reset,
7120         .hardreset = ipr_sata_reset,
7121         .post_internal_cmd = ipr_ata_post_internal,
7122         .qc_prep = ata_noop_qc_prep,
7123         .qc_defer = ipr_qc_defer,
7124         .qc_issue = ipr_qc_issue,
7125         .qc_fill_rtf = ipr_qc_fill_rtf,
7126         .port_start = ata_sas_port_start,
7127         .port_stop = ata_sas_port_stop
7128 };
7129
7130 static struct ata_port_info sata_port_info = {
7131         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7132                           ATA_FLAG_SAS_HOST,
7133         .pio_mask       = ATA_PIO4_ONLY,
7134         .mwdma_mask     = ATA_MWDMA2,
7135         .udma_mask      = ATA_UDMA6,
7136         .port_ops       = &ipr_sata_ops
7137 };
7138
7139 #ifdef CONFIG_PPC_PSERIES
7140 static const u16 ipr_blocked_processors[] = {
7141         PVR_NORTHSTAR,
7142         PVR_PULSAR,
7143         PVR_POWER4,
7144         PVR_ICESTAR,
7145         PVR_SSTAR,
7146         PVR_POWER4p,
7147         PVR_630,
7148         PVR_630p
7149 };
7150
7151 /**
7152  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7153  * @ioa_cfg:    ioa cfg struct
7154  *
7155  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7156  * certain pSeries hardware. This function determines if the given
7157  * adapter is in one of these configurations or not.
7158  *
7159  * Return value:
7160  *      1 if adapter is not supported / 0 if adapter is supported
7161  **/
7162 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7163 {
7164         int i;
7165
7166         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7167                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7168                         if (pvr_version_is(ipr_blocked_processors[i]))
7169                                 return 1;
7170                 }
7171         }
7172         return 0;
7173 }
7174 #else
7175 #define ipr_invalid_adapter(ioa_cfg) 0
7176 #endif
7177
7178 /**
7179  * ipr_ioa_bringdown_done - IOA bring down completion.
7180  * @ipr_cmd:    ipr command struct
7181  *
7182  * This function processes the completion of an adapter bring down.
7183  * It wakes any reset sleepers.
7184  *
7185  * Return value:
7186  *      IPR_RC_JOB_RETURN
7187  **/
7188 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7189 {
7190         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7191         int i;
7192
7193         ENTER;
7194         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7195                 ipr_trace;
7196                 ioa_cfg->scsi_unblock = 1;
7197                 schedule_work(&ioa_cfg->work_q);
7198         }
7199
7200         ioa_cfg->in_reset_reload = 0;
7201         ioa_cfg->reset_retries = 0;
7202         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7203                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7204                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7205                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7206         }
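        /* Order the ioa_is_dead stores before waking any reset waiters */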
7207         wmb();
7208
7209         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7210         wake_up_all(&ioa_cfg->reset_wait_q);
7211         LEAVE;
7212
7213         return IPR_RC_JOB_RETURN;
7214 }
7215
7216 /**
7217  * ipr_ioa_reset_done - IOA reset completion.
7218  * @ipr_cmd:    ipr command struct
7219  *
7220  * This function processes the completion of an adapter reset.
7221  * It schedules any necessary mid-layer add/removes and
7222  * wakes any reset sleepers.
7223  *
7224  * Return value:
7225  *      IPR_RC_JOB_RETURN
7226  **/
7227 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7228 {
7229         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7230         struct ipr_resource_entry *res;
7231         int j;
7232
7233         ENTER;
7234         ioa_cfg->in_reset_reload = 0;
7235         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7236                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7237                 ioa_cfg->hrrq[j].allow_cmds = 1;
7238                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7239         }
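        /* Ensure allow_cmds is visible before new commands are queued */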
7240         wmb();
7241         ioa_cfg->reset_cmd = NULL;
7242         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7243
7244         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7245                 if (res->add_to_ml || res->del_from_ml) {
7246                         ipr_trace;
7247                         break;
7248                 }
7249         }
7250         schedule_work(&ioa_cfg->work_q);
7251
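        /* Re-issue all HCAMs: the first IPR_NUM_LOG_HCAMS fetch error log
           data, the rest report configuration changes */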
7252         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7253                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7254                 if (j < IPR_NUM_LOG_HCAMS)
7255                         ipr_send_hcam(ioa_cfg,
7256                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7257                                 ioa_cfg->hostrcb[j]);
7258                 else
7259                         ipr_send_hcam(ioa_cfg,
7260                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7261                                 ioa_cfg->hostrcb[j]);
7262         }
7263
7264         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7265         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7266
7267         ioa_cfg->reset_retries = 0;
7268         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7269         wake_up_all(&ioa_cfg->reset_wait_q);
7270
7271         ioa_cfg->scsi_unblock = 1;
7272         schedule_work(&ioa_cfg->work_q);
7273         LEAVE;
7274         return IPR_RC_JOB_RETURN;
7275 }
7276
7277 /**
7278  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7279  * @supported_dev:      supported device struct
7280  * @vpids:                      vendor product id struct
7281  *
7282  * Return value:
7283  *      none
7284  **/
7285 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7286                                  struct ipr_std_inq_vpids *vpids)
7287 {
7288         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7289         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7290         supported_dev->num_records = 1;
7291         supported_dev->data_length =
7292                 cpu_to_be16(sizeof(struct ipr_supported_device));
7293         supported_dev->reserved = 0;
7294 }
7295
7296 /**
7297  * ipr_set_supported_devs - Send Set Supported Devices for a device
7298  * @ipr_cmd:    ipr command struct
7299  *
7300  * This function sends a Set Supported Devices to the adapter
7301  *
7302  * Return value:
7303  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7304  **/
7305 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7306 {
7307         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7308         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7309         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7310         struct ipr_resource_entry *res = ipr_cmd->u.res;
7311
7312         ipr_cmd->job_step = ipr_ioa_reset_done;
7313
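        /* Issue one Set Supported Devices per disk; on non-SIS64 adapters
           job_step re-enters this function to continue the walk */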
7314         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7315                 if (!ipr_is_scsi_disk(res))
7316                         continue;
7317
7318                 ipr_cmd->u.res = res;
7319                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7320
7321                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7322                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7323                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7324
7325                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7326                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7327                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7328                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7329
7330                 ipr_init_ioadl(ipr_cmd,
7331                                ioa_cfg->vpd_cbs_dma +
7332                                  offsetof(struct ipr_misc_cbs, supp_dev),
7333                                sizeof(struct ipr_supported_device),
7334                                IPR_IOADL_FLAGS_WRITE_LAST);
7335
7336                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7337                            IPR_SET_SUP_DEVICE_TIMEOUT);
7338
7339                 if (!ioa_cfg->sis64)
7340                         ipr_cmd->job_step = ipr_set_supported_devs;
7341                 LEAVE;
7342                 return IPR_RC_JOB_RETURN;
7343         }
7344
7345         LEAVE;
7346         return IPR_RC_JOB_CONTINUE;
7347 }
7348
7349 /**
7350  * ipr_get_mode_page - Locate specified mode page
7351  * @mode_pages: mode page buffer
7352  * @page_code:  page code to find
7353  * @len:                minimum required length for mode page
7354  *
7355  * Return value:
7356  *      pointer to mode page / NULL on failure
7357  **/
7358 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7359                                u32 page_code, u32 len)
7360 {
7361         struct ipr_mode_page_hdr *mode_hdr;
7362         u32 page_length;
7363         u32 length;
7364
7365         if (!mode_pages || (mode_pages->hdr.length == 0))
7366                 return NULL;
7367
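        /* Length of the mode page data that follows the header and any
           block descriptors */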
7368         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7369         mode_hdr = (struct ipr_mode_page_hdr *)
7370                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7371
7372         while (length) {
7373                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7374                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7375                                 return mode_hdr;
7376                         break;
7377                 } else {
7378                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7379                                        mode_hdr->page_length);
7380                         length -= page_length;
7381                         mode_hdr = (struct ipr_mode_page_hdr *)
7382                                 ((unsigned long)mode_hdr + page_length);
7383                 }
7384         }
7385         return NULL;
7386 }
7387
7388 /**
7389  * ipr_check_term_power - Check for term power errors
7390  * @ioa_cfg:    ioa config struct
7391  * @mode_pages: IOAFP mode pages buffer
7392  *
7393  * Check the IOAFP's mode page 28 for term power errors
7394  *
7395  * Return value:
7396  *      nothing
7397  **/
7398 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7399                                  struct ipr_mode_pages *mode_pages)
7400 {
7401         int i;
7402         int entry_length;
7403         struct ipr_dev_bus_entry *bus;
7404         struct ipr_mode_page28 *mode_page;
7405
7406         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7407                                       sizeof(struct ipr_mode_page28));
7408
7409         entry_length = mode_page->entry_length;
7410
7411         bus = mode_page->bus;
7412
7413         for (i = 0; i < mode_page->num_entries; i++) {
7414                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7415                         dev_err(&ioa_cfg->pdev->dev,
7416                                 "Term power is absent on scsi bus %d\n",
7417                                 bus->res_addr.bus);
7418                 }
7419
7420                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7421         }
7422 }
7423
7424 /**
7425  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7426  * @ioa_cfg:    ioa config struct
7427  *
7428  * Looks through the config table checking for SES devices. If
7429  * the SES device is in the SES table indicating a maximum SCSI
7430  * bus speed, the speed is limited for the bus.
7431  *
7432  * Return value:
7433  *      none
7434  **/
7435 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7436 {
7437         u32 max_xfer_rate;
7438         int i;
7439
7440         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7441                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7442                                                        ioa_cfg->bus_attr[i].bus_width);
7443
7444                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7445                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7446         }
7447 }
7448
7449 /**
7450  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7451  * @ioa_cfg:    ioa config struct
7452  * @mode_pages: mode page 28 buffer
7453  *
7454  * Updates mode page 28 based on driver configuration
7455  *
7456  * Return value:
7457  *      none
7458  **/
7459 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7460                                           struct ipr_mode_pages *mode_pages)
7461 {
7462         int i, entry_length;
7463         struct ipr_dev_bus_entry *bus;
7464         struct ipr_bus_attributes *bus_attr;
7465         struct ipr_mode_page28 *mode_page;
7466
7467         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7468                                       sizeof(struct ipr_mode_page28));
7469
7470         entry_length = mode_page->entry_length;
7471
7472         /* Loop for each device bus entry */
7473         for (i = 0, bus = mode_page->bus;
7474              i < mode_page->num_entries;
7475              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7476                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7477                         dev_err(&ioa_cfg->pdev->dev,
7478                                 "Invalid resource address reported: 0x%08X\n",
7479                                 IPR_GET_PHYS_LOC(bus->res_addr));
7480                         continue;
7481                 }
7482
7483                 bus_attr = &ioa_cfg->bus_attr[i];
7484                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7485                 bus->bus_width = bus_attr->bus_width;
7486                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7487                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7488                 if (bus_attr->qas_enabled)
7489                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7490                 else
7491                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7492         }
7493 }
7494
7495 /**
7496  * ipr_build_mode_select - Build a mode select command
7497  * @ipr_cmd:    ipr command struct
7498  * @res_handle: resource handle to send command to
7499  * @parm:               Byte 1 of Mode Select command
7500  * @dma_addr:   DMA buffer address
7501  * @xfer_len:   data transfer length
7502  *
7503  * Return value:
7504  *      none
7505  **/
7506 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7507                                   __be32 res_handle, u8 parm,
7508                                   dma_addr_t dma_addr, u8 xfer_len)
7509 {
7510         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7511
7512         ioarcb->res_handle = res_handle;
7513         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7514         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7515         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7516         ioarcb->cmd_pkt.cdb[1] = parm;
7517         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7518
7519         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7520 }
7521
7522 /**
7523  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7524  * @ipr_cmd:    ipr command struct
7525  *
7526  * This function sets up the SCSI bus attributes and sends
7527  * a Mode Select for Page 28 to activate them.
7528  *
7529  * Return value:
7530  *      IPR_RC_JOB_RETURN
7531  **/
7532 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7533 {
7534         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7535         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7536         int length;
7537
7538         ENTER;
7539         ipr_scsi_bus_speed_limit(ioa_cfg);
7540         ipr_check_term_power(ioa_cfg, mode_pages);
7541         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7542         length = mode_pages->hdr.length + 1;
7543         mode_pages->hdr.length = 0;
7544
7545         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7546                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7547                               length);
7548
7549         ipr_cmd->job_step = ipr_set_supported_devs;
7550         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7551                                     struct ipr_resource_entry, queue);
7552         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7553
7554         LEAVE;
7555         return IPR_RC_JOB_RETURN;
7556 }
7557
7558 /**
7559  * ipr_build_mode_sense - Build a mode sense command
7560  * @ipr_cmd:    ipr command struct
7561  * @res_handle: resource handle to send command to
7562  * @parm:               Byte 2 of mode sense command
7563  * @dma_addr:   DMA address of mode sense buffer
7564  * @xfer_len:   Size of DMA buffer
7565  *
7566  * Return value:
7567  *      none
7568  **/
7569 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7570                                  __be32 res_handle,
7571                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7572 {
7573         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7574
7575         ioarcb->res_handle = res_handle;
7576         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7577         ioarcb->cmd_pkt.cdb[2] = parm;
7578         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7579         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7580
7581         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7582 }
7583
7584 /**
7585  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7586  * @ipr_cmd:    ipr command struct
7587  *
7588  * This function handles the failure of an IOA bringup command.
7589  *
7590  * Return value:
7591  *      IPR_RC_JOB_RETURN
7592  **/
7593 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7594 {
7595         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7596         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7597
7598         dev_err(&ioa_cfg->pdev->dev,
7599                 "0x%02X failed with IOASC: 0x%08X\n",
7600                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7601
7602         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7603         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7604         return IPR_RC_JOB_RETURN;
7605 }
7606
7607 /**
7608  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7609  * @ipr_cmd:    ipr command struct
7610  *
7611  * This function handles the failure of a Mode Sense to the IOAFP.
7612  * Some adapters do not handle all mode pages.
7613  *
7614  * Return value:
7615  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7616  **/
7617 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7618 {
7619         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7620         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7621
7622         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7623                 ipr_cmd->job_step = ipr_set_supported_devs;
7624                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7625                                             struct ipr_resource_entry, queue);
7626                 return IPR_RC_JOB_CONTINUE;
7627         }
7628
7629         return ipr_reset_cmd_failed(ipr_cmd);
7630 }
7631
7632 /**
7633  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7634  * @ipr_cmd:    ipr command struct
7635  *
7636  * This function sends a Page 28 mode sense to the IOA to
7637  * retrieve SCSI bus attributes.
7638  *
7639  * Return value:
7640  *      IPR_RC_JOB_RETURN
7641  **/
7642 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7643 {
7644         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7645
7646         ENTER;
7647         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7648                              0x28, ioa_cfg->vpd_cbs_dma +
7649                              offsetof(struct ipr_misc_cbs, mode_pages),
7650                              sizeof(struct ipr_mode_pages));
7651
7652         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7653         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7654
7655         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7656
7657         LEAVE;
7658         return IPR_RC_JOB_RETURN;
7659 }
7660
7661 /**
7662  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7663  * @ipr_cmd:    ipr command struct
7664  *
7665  * This function enables dual IOA RAID support if possible.
7666  *
7667  * Return value:
7668  *      IPR_RC_JOB_RETURN
7669  **/
7670 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7671 {
7672         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7673         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7674         struct ipr_mode_page24 *mode_page;
7675         int length;
7676
7677         ENTER;
7678         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7679                                       sizeof(struct ipr_mode_page24));
7680
7681         if (mode_page)
7682                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7683
7684         length = mode_pages->hdr.length + 1;
7685         mode_pages->hdr.length = 0;
7686
7687         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7688                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7689                               length);
7690
7691         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7692         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7693
7694         LEAVE;
7695         return IPR_RC_JOB_RETURN;
7696 }
7697
7698 /**
7699  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7700  * @ipr_cmd:    ipr command struct
7701  *
7702  * This function handles the failure of a Mode Sense to the IOAFP.
7703  * Some adapters do not handle all mode pages.
7704  *
7705  * Return value:
7706  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7707  **/
7708 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7709 {
7710         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7711
7712         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7713                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7714                 return IPR_RC_JOB_CONTINUE;
7715         }
7716
7717         return ipr_reset_cmd_failed(ipr_cmd);
7718 }
7719
7720 /**
7721  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7722  * @ipr_cmd:    ipr command struct
7723  *
7724  * This function sends a mode sense to the IOA to retrieve
7725  * the IOA Advanced Function Control mode page.
7726  *
7727  * Return value:
7728  *      IPR_RC_JOB_RETURN
7729  **/
7730 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7731 {
7732         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7733
7734         ENTER;
7735         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7736                              0x24, ioa_cfg->vpd_cbs_dma +
7737                              offsetof(struct ipr_misc_cbs, mode_pages),
7738                              sizeof(struct ipr_mode_pages));
7739
7740         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7741         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7742
7743         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7744
7745         LEAVE;
7746         return IPR_RC_JOB_RETURN;
7747 }
7748
7749 /**
7750  * ipr_init_res_table - Initialize the resource table
7751  * @ipr_cmd:    ipr command struct
7752  *
7753  * This function looks through the existing resource table, comparing
7754  * it with the config table. This function will take care of old/new
7755  * devices and schedule adding/removing them from the mid-layer
7756  * as appropriate.
7757  *
7758  * Return value:
7759  *      IPR_RC_JOB_CONTINUE
7760  **/
7761 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7762 {
7763         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7764         struct ipr_resource_entry *res, *temp;
7765         struct ipr_config_table_entry_wrapper cfgtew;
7766         int entries, found, flag, i;
7767         LIST_HEAD(old_res);
7768
7769         ENTER;
7770         if (ioa_cfg->sis64)
7771                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7772         else
7773                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7774
7775         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7776                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7777
7778         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7779                 list_move_tail(&res->queue, &old_res);
7780
7781         if (ioa_cfg->sis64)
7782                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7783         else
7784                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7785
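        /* Match each config table entry against the previous resource list;
           unmatched entries are newly reported devices */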
7786         for (i = 0; i < entries; i++) {
7787                 if (ioa_cfg->sis64)
7788                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7789                 else
7790                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7791                 found = 0;
7792
7793                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7794                         if (ipr_is_same_device(res, &cfgtew)) {
7795                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7796                                 found = 1;
7797                                 break;
7798                         }
7799                 }
7800
7801                 if (!found) {
7802                         if (list_empty(&ioa_cfg->free_res_q)) {
7803                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7804                                 break;
7805                         }
7806
7807                         found = 1;
7808                         res = list_entry(ioa_cfg->free_res_q.next,
7809                                          struct ipr_resource_entry, queue);
7810                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7811                         ipr_init_res_entry(res, &cfgtew);
7812                         res->add_to_ml = 1;
7813                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7814                         res->sdev->allow_restart = 1;
7815
7816                 if (found)
7817                         ipr_update_res_entry(res, &cfgtew);
7818         }
7819
7820         list_for_each_entry_safe(res, temp, &old_res, queue) {
7821                 if (res->sdev) {
7822                         res->del_from_ml = 1;
7823                         res->res_handle = IPR_INVALID_RES_HANDLE;
7824                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7825                 }
7826         }
7827
7828         list_for_each_entry_safe(res, temp, &old_res, queue) {
7829                 ipr_clear_res_target(res);
7830                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7831         }
7832
7833         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7834                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7835         else
7836                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7837
7838         LEAVE;
7839         return IPR_RC_JOB_CONTINUE;
7840 }
7841
7842 /**
7843  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7844  * @ipr_cmd:    ipr command struct
7845  *
7846  * This function sends a Query IOA Configuration command
7847  * to the adapter to retrieve the IOA configuration table.
7848  *
7849  * Return value:
7850  *      IPR_RC_JOB_RETURN
7851  **/
7852 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7853 {
7854         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7855         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7856         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7857         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7858
7859         ENTER;
7860         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7861                 ioa_cfg->dual_raid = 1;
7862         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7863                  ucode_vpd->major_release, ucode_vpd->card_type,
7864                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7865         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7866         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7867
7868         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7869         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7870         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7871         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7872
7873         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7874                        IPR_IOADL_FLAGS_READ_LAST);
7875
7876         ipr_cmd->job_step = ipr_init_res_table;
7877
7878         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7879
7880         LEAVE;
7881         return IPR_RC_JOB_RETURN;
7882 }
7883
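/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA Service Action
 * @ipr_cmd:    ipr command struct
 *
 * Adapters that do not support the service action simply continue the
 * reset job; any other failure triggers a full adapter reset.
 *
 * Return value:
 *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/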
7884 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7885 {
7886         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7887
7888         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7889                 return IPR_RC_JOB_CONTINUE;
7890
7891         return ipr_reset_cmd_failed(ipr_cmd);
7892 }
7893
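/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle to send command to
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/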
7894 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7895                                          __be32 res_handle, u8 sa_code)
7896 {
7897         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7898
7899         ioarcb->res_handle = res_handle;
7900         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7901         ioarcb->cmd_pkt.cdb[1] = sa_code;
7902         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7903 }
7904
7905 /**
7906  * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service action
7907  * @ipr_cmd:    ipr command struct
7908  *
7909  * Return value:
7910  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7911  **/
7912 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7913 {
7914         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7915         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7916         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7917
7918         ENTER;
7919
7920         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7921
7922         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7923                 ipr_build_ioa_service_action(ipr_cmd,
7924                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7925                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7926
7927                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7928
7929                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7930                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7931                            IPR_SET_SUP_DEVICE_TIMEOUT);
7932
7933                 LEAVE;
7934                 return IPR_RC_JOB_RETURN;
7935         }
7936
7937         LEAVE;
7938         return IPR_RC_JOB_CONTINUE;
7939 }
7940
7941 /**
7942  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7943  * @ipr_cmd:    ipr command struct
 * @flags:      inquiry flags (CDB byte 1)
 * @page:       inquiry page code
 * @dma_addr:   DMA address of the inquiry data buffer
 * @xfer_len:   size of the inquiry data buffer
7944  *
7945  * This utility function sends an inquiry to the adapter.
7946  *
7947  * Return value:
7948  *      none
7949  **/
7950 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7951                               dma_addr_t dma_addr, u8 xfer_len)
7952 {
7953         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7954
7955         ENTER;
7956         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7957         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7958
7959         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7960         ioarcb->cmd_pkt.cdb[1] = flags;
7961         ioarcb->cmd_pkt.cdb[2] = page;
7962         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7963
7964         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7965
7966         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7967         LEAVE;
7968 }
7969
7970 /**
7971  * ipr_inquiry_page_supported - Is the given inquiry page supported
7972  * @page0:              inquiry page 0 buffer
7973  * @page:               page code.
7974  *
7975  * This function determines if the specified inquiry page is supported.
7976  *
7977  * Return value:
7978  *      1 if page is supported / 0 if not
7979  **/
7980 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7981 {
7982         int i;
7983
7984         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7985                 if (page0->page[i] == page)
7986                         return 1;
7987
7988         return 0;
7989 }
7990
7991 /**
7992  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7993  * @ipr_cmd:    ipr command struct
7994  *
7995  * This function sends a Page 0xC4 inquiry to the adapter
7996  * to retrieve software VPD information.
7997  *
7998  * Return value:
7999  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8000  **/
8001 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8002 {
8003         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8004         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8005         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8006
8007         ENTER;
8008         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8009         memset(pageC4, 0, sizeof(*pageC4));
8010
8011         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8012                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8013                                   (ioa_cfg->vpd_cbs_dma
8014                                    + offsetof(struct ipr_misc_cbs,
8015                                               pageC4_data)),
8016                                   sizeof(struct ipr_inquiry_pageC4));
8017                 return IPR_RC_JOB_RETURN;
8018         }
8019
8020         LEAVE;
8021         return IPR_RC_JOB_CONTINUE;
8022 }
8023
8024 /**
8025  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8026  * @ipr_cmd:    ipr command struct
8027  *
8028  * This function sends a Page 0xD0 inquiry to the adapter
8029  * to retrieve adapter capabilities.
8030  *
8031  * Return value:
8032  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8033  **/
8034 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8035 {
8036         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8037         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8038         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8039
8040         ENTER;
8041         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8042         memset(cap, 0, sizeof(*cap));
8043
8044         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8045                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8046                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8047                                   sizeof(struct ipr_inquiry_cap));
8048                 return IPR_RC_JOB_RETURN;
8049         }
8050
8051         LEAVE;
8052         return IPR_RC_JOB_CONTINUE;
8053 }
8054
8055 /**
8056  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8057  * @ipr_cmd:    ipr command struct
8058  *
8059  * This function sends a Page 3 inquiry to the adapter
8060  * to retrieve software VPD information.
8061  *
8062  * Return value:
8063  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8064  **/
8065 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8066 {
8067         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8068
8069         ENTER;
8070
8071         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8072
8073         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8074                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8075                           sizeof(struct ipr_inquiry_page3));
8076
8077         LEAVE;
8078         return IPR_RC_JOB_RETURN;
8079 }
8080
8081 /**
8082  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8083  * @ipr_cmd:    ipr command struct
8084  *
8085  * This function sends a Page 0 inquiry to the adapter
8086  * to retrieve supported inquiry pages.
8087  *
8088  * Return value:
8089  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8090  **/
8091 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8092 {
8093         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8094         char type[5];
8095
8096         ENTER;
8097
8098         /* Grab the type out of the VPD and store it away */
8099         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8100         type[4] = '\0';
8101         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8102
8103         if (ipr_invalid_adapter(ioa_cfg)) {
8104                 dev_err(&ioa_cfg->pdev->dev,
8105                         "Adapter not supported in this hardware configuration.\n");
8106
8107                 if (!ipr_testmode) {
8108                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8109                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8110                         list_add_tail(&ipr_cmd->queue,
8111                                         &ioa_cfg->hrrq->hrrq_free_q);
8112                         return IPR_RC_JOB_RETURN;
8113                 }
8114         }
8115
8116         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8117
8118         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8119                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8120                           sizeof(struct ipr_inquiry_page0));
8121
8122         LEAVE;
8123         return IPR_RC_JOB_RETURN;
8124 }
8125
8126 /**
8127  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8128  * @ipr_cmd:    ipr command struct
8129  *
8130  * This function sends a standard inquiry to the adapter.
8131  *
8132  * Return value:
8133  *      IPR_RC_JOB_RETURN
8134  **/
8135 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8136 {
8137         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8138
8139         ENTER;
8140         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8141
8142         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8143                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8144                           sizeof(struct ipr_ioa_vpd));
8145
8146         LEAVE;
8147         return IPR_RC_JOB_RETURN;
8148 }
8149
8150 /**
8151  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8152  * @ipr_cmd:    ipr command struct
8153  *
8154  * This function sends an Identify Host Request Response Queue
8155  * command to establish the HRRQ with the adapter.
8156  *
8157  * Return value:
8158  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8159  **/
8160 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8161 {
8162         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8163         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8164         struct ipr_hrr_queue *hrrq;
8165
8166         ENTER;
8167         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8168         if (ioa_cfg->identify_hrrq_index == 0)
8169                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8170
8171         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8172                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8173
8174                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8175                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8176
8177                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8178                 if (ioa_cfg->sis64)
8179                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8180
8181                 if (ioa_cfg->nvectors == 1)
8182                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8183                 else
8184                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8185
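                /* Encode the HRRQ DMA address and byte length big-endian
                   into the CDB */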
8186                 ioarcb->cmd_pkt.cdb[2] =
8187                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8188                 ioarcb->cmd_pkt.cdb[3] =
8189                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8190                 ioarcb->cmd_pkt.cdb[4] =
8191                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8192                 ioarcb->cmd_pkt.cdb[5] =
8193                         ((u64) hrrq->host_rrq_dma) & 0xff;
8194                 ioarcb->cmd_pkt.cdb[7] =
8195                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8196                 ioarcb->cmd_pkt.cdb[8] =
8197                         (sizeof(u32) * hrrq->size) & 0xff;
8198
8199                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8200                         ioarcb->cmd_pkt.cdb[9] =
8201                                         ioa_cfg->identify_hrrq_index;
8202
8203                 if (ioa_cfg->sis64) {
8204                         ioarcb->cmd_pkt.cdb[10] =
8205                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8206                         ioarcb->cmd_pkt.cdb[11] =
8207                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8208                         ioarcb->cmd_pkt.cdb[12] =
8209                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8210                         ioarcb->cmd_pkt.cdb[13] =
8211                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8212                 }
8213
8214                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8215                         ioarcb->cmd_pkt.cdb[14] =
8216                                         ioa_cfg->identify_hrrq_index;
8217
8218                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8219                            IPR_INTERNAL_TIMEOUT);
8220
8221                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8222                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8223
8224                 LEAVE;
8225                 return IPR_RC_JOB_RETURN;
8226         }
8227
8228         LEAVE;
8229         return IPR_RC_JOB_CONTINUE;
8230 }
8231
8232 /**
8233  * ipr_reset_timer_done - Adapter reset timer function
8234  * @t:          Timer context used to fetch ipr command struct
8235  *
8236  * Description: This function is used in adapter reset processing
8237  * for timing events. If the reset_cmd pointer in the IOA
8238  * config struct is not this adapter's, we are doing nested
8239  * resets and fail_all_ops will take care of freeing the
8240  * command block.
8241  *
8242  * Return value:
8243  *      none
8244  **/
8245 static void ipr_reset_timer_done(struct timer_list *t)
8246 {
8247         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8248         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8249         unsigned long lock_flags = 0;
8250
8251         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8252
8253         if (ioa_cfg->reset_cmd == ipr_cmd) {
8254                 list_del(&ipr_cmd->queue);
8255                 ipr_cmd->done(ipr_cmd);
8256         }
8257
8258         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8259 }
8260
8261 /**
8262  * ipr_reset_start_timer - Start a timer for adapter reset job
8263  * @ipr_cmd:    ipr command struct
8264  * @timeout:    timeout value
8265  *
8266  * Description: This function is used in adapter reset processing
8267  * for timing events. If the reset_cmd pointer in the IOA
8268  * config struct is not this adapter's, we are doing nested
8269  * resets and fail_all_ops will take care of freeing the
8270  * command block.
8271  *
8272  * Return value:
8273  *      none
8274  **/
8275 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8276                                   unsigned long timeout)
8277 {
8279         ENTER;
8280         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8281         ipr_cmd->done = ipr_reset_ioa_job;
8282
8283         ipr_cmd->timer.expires = jiffies + timeout;
8284         ipr_cmd->timer.function = ipr_reset_timer_done;
8285         add_timer(&ipr_cmd->timer);
8286 }
8287
8288 /**
8289  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8290  * @ioa_cfg:    ioa cfg struct
8291  *
8292  * Return value:
8293  *      nothing
8294  **/
8295 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8296 {
8297         struct ipr_hrr_queue *hrrq;
8298
8299         for_each_hrrq(hrrq, ioa_cfg) {
8300                 spin_lock(&hrrq->_lock);
8301                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8302
8303                 /* Initialize Host RRQ pointers */
8304                 hrrq->hrrq_start = hrrq->host_rrq;
8305                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8306                 hrrq->hrrq_curr = hrrq->hrrq_start;
8307                 hrrq->toggle_bit = 1;
8308                 spin_unlock(&hrrq->_lock);
8309         }
8310         wmb();
8311
8312         ioa_cfg->identify_hrrq_index = 0;
8313         if (ioa_cfg->hrrq_num == 1)
8314                 atomic_set(&ioa_cfg->hrrq_index, 0);
8315         else
8316                 atomic_set(&ioa_cfg->hrrq_index, 1);
8317
8318         /* Zero out config table */
8319         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8320 }
8321
8322 /**
8323  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8324  * @ipr_cmd:    ipr command struct
8325  *
8326  * Return value:
8327  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8328  **/
8329 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8330 {
8331         unsigned long stage, stage_time;
8332         u32 feedback;
8333         volatile u32 int_reg;
8334         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8335         u64 maskval = 0;
8336
8337         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8338         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8339         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8340
8341         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8342
8343         /* sanity check the stage_time value */
8344         if (stage_time == 0)
8345                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8346         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8347                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8348         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8349                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8350
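        /* If the IOA has signalled the transition to operational, continue
           straight to HRRQ identification; otherwise arm the stage timer
           below and wait for the next stage change */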
8351         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8352                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8353                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8354                 stage_time = ioa_cfg->transop_timeout;
8355                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8356         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8357                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8358                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8359                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8360                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8361                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8362                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8363                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8364                         return IPR_RC_JOB_CONTINUE;
8365                 }
8366         }
8367
8368         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8369         ipr_cmd->timer.function = ipr_oper_timeout;
8370         ipr_cmd->done = ipr_reset_ioa_job;
8371         add_timer(&ipr_cmd->timer);
8372
8373         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8374
8375         return IPR_RC_JOB_RETURN;
8376 }
8377
8378 /**
8379  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8380  * @ipr_cmd:    ipr command struct
8381  *
8382  * This function reinitializes some control blocks and
8383  * enables destructive diagnostics on the adapter.
8384  *
8385  * Return value:
8386  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8387  **/
8388 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8389 {
8390         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8391         volatile u32 int_reg;
8392         volatile u64 maskval;
8393         int i;
8394
8395         ENTER;
8396         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8397         ipr_init_ioa_mem(ioa_cfg);
8398
8399         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8400                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8401                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8402                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8403         }
8404         wmb();
8405         if (ioa_cfg->sis64) {
8406                 /* Set the adapter to the correct endian mode. */
8407                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8408                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8409         }
8410
8411         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8412
8413         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8414                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8415                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8416                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8417                 return IPR_RC_JOB_CONTINUE;
8418         }
8419
8420         /* Enable destructive diagnostics on IOA */
8421         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8422
8423         if (ioa_cfg->sis64) {
8424                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8425                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8426                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8427         } else
8428                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8429
8430         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8431
8432         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8433
8434         if (ioa_cfg->sis64) {
8435                 ipr_cmd->job_step = ipr_reset_next_stage;
8436                 return IPR_RC_JOB_CONTINUE;
8437         }
8438
8439         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8440         ipr_cmd->timer.function = ipr_oper_timeout;
8441         ipr_cmd->done = ipr_reset_ioa_job;
8442         add_timer(&ipr_cmd->timer);
8443         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8444
8445         LEAVE;
8446         return IPR_RC_JOB_RETURN;
8447 }
8448
8449 /**
8450  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8451  * @ipr_cmd:    ipr command struct
8452  *
8453  * This function is invoked when an adapter dump has run out
8454  * of processing time.
8455  *
8456  * Return value:
8457  *      IPR_RC_JOB_CONTINUE
8458  **/
8459 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8460 {
8461         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8462
8463         if (ioa_cfg->sdt_state == GET_DUMP)
8464                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8465         else if (ioa_cfg->sdt_state == READ_DUMP)
8466                 ioa_cfg->sdt_state = ABORT_DUMP;
8467
8468         ioa_cfg->dump_timeout = 1;
8469         ipr_cmd->job_step = ipr_reset_alert;
8470
8471         return IPR_RC_JOB_CONTINUE;
8472 }
8473
8474 /**
8475  * ipr_unit_check_no_data - Log a unit check/no data error log
8476  * @ioa_cfg:            ioa config struct
8477  *
8478  * Logs an error indicating the adapter unit checked, but for some
8479  * reason, we were unable to fetch the unit check buffer.
8480  *
8481  * Return value:
8482  *      nothing
8483  **/
8484 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8485 {
8486         ioa_cfg->errors_logged++;
8487         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8488 }
8489
8490 /**
8491  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8492  * @ioa_cfg:            ioa config struct
8493  *
8494  * Fetches the unit check buffer from the adapter by clocking the data
8495  * through the mailbox register.
8496  *
8497  * Return value:
8498  *      nothing
8499  **/
8500 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8501 {
8502         unsigned long mailbox;
8503         struct ipr_hostrcb *hostrcb;
8504         struct ipr_uc_sdt sdt;
8505         int rc, length;
8506         u32 ioasc;
8507
8508         mailbox = readl(ioa_cfg->ioa_mailbox);
8509
8510         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8511                 ipr_unit_check_no_data(ioa_cfg);
8512                 return;
8513         }
8514
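             /* Fetch the simple dump table header via the mailbox, then validate its first entry */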
8515         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8516         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8517                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8518
8519         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8520             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8521             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8522                 ipr_unit_check_no_data(ioa_cfg);
8523                 return;
8524         }
8525
8526         /* Find length of the first sdt entry (UC buffer) */
8527         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8528                 length = be32_to_cpu(sdt.entry[0].end_token);
8529         else
8530                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8531                           be32_to_cpu(sdt.entry[0].start_token)) &
8532                           IPR_FMT2_MBX_ADDR_MASK;
8533
8534         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8535                              struct ipr_hostrcb, queue);
8536         list_del_init(&hostrcb->queue);
8537         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8538
8539         rc = ipr_get_ldump_data_section(ioa_cfg,
8540                                         be32_to_cpu(sdt.entry[0].start_token),
8541                                         (__be32 *)&hostrcb->hcam,
8542                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8543
8544         if (!rc) {
8545                 ipr_handle_log_data(ioa_cfg, hostrcb);
8546                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8547                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8548                     ioa_cfg->sdt_state == GET_DUMP)
8549                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8550         } else
8551                 ipr_unit_check_no_data(ioa_cfg);
8552
8553         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8554 }
8555
8556 /**
8557  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8558  * @ipr_cmd:    ipr command struct
8559  *
8560  * Description: This function retrieves the unit check buffer from the adapter.
8561  *
8562  * Return value:
8563  *      IPR_RC_JOB_RETURN
8564  **/
8565 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8566 {
8567         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8568
8569         ENTER;
8570         ioa_cfg->ioa_unit_checked = 0;
8571         ipr_get_unit_check_buffer(ioa_cfg);
8572         ipr_cmd->job_step = ipr_reset_alert;
8573         ipr_reset_start_timer(ipr_cmd, 0);
8574
8575         LEAVE;
8576         return IPR_RC_JOB_RETURN;
8577 }
8578
8579 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8580 {
8581         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8582
8583         ENTER;
8584
8585         if (ioa_cfg->sdt_state != GET_DUMP)
8586                 return IPR_RC_JOB_RETURN;
8587
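             /* SIS32 proceeds immediately; SIS64 polls until the mailbox is stable or the wait expires */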
8588         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8589             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8590              IPR_PCII_MAILBOX_STABLE)) {
8591
8592                 if (!ipr_cmd->u.time_left)
8593                         dev_err(&ioa_cfg->pdev->dev,
8594                                 "Timed out waiting for Mailbox register.\n");
8595
8596                 ioa_cfg->sdt_state = READ_DUMP;
8597                 ioa_cfg->dump_timeout = 0;
8598                 if (ioa_cfg->sis64)
8599                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8600                 else
8601                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8602                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8603                 schedule_work(&ioa_cfg->work_q);
8604
8605         } else {
8606                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8607                 ipr_reset_start_timer(ipr_cmd,
8608                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8609         }
8610
8611         LEAVE;
8612         return IPR_RC_JOB_RETURN;
8613 }
8614
8615 /**
8616  * ipr_reset_restore_cfg_space - Restore PCI config space.
8617  * @ipr_cmd:    ipr command struct
8618  *
8619  * Description: This function restores the saved PCI config space of
8620  * the adapter, fails all outstanding ops back to the callers, and
8621  * fetches the dump/unit check if applicable to this reset.
8622  *
8623  * Return value:
8624  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8625  **/
8626 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8627 {
8628         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8629         u32 int_reg;
8630
8631         ENTER;
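             /* Mark the saved PCI state valid so pci_restore_state() performs the restore */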
8632         ioa_cfg->pdev->state_saved = true;
8633         pci_restore_state(ioa_cfg->pdev);
8634
8635         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8636                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8637                 return IPR_RC_JOB_CONTINUE;
8638         }
8639
8640         ipr_fail_all_ops(ioa_cfg);
8641
8642         if (ioa_cfg->sis64) {
8643                 /* Set the adapter to the correct endian mode. */
8644                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8645                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8646         }
8647
8648         if (ioa_cfg->ioa_unit_checked) {
8649                 if (ioa_cfg->sis64) {
8650                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8651                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8652                         return IPR_RC_JOB_RETURN;
8653                 } else {
8654                         ioa_cfg->ioa_unit_checked = 0;
8655                         ipr_get_unit_check_buffer(ioa_cfg);
8656                         ipr_cmd->job_step = ipr_reset_alert;
8657                         ipr_reset_start_timer(ipr_cmd, 0);
8658                         return IPR_RC_JOB_RETURN;
8659                 }
8660         }
8661
8662         if (ioa_cfg->in_ioa_bringdown) {
8663                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8664         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8665                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8666                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8667         } else {
8668                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8669         }
8670
8671         LEAVE;
8672         return IPR_RC_JOB_CONTINUE;
8673 }
8674
8675 /**
8676  * ipr_reset_bist_done - BIST has completed on the adapter.
8677  * @ipr_cmd:    ipr command struct
8678  *
8679  * Description: Unblock config space and resume the reset process.
8680  *
8681  * Return value:
8682  *      IPR_RC_JOB_CONTINUE
8683  **/
8684 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8685 {
8686         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8687
8688         ENTER;
8689         if (ioa_cfg->cfg_locked)
8690                 pci_cfg_access_unlock(ioa_cfg->pdev);
8691         ioa_cfg->cfg_locked = 0;
8692         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8693         LEAVE;
8694         return IPR_RC_JOB_CONTINUE;
8695 }
8696
8697 /**
8698  * ipr_reset_start_bist - Run BIST on the adapter.
8699  * @ipr_cmd:    ipr command struct
8700  *
8701  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8702  *
8703  * Return value:
8704  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8705  **/
8706 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8707 {
8708         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8709         int rc = PCIBIOS_SUCCESSFUL;
8710
8711         ENTER;
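             /* MMIO-capable (SIS64) chips start BIST via a uproc doorbell write; older chips use the PCI config BIST register */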
8712         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8713                 writel(IPR_UPROCI_SIS64_START_BIST,
8714                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8715         else
8716                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8717
8718         if (rc == PCIBIOS_SUCCESSFUL) {
8719                 ipr_cmd->job_step = ipr_reset_bist_done;
8720                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8721                 rc = IPR_RC_JOB_RETURN;
8722         } else {
8723                 if (ioa_cfg->cfg_locked)
8724                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8725                 ioa_cfg->cfg_locked = 0;
8726                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8727                 rc = IPR_RC_JOB_CONTINUE;
8728         }
8729
8730         LEAVE;
8731         return rc;
8732 }
8733
8734 /**
8735  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8736  * @ipr_cmd:    ipr command struct
8737  *
8738  * Description: This clears PCI reset to the adapter and delays two seconds.
8739  *
8740  * Return value:
8741  *      IPR_RC_JOB_RETURN
8742  **/
8743 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8744 {
8745         ENTER;
8746         ipr_cmd->job_step = ipr_reset_bist_done;
8747         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8748         LEAVE;
8749         return IPR_RC_JOB_RETURN;
8750 }
8751
8752 /**
8753  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8754  * @work:       work struct
8755  *
8756  * Description: This pulses warm reset to a slot.
8757  *
8758  **/
8759 static void ipr_reset_reset_work(struct work_struct *work)
8760 {
8761         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8762         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8763         struct pci_dev *pdev = ioa_cfg->pdev;
8764         unsigned long lock_flags = 0;
8765
8766         ENTER;
8767         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8768         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8769         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8770
8771         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8772         if (ioa_cfg->reset_cmd == ipr_cmd)
8773                 ipr_reset_ioa_job(ipr_cmd);
8774         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8775         LEAVE;
8776 }
8777
8778 /**
8779  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8780  * @ipr_cmd:    ipr command struct
8781  *
8782  * Description: This asserts PCI reset to the adapter.
8783  *
8784  * Return value:
8785  *      IPR_RC_JOB_RETURN
8786  **/
8787 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8788 {
8789         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8790
8791         ENTER;
8792         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8793         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8794         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8795         LEAVE;
8796         return IPR_RC_JOB_RETURN;
8797 }
8798
8799 /**
8800  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8801  * @ipr_cmd:    ipr command struct
8802  *
8803  * Description: This attempts to block config access to the IOA.
8804  *
8805  * Return value:
8806  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8807  **/
8808 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8809 {
8810         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8811         int rc = IPR_RC_JOB_CONTINUE;
8812
8813         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8814                 ioa_cfg->cfg_locked = 1;
8815                 ipr_cmd->job_step = ioa_cfg->reset;
8816         } else {
8817                 if (ipr_cmd->u.time_left) {
8818                         rc = IPR_RC_JOB_RETURN;
8819                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8820                         ipr_reset_start_timer(ipr_cmd,
8821                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8822                 } else {
8823                         ipr_cmd->job_step = ioa_cfg->reset;
8824                         dev_err(&ioa_cfg->pdev->dev,
8825                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8826                 }
8827         }
8828
8829         return rc;
8830 }
8831
8832 /**
8833  * ipr_reset_block_config_access - Block config access to the IOA
8834  * @ipr_cmd:    ipr command struct
8835  *
8836  * Description: This attempts to block config access to the IOA
8837  *
8838  * Return value:
8839  *      IPR_RC_JOB_CONTINUE
8840  **/
8841 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8842 {
8843         ipr_cmd->ioa_cfg->cfg_locked = 0;
8844         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8845         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8846         return IPR_RC_JOB_CONTINUE;
8847 }
8848
8849 /**
8850  * ipr_reset_allowed - Query whether or not IOA can be reset
8851  * @ioa_cfg:    ioa config struct
8852  *
8853  * Return value:
8854  *      0 if reset not allowed / non-zero if reset is allowed
8855  **/
8856 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8857 {
8858         volatile u32 temp_reg;
8859
8860         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8861         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8862 }
8863
8864 /**
8865  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8866  * @ipr_cmd:    ipr command struct
8867  *
8868  * Description: This function waits for adapter permission to run BIST,
8869  * then runs BIST. If the adapter does not give permission after a
8870  * reasonable time, we will reset the adapter anyway. Resetting the
8871  * adapter without warning it risks losing its persistent error log;
8872  * if the adapter is reset while writing to its flash, the flash
8873  * segment will have bad ECC and be zeroed.
8875  *
8876  * Return value:
8877  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8878  **/
8879 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8880 {
8881         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8882         int rc = IPR_RC_JOB_RETURN;
8883
8884         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8885                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8886                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8887         } else {
8888                 ipr_cmd->job_step = ipr_reset_block_config_access;
8889                 rc = IPR_RC_JOB_CONTINUE;
8890         }
8891
8892         return rc;
8893 }
8894
8895 /**
8896  * ipr_reset_alert - Alert the adapter of a pending reset
8897  * @ipr_cmd:    ipr command struct
8898  *
8899  * Description: This function alerts the adapter that it will be reset.
8900  * If memory space is not currently enabled, proceed directly
8901  * to running BIST on the adapter. The timer must always be started
8902  * so we guarantee we do not run BIST from ipr_isr.
8903  *
8904  * Return value:
8905  *      IPR_RC_JOB_RETURN
8906  **/
8907 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8908 {
8909         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8910         u16 cmd_reg;
8911         int rc;
8912
8913         ENTER;
8914         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8915
8916         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8917                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8918                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8919                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8920         } else {
8921                 ipr_cmd->job_step = ipr_reset_block_config_access;
8922         }
8923
8924         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8925         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8926
8927         LEAVE;
8928         return IPR_RC_JOB_RETURN;
8929 }
8930
8931 /**
8932  * ipr_reset_quiesce_done - Complete IOA disconnect
8933  * @ipr_cmd:    ipr command struct
8934  *
8935  * Description: Freeze the adapter to complete quiesce processing
8936  *
8937  * Return value:
8938  *      IPR_RC_JOB_CONTINUE
8939  **/
8940 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8941 {
8942         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8943
8944         ENTER;
8945         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8946         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8947         LEAVE;
8948         return IPR_RC_JOB_CONTINUE;
8949 }
8950
8951 /**
8952  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8953  * @ipr_cmd:    ipr command struct
8954  *
8955  * Description: Ensure nothing is outstanding to the IOA and
8956  * proceed with IOA disconnect. Otherwise reset the IOA.
8957  *
8958  * Return value:
8959  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8960  **/
8961 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8962 {
8963         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8964         struct ipr_cmnd *loop_cmd;
8965         struct ipr_hrr_queue *hrrq;
8966         int rc = IPR_RC_JOB_CONTINUE;
8967         int count = 0;
8968
8969         ENTER;
8970         ipr_cmd->job_step = ipr_reset_quiesce_done;
8971
8972         for_each_hrrq(hrrq, ioa_cfg) {
8973                 spin_lock(&hrrq->_lock);
8974                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8975                         count++;
8976                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8977                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8978                         rc = IPR_RC_JOB_RETURN;
8979                         break;
8980                 }
8981                 spin_unlock(&hrrq->_lock);
8982
8983                 if (count)
8984                         break;
8985         }
8986
8987         LEAVE;
8988         return rc;
8989 }
8990
8991 /**
8992  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8993  * @ipr_cmd:    ipr command struct
8994  *
8995  * Description: Cancel any outstanding HCAMs to the IOA.
8996  *
8997  * Return value:
8998  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8999  **/
9000 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9001 {
9002         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9003         int rc = IPR_RC_JOB_CONTINUE;
9004         struct ipr_cmd_pkt *cmd_pkt;
9005         struct ipr_cmnd *hcam_cmd;
9006         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9007
9008         ENTER;
9009         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9010
9011         if (!hrrq->ioa_is_dead) {
9012                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9013                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9014                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9015                                         continue;
9016
9017                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9019                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9020                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9021                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9022                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
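                                     /* Split the 64-bit IOARCB address: upper 32 bits in CDB bytes 10-13, lower 32 bits in bytes 2-5 */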
9023                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9024                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9025                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9026                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9027                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9028                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9029                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9030                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9031
9032                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9033                                            IPR_CANCEL_TIMEOUT);
9034
9035                                 rc = IPR_RC_JOB_RETURN;
9036                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9037                                 break;
9038                         }
9039                 }
9040         } else
9041                 ipr_cmd->job_step = ipr_reset_alert;
9042
9043         LEAVE;
9044         return rc;
9045 }
9046
9047 /**
9048  * ipr_reset_ucode_download_done - Microcode download completion
9049  * @ipr_cmd:    ipr command struct
9050  *
9051  * Description: This function unmaps the microcode download buffer.
9052  *
9053  * Return value:
9054  *      IPR_RC_JOB_CONTINUE
9055  **/
9056 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9057 {
9058         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9059         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9060
9061         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9062                      sglist->num_sg, DMA_TO_DEVICE);
9063
9064         ipr_cmd->job_step = ipr_reset_alert;
9065         return IPR_RC_JOB_CONTINUE;
9066 }
9067
9068 /**
9069  * ipr_reset_ucode_download - Download microcode to the adapter
9070  * @ipr_cmd:    ipr command struct
9071  *
9072  * Description: This function checks to see if there is microcode
9073  * to download to the adapter. If there is, a download is performed.
9074  *
9075  * Return value:
9076  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9077  **/
9078 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9079 {
9080         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9081         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9082
9083         ENTER;
9084         ipr_cmd->job_step = ipr_reset_alert;
9085
9086         if (!sglist)
9087                 return IPR_RC_JOB_CONTINUE;
9088
9089         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9090         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9091         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9092         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
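             /* The image length is encoded big-endian in WRITE BUFFER CDB bytes 6-8 */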
9093         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9094         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9095         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9096
9097         if (ioa_cfg->sis64)
9098                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9099         else
9100                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9101         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9102
9103         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9104                    IPR_WRITE_BUFFER_TIMEOUT);
9105
9106         LEAVE;
9107         return IPR_RC_JOB_RETURN;
9108 }
9109
9110 /**
9111  * ipr_reset_shutdown_ioa - Shutdown the adapter
9112  * @ipr_cmd:    ipr command struct
9113  *
9114  * Description: This function issues an adapter shutdown of the
9115  * specified type to the specified adapter as part of the
9116  * adapter reset job.
9117  *
9118  * Return value:
9119  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9120  **/
9121 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9122 {
9123         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9124         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9125         unsigned long timeout;
9126         int rc = IPR_RC_JOB_CONTINUE;
9127
9128         ENTER;
9129         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9130                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9131         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9132                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9133                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9134                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9135                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9136                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9137
9138                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9139                         timeout = IPR_SHUTDOWN_TIMEOUT;
9140                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9141                         timeout = IPR_INTERNAL_TIMEOUT;
9142                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9143                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9144                 else
9145                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9146
9147                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9148
9149                 rc = IPR_RC_JOB_RETURN;
9150                 ipr_cmd->job_step = ipr_reset_ucode_download;
9151         } else
9152                 ipr_cmd->job_step = ipr_reset_alert;
9153
9154         LEAVE;
9155         return rc;
9156 }
9157
9158 /**
9159  * ipr_reset_ioa_job - Adapter reset job
9160  * @ipr_cmd:    ipr command struct
9161  *
9162  * Description: This function is the job router for the adapter reset job.
9163  *
9164  * Return value:
9165  *      none
9166  **/
9167 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9168 {
9169         u32 rc, ioasc;
9170         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9171
9172         do {
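                     /*
                      * Job-step state machine: IPR_RC_JOB_CONTINUE runs the next
                      * step inline; IPR_RC_JOB_RETURN means the step armed a timer
                      * or command whose completion will re-enter this router.
                      */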
9173                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9174
9175                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9176                         /*
9177                          * We are doing nested adapter resets and this is
9178                          * not the current reset job.
9179                          */
9180                         list_add_tail(&ipr_cmd->queue,
9181                                         &ipr_cmd->hrrq->hrrq_free_q);
9182                         return;
9183                 }
9184
9185                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9186                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9187                         if (rc == IPR_RC_JOB_RETURN)
9188                                 return;
9189                 }
9190
9191                 ipr_reinit_ipr_cmnd(ipr_cmd);
9192                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9193                 rc = ipr_cmd->job_step(ipr_cmd);
9194         } while (rc == IPR_RC_JOB_CONTINUE);
9195 }
9196
9197 /**
9198  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9199  * @ioa_cfg:            ioa config struct
9200  * @job_step:           first job step of reset job
9201  * @shutdown_type:      shutdown type
9202  *
9203  * Description: This function will initiate the reset of the given adapter
9204  * starting at the selected job step.
9205  * If the caller needs to wait on the completion of the reset,
9206  * the caller must sleep on the reset_wait_q.
9207  *
9208  * Return value:
9209  *      none
9210  **/
9211 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9212                                     int (*job_step) (struct ipr_cmnd *),
9213                                     enum ipr_shutdown_type shutdown_type)
9214 {
9215         struct ipr_cmnd *ipr_cmd;
9216         int i;
9217
9218         ioa_cfg->in_reset_reload = 1;
9219         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9220                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9221                 ioa_cfg->hrrq[i].allow_cmds = 0;
9222                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9223         }
9224         wmb();
9225         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9226                 ioa_cfg->scsi_unblock = 0;
9227                 ioa_cfg->scsi_blocked = 1;
9228                 scsi_block_requests(ioa_cfg->host);
9229         }
9230
9231         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9232         ioa_cfg->reset_cmd = ipr_cmd;
9233         ipr_cmd->job_step = job_step;
9234         ipr_cmd->u.shutdown_type = shutdown_type;
9235
9236         ipr_reset_ioa_job(ipr_cmd);
9237 }
9238
9239 /**
9240  * ipr_initiate_ioa_reset - Initiate an adapter reset
9241  * @ioa_cfg:            ioa config struct
9242  * @shutdown_type:      shutdown type
9243  *
9244  * Description: This function will initiate the reset of the given adapter.
9245  * If the caller needs to wait on the completion of the reset,
9246  * the caller must sleep on the reset_wait_q.
9247  *
9248  * Return value:
9249  *      none
9250  **/
9251 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9252                                    enum ipr_shutdown_type shutdown_type)
9253 {
9254         int i;
9255
9256         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9257                 return;
9258
9259         if (ioa_cfg->in_reset_reload) {
9260                 if (ioa_cfg->sdt_state == GET_DUMP)
9261                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9262                 else if (ioa_cfg->sdt_state == READ_DUMP)
9263                         ioa_cfg->sdt_state = ABORT_DUMP;
9264         }
9265
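             /* After too many consecutive failed resets, take the adapter offline instead of retrying forever */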
9266         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9267                 dev_err(&ioa_cfg->pdev->dev,
9268                         "IOA taken offline - error recovery failed\n");
9269
9270                 ioa_cfg->reset_retries = 0;
9271                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9272                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9273                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9274                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9275                 }
9276                 wmb();
9277
9278                 if (ioa_cfg->in_ioa_bringdown) {
9279                         ioa_cfg->reset_cmd = NULL;
9280                         ioa_cfg->in_reset_reload = 0;
9281                         ipr_fail_all_ops(ioa_cfg);
9282                         wake_up_all(&ioa_cfg->reset_wait_q);
9283
9284                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9285                                 ioa_cfg->scsi_unblock = 1;
9286                                 schedule_work(&ioa_cfg->work_q);
9287                         }
9288                         return;
9289                 } else {
9290                         ioa_cfg->in_ioa_bringdown = 1;
9291                         shutdown_type = IPR_SHUTDOWN_NONE;
9292                 }
9293         }
9294
9295         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9296                                 shutdown_type);
9297 }
9298
9299 /**
9300  * ipr_reset_freeze - Hold off all I/O activity
9301  * @ipr_cmd:    ipr command struct
9302  *
9303  * Description: If the PCI slot is frozen, hold off all I/O
9304  * activity; then, as soon as the slot is available again,
9305  * initiate an adapter reset.
9306  */
9307 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9308 {
9309         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9310         int i;
9311
9312         /* Disallow new interrupts, avoid loop */
9313         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9314                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9315                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9316                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9317         }
9318         wmb();
9319         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9320         ipr_cmd->done = ipr_reset_ioa_job;
9321         return IPR_RC_JOB_RETURN;
9322 }
9323
9324 /**
9325  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9326  * @pdev:       PCI device struct
9327  *
9328  * Description: This routine is called to tell us that the MMIO
9329  * access to the IOA has been restored
9330  */
9331 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9332 {
9333         unsigned long flags = 0;
9334         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9335
9336         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9337         if (!ioa_cfg->probe_done)
9338                 pci_save_state(pdev);
9339         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9340         return PCI_ERS_RESULT_NEED_RESET;
9341 }
9342
9343 /**
9344  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9345  * @pdev:       PCI device struct
9346  *
9347  * Description: This routine is called to tell us that the PCI bus
9348  * is down. Can't do anything here, except put the device driver
9349  * into a holding pattern, waiting for the PCI bus to come back.
9350  */
9351 static void ipr_pci_frozen(struct pci_dev *pdev)
9352 {
9353         unsigned long flags = 0;
9354         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9355
9356         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9357         if (ioa_cfg->probe_done)
9358                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9359         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9360 }
9361
9362 /**
9363  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9364  * @pdev:       PCI device struct
9365  *
9366  * Description: This routine is called by the pci error recovery
9367  * code after the PCI slot has been reset, just before we
9368  * should resume normal operations.
9369  */
9370 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9371 {
9372         unsigned long flags = 0;
9373         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9374
9375         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9376         if (ioa_cfg->probe_done) {
9377                 if (ioa_cfg->needs_warm_reset)
9378                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9379                 else
9380                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9381                                                 IPR_SHUTDOWN_NONE);
9382         } else
9383                 wake_up_all(&ioa_cfg->eeh_wait_q);
9384         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9385         return PCI_ERS_RESULT_RECOVERED;
9386 }
9387
9388 /**
9389  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9390  * @pdev:       PCI device struct
9391  *
9392  * Description: This routine is called when the PCI bus has
9393  * permanently failed.
9394  */
9395 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9396 {
9397         unsigned long flags = 0;
9398         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9399         int i;
9400
9401         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9402         if (ioa_cfg->probe_done) {
9403                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9404                         ioa_cfg->sdt_state = ABORT_DUMP;
9405                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9406                 ioa_cfg->in_ioa_bringdown = 1;
9407                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9408                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9409                         ioa_cfg->hrrq[i].allow_cmds = 0;
9410                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9411                 }
9412                 wmb();
9413                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9414         } else
9415                 wake_up_all(&ioa_cfg->eeh_wait_q);
9416         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9417 }
9418
9419 /**
9420  * ipr_pci_error_detected - Called when a PCI error is detected.
9421  * @pdev:       PCI device struct
9422  * @state:      PCI channel state
9423  *
9424  * Description: Called when a PCI error is detected.
9425  *
9426  * Return value:
9427  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9428  */
9429 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9430                                                pci_channel_state_t state)
9431 {
9432         switch (state) {
9433         case pci_channel_io_frozen:
9434                 ipr_pci_frozen(pdev);
9435                 return PCI_ERS_RESULT_CAN_RECOVER;
9436         case pci_channel_io_perm_failure:
9437                 ipr_pci_perm_failure(pdev);
9438                 return PCI_ERS_RESULT_DISCONNECT;
9440         default:
9441                 break;
9442         }
9443         return PCI_ERS_RESULT_NEED_RESET;
9444 }
9445
9446 /**
9447  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9448  * @ioa_cfg:    ioa cfg struct
9449  *
9450  * Description: This is the second phase of adapter initialization.
9451  * This function takes care of initializing the adapter to the point
9452  * where it can accept new commands.
9453  *
9454  * Return value:
9455  *      0 on success / -EIO on failure
9456  **/
9457 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9458 {
9459         int rc = 0;
9460         unsigned long host_lock_flags = 0;
9461
9462         ENTER;
9463         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9464         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9465         ioa_cfg->probe_done = 1;
9466         if (ioa_cfg->needs_hard_reset) {
9467                 ioa_cfg->needs_hard_reset = 0;
9468                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9469         } else
9470                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9471                                         IPR_SHUTDOWN_NONE);
9472         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9473
9474         LEAVE;
9475         return rc;
9476 }
9477
9478 /**
9479  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9480  * @ioa_cfg:    ioa config struct
9481  *
9482  * Return value:
9483  *      none
9484  **/
9485 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9486 {
9487         int i;
9488
9489         if (ioa_cfg->ipr_cmnd_list) {
9490                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9491                         if (ioa_cfg->ipr_cmnd_list[i])
9492                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9493                                               ioa_cfg->ipr_cmnd_list[i],
9494                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9495
9496                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9497                 }
9498         }
9499
9501         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9502
9503         kfree(ioa_cfg->ipr_cmnd_list);
9504         kfree(ioa_cfg->ipr_cmnd_list_dma);
9505         ioa_cfg->ipr_cmnd_list = NULL;
9506         ioa_cfg->ipr_cmnd_list_dma = NULL;
9507         ioa_cfg->ipr_cmd_pool = NULL;
9508 }
9509
9510 /**
9511  * ipr_free_mem - Frees memory allocated for an adapter
9512  * @ioa_cfg:    ioa cfg struct
9513  *
9514  * Return value:
9515  *      nothing
9516  **/
9517 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9518 {
9519         int i;
9520
9521         kfree(ioa_cfg->res_entries);
9522         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9523                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9524         ipr_free_cmd_blks(ioa_cfg);
9525
9526         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9527                 dma_free_coherent(&ioa_cfg->pdev->dev,
9528                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9529                                   ioa_cfg->hrrq[i].host_rrq,
9530                                   ioa_cfg->hrrq[i].host_rrq_dma);
9531
9532         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9533                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9534
9535         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9536                 dma_free_coherent(&ioa_cfg->pdev->dev,
9537                                   sizeof(struct ipr_hostrcb),
9538                                   ioa_cfg->hostrcb[i],
9539                                   ioa_cfg->hostrcb_dma[i]);
9540         }
9541
9542         ipr_free_dump(ioa_cfg);
9543         kfree(ioa_cfg->trace);
9544 }
9545
9546 /**
9547  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9548  * @ioa_cfg:    ioa config struct
9549  *
9550  * This function frees all allocated IRQs for the
9551  * specified adapter.
9552  *
9553  * Return value:
9554  *      none
9555  **/
9556 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9557 {
9558         struct pci_dev *pdev = ioa_cfg->pdev;
9559         int i;
9560
9561         for (i = 0; i < ioa_cfg->nvectors; i++)
9562                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9563         pci_free_irq_vectors(pdev);
9564 }
9565
9566 /**
9567  * ipr_free_all_resources - Free all allocated resources for an adapter.
9568  * @ioa_cfg:    ioa config struct
9569  *
9570  * This function frees all allocated resources for the
9571  * specified adapter.
9572  *
9573  * Return value:
9574  *      none
9575  **/
9576 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9577 {
9578         struct pci_dev *pdev = ioa_cfg->pdev;
9579
9580         ENTER;
9581         ipr_free_irqs(ioa_cfg);
9582         if (ioa_cfg->reset_work_q)
9583                 destroy_workqueue(ioa_cfg->reset_work_q);
9584         iounmap(ioa_cfg->hdw_dma_regs);
9585         pci_release_regions(pdev);
9586         ipr_free_mem(ioa_cfg);
9587         scsi_host_put(ioa_cfg->host);
9588         pci_disable_device(pdev);
9589         LEAVE;
9590 }
9591
9592 /**
9593  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9594  * @ioa_cfg:    ioa config struct
9595  *
9596  * Return value:
9597  *      0 on success / -ENOMEM on allocation failure
9598  **/
9599 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9600 {
9601         struct ipr_cmnd *ipr_cmd;
9602         struct ipr_ioarcb *ioarcb;
9603         dma_addr_t dma_addr;
9604         int i, entries_each_hrrq, hrrq_id = 0;
9605
9606         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9607                                                 sizeof(struct ipr_cmnd), 512, 0);
9608
9609         if (!ioa_cfg->ipr_cmd_pool)
9610                 return -ENOMEM;
9611
9612         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9613         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9614
9615         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9616                 ipr_free_cmd_blks(ioa_cfg);
9617                 return -ENOMEM;
9618         }
9619
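             /*
              * With multiple HRRQs, queue 0 is reserved for internal commands
              * and the remaining command blocks are split evenly across the rest.
              */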
9620         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9621                 if (ioa_cfg->hrrq_num > 1) {
9622                         if (i == 0) {
9623                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9624                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9625                                 ioa_cfg->hrrq[i].max_cmd_id =
9626                                         (entries_each_hrrq - 1);
9627                         } else {
9628                                 entries_each_hrrq =
9629                                         IPR_NUM_BASE_CMD_BLKS/
9630                                         (ioa_cfg->hrrq_num - 1);
9631                                 ioa_cfg->hrrq[i].min_cmd_id =
9632                                         IPR_NUM_INTERNAL_CMD_BLKS +
9633                                         (i - 1) * entries_each_hrrq;
9634                                 ioa_cfg->hrrq[i].max_cmd_id =
9635                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9636                                         i * entries_each_hrrq - 1);
9637                         }
9638                 } else {
9639                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9640                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9641                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9642                 }
9643                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9644         }
9645
9646         BUG_ON(ioa_cfg->hrrq_num == 0);
9647
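             /* Hand any rounding remainder from the even split to the last HRRQ */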
9648         i = IPR_NUM_CMD_BLKS -
9649                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9650         if (i > 0) {
9651                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9652                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9653         }
9654
9655         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9656                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9657                                 GFP_KERNEL, &dma_addr);
9658
9659                 if (!ipr_cmd) {
9660                         ipr_free_cmd_blks(ioa_cfg);
9661                         return -ENOMEM;
9662                 }
9663
9664                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9665                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9666
9667                 ioarcb = &ipr_cmd->ioarcb;
9668                 ipr_cmd->dma_addr = dma_addr;
9669                 if (ioa_cfg->sis64)
9670                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9671                 else
9672                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9673
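                     /* The command index is shifted left 2 in the response handle; the low two bits carry HRRQ status flags */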
9674                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9675                 if (ioa_cfg->sis64) {
9676                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9677                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9678                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9679                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9680                 } else {
9681                         ioarcb->write_ioadl_addr =
9682                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9683                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9684                         ioarcb->ioasa_host_pci_addr =
9685                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9686                 }
9687                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9688                 ipr_cmd->cmd_index = i;
9689                 ipr_cmd->ioa_cfg = ioa_cfg;
9690                 ipr_cmd->sense_buffer_dma = dma_addr +
9691                         offsetof(struct ipr_cmnd, sense_buffer);
9692
9693                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9694                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9695                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9696                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9697                         hrrq_id++;
9698         }
9699
9700         return 0;
9701 }
9702
9703 /**
9704  * ipr_alloc_mem - Allocate memory for an adapter
9705  * @ioa_cfg:    ioa config struct
9706  *
9707  * Return value:
9708  *      0 on success / non-zero for error
9709  **/
9710 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9711 {
9712         struct pci_dev *pdev = ioa_cfg->pdev;
9713         int i, rc = -ENOMEM;
9714
9715         ENTER;
9716         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9717                                        sizeof(struct ipr_resource_entry),
9718                                        GFP_KERNEL);
9719
9720         if (!ioa_cfg->res_entries)
9721                 goto out;
9722
9723         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9724                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9725                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9726         }
9727
9728         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9729                                               sizeof(struct ipr_misc_cbs),
9730                                               &ioa_cfg->vpd_cbs_dma,
9731                                               GFP_KERNEL);
9732
9733         if (!ioa_cfg->vpd_cbs)
9734                 goto out_free_res_entries;
9735
9736         if (ipr_alloc_cmd_blks(ioa_cfg))
9737                 goto out_free_vpd_cbs;
9738
9739         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9740                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9741                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9742                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9743                                         GFP_KERNEL);
9744
9745                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9746                         while (--i >= 0)
9747                                 dma_free_coherent(&pdev->dev,
9748                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9749                                         ioa_cfg->hrrq[i].host_rrq,
9750                                         ioa_cfg->hrrq[i].host_rrq_dma);
9751                         goto out_ipr_free_cmd_blocks;
9752                 }
9753                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9754         }
9755
9756         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9757                                                   ioa_cfg->cfg_table_size,
9758                                                   &ioa_cfg->cfg_table_dma,
9759                                                   GFP_KERNEL);
9760
9761         if (!ioa_cfg->u.cfg_table)
9762                 goto out_free_host_rrq;
9763
9764         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9765                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9766                                                          sizeof(struct ipr_hostrcb),
9767                                                          &ioa_cfg->hostrcb_dma[i],
9768                                                          GFP_KERNEL);
9769
9770                 if (!ioa_cfg->hostrcb[i])
9771                         goto out_free_hostrcb_dma;
9772
9773                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9774                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9775                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9776                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9777         }
9778
9779         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9780                                  sizeof(struct ipr_trace_entry),
9781                                  GFP_KERNEL);
9782
9783         if (!ioa_cfg->trace)
9784                 goto out_free_hostrcb_dma;
9785
9786         rc = 0;
9787 out:
9788         LEAVE;
9789         return rc;
9790
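     /* Error unwind: free everything in reverse order of allocation */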
9791 out_free_hostrcb_dma:
9792         while (i-- > 0) {
9793                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9794                                   ioa_cfg->hostrcb[i],
9795                                   ioa_cfg->hostrcb_dma[i]);
9796         }
9797         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9798                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9799 out_free_host_rrq:
9800         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9801                 dma_free_coherent(&pdev->dev,
9802                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9803                                   ioa_cfg->hrrq[i].host_rrq,
9804                                   ioa_cfg->hrrq[i].host_rrq_dma);
9805         }
9806 out_ipr_free_cmd_blocks:
9807         ipr_free_cmd_blks(ioa_cfg);
9808 out_free_vpd_cbs:
9809         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9810                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9811 out_free_res_entries:
9812         kfree(ioa_cfg->res_entries);
9813         goto out;
9814 }
9815
9816 /**
9817  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9818  * @ioa_cfg:    ioa config struct
9819  *
9820  * Return value:
9821  *      none
9822  **/
9823 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9824 {
9825         int i;
9826
9827         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9828                 ioa_cfg->bus_attr[i].bus = i;
9829                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9830                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9831                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9832                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9833                 else
9834                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9835         }
9836 }
9837
9838 /**
9839  * ipr_init_regs - Initialize IOA registers
9840  * @ioa_cfg:    ioa config struct
9841  *
9842  * Return value:
9843  *      none
9844  **/
9845 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9846 {
9847         const struct ipr_interrupt_offsets *p;
9848         struct ipr_interrupts *t;
9849         void __iomem *base;
9850
9851         p = &ioa_cfg->chip_cfg->regs;
9852         t = &ioa_cfg->regs;
9853         base = ioa_cfg->hdw_dma_regs;
9854
9855         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9856         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9857         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9858         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9859         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9860         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9861         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9862         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9863         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9864         t->ioarrin_reg = base + p->ioarrin_reg;
9865         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9866         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9867         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9868         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9869         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9870         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9871
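        /* SIS64 adapters expose additional init feedback, dump, and endian-swap registers */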
9872         if (ioa_cfg->sis64) {
9873                 t->init_feedback_reg = base + p->init_feedback_reg;
9874                 t->dump_addr_reg = base + p->dump_addr_reg;
9875                 t->dump_data_reg = base + p->dump_data_reg;
9876                 t->endian_swap_reg = base + p->endian_swap_reg;
9877         }
9878 }
9879
9880 /**
9881  * ipr_init_ioa_cfg - Initialize IOA config struct
9882  * @ioa_cfg:    ioa config struct
9883  * @host:               scsi host struct
9884  * @pdev:               PCI dev struct
9885  *
9886  * Return value:
9887  *      none
9888  **/
9889 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9890                              struct Scsi_Host *host, struct pci_dev *pdev)
9891 {
9892         int i;
9893
9894         ioa_cfg->host = host;
9895         ioa_cfg->pdev = pdev;
9896         ioa_cfg->log_level = ipr_log_level;
9897         ioa_cfg->doorbell = IPR_DOORBELL;
9898         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9899         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9900         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9901         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9902         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9903         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9904
9905         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9906         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9907         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9908         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9909         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9910         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9911         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9912         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9913         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9914         ioa_cfg->sdt_state = INACTIVE;
9915
9916         ipr_initialize_bus_attr(ioa_cfg);
9917         ioa_cfg->max_devs_supported = ipr_max_devs;
9918
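        /* Size the config table for the adapter's entry format and device limit */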
9919         if (ioa_cfg->sis64) {
9920                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9921                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9922                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9923                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9924                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9925                                            + ((sizeof(struct ipr_config_table_entry64)
9926                                                * ioa_cfg->max_devs_supported)));
9927         } else {
9928                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9929                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9930                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9931                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9932                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9933                                            + ((sizeof(struct ipr_config_table_entry)
9934                                                * ioa_cfg->max_devs_supported)));
9935         }
9936
9937         host->max_channel = IPR_VSET_BUS;
9938         host->unique_id = host->host_no;
9939         host->max_cmd_len = IPR_MAX_CDB_LEN;
9940         host->can_queue = ioa_cfg->max_cmds;
9941         pci_set_drvdata(pdev, ioa_cfg);
9942
9943         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9944                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9945                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9946                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
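                /* HRRQ 0 shares the SCSI host lock; the remaining HRRQs use their own locks */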
9947                 if (i == 0)
9948                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9949                 else
9950                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9951         }
9952 }
9953
9954 /**
9955  * ipr_get_chip_info - Find adapter chip information
9956  * @dev_id:             PCI device id struct
9957  *
9958  * Return value:
9959  *      ptr to chip information on success / NULL on failure
9960  **/
9961 static const struct ipr_chip_t *
9962 ipr_get_chip_info(const struct pci_device_id *dev_id)
9963 {
9964         int i;
9965
9966         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9967                 if (ipr_chip[i].vendor == dev_id->vendor &&
9968                     ipr_chip[i].device == dev_id->device)
9969                         return &ipr_chip[i];
9970         return NULL;
9971 }
9972
9973 /**
9974  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9975  *                                              during probe time
9976  * @ioa_cfg:    ioa config struct
9977  *
9978  * Return value:
9979  *      None
9980  **/
9981 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9982 {
9983         struct pci_dev *pdev = ioa_cfg->pdev;
9984
9985         if (pci_channel_offline(pdev)) {
9986                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9987                                    !pci_channel_offline(pdev),
9988                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9989                 pci_restore_state(pdev);
9990         }
9991 }
9992
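/**
 * name_msi_vectors - Build the per-vector interrupt names
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      none
 **/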
9993 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9994 {
9995         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9996
9997         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9998                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9999                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10000                 ioa_cfg->vectors_info[vec_idx].
10001                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10002         }
10003 }
10004
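/**
 * ipr_request_other_msi_irqs - Request IRQs for the additional HRRQ vectors
 * @ioa_cfg:    ioa config struct
 * @pdev:       PCI device struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/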
10005 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10006                 struct pci_dev *pdev)
10007 {
10008         int i, rc;
10009
10010         for (i = 1; i < ioa_cfg->nvectors; i++) {
10011                 rc = request_irq(pci_irq_vector(pdev, i),
10012                         ipr_isr_mhrrq,
10013                         0,
10014                         ioa_cfg->vectors_info[i].desc,
10015                         &ioa_cfg->hrrq[i]);
10016                 if (rc) {
10017                         while (--i >= 0)
10018                                 free_irq(pci_irq_vector(pdev, i),
10019                                         &ioa_cfg->hrrq[i]);
10020                         return rc;
10021                 }
10022         }
10023         return 0;
10024 }
10025
10026 /**
10027  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10028  * @irq:                interrupt number
 * @devp:               pointer to the ioa config struct
10029  *
10030  * Description: Simply sets the msi_received flag to 1, indicating that
10031  * Message Signaled Interrupts are supported.
10032  *
10033  * Return value:
10034  *      IRQ_HANDLED
10035  **/
10036 static irqreturn_t ipr_test_intr(int irq, void *devp)
10037 {
10038         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10039         unsigned long lock_flags = 0;
10040         irqreturn_t rc = IRQ_HANDLED;
10041
10042         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10043         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10044
10045         ioa_cfg->msi_received = 1;
10046         wake_up(&ioa_cfg->msi_wait_q);
10047
10048         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10049         return rc;
10050 }
10051
10052 /**
10053  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10054  * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
10055  *
10056  * Description: This routine sets up and initiates a test interrupt to determine
10057  * if the interrupt is received via the ipr_test_intr() service routine.
10058  * If the test fails, the driver will fall back to LSI.
10059  *
10060  * Return value:
10061  *      0 on success / non-zero on failure
10062  **/
10063 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10064 {
10065         int rc;
10066         volatile u32 int_reg;
10067         unsigned long lock_flags = 0;
10068         int irq = pci_irq_vector(pdev, 0);
10069
10070         ENTER;
10071
10072         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10073         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10074         ioa_cfg->msi_received = 0;
10075         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10076         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10077         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10078         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10079
10080         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10081         if (rc) {
10082                 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10083                 return rc;
10084         } else if (ipr_debug)
10085                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10086
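        /* Generate the test interrupt and wait up to one second for it to arrive */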
10087         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10088         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10089         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10090         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10091         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10092
10093         if (!ioa_cfg->msi_received) {
10094                 /* MSI test failed */
10095                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10096                 rc = -EOPNOTSUPP;
10097         } else if (ipr_debug)
10098                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10099
10100         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10101
10102         free_irq(irq, ioa_cfg);
10103
10104         LEAVE;
10105
10106         return rc;
10107 }
10108
10109 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10110  * @pdev:               PCI device struct
10111  * @dev_id:             PCI device id struct
10112  *
10113  * Return value:
10114  *      0 on success / non-zero on failure
10115  **/
10116 static int ipr_probe_ioa(struct pci_dev *pdev,
10117                          const struct pci_device_id *dev_id)
10118 {
10119         struct ipr_ioa_cfg *ioa_cfg;
10120         struct Scsi_Host *host;
10121         unsigned long ipr_regs_pci;
10122         void __iomem *ipr_regs;
10123         int rc = PCIBIOS_SUCCESSFUL;
10124         volatile u32 mask, uproc, interrupts;
10125         unsigned long lock_flags, driver_lock_flags;
10126         unsigned int irq_flag;
10127
10128         ENTER;
10129
10130         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10131         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10132
10133         if (!host) {
10134                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10135                 rc = -ENOMEM;
10136                 goto out;
10137         }
10138
10139         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10140         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10141         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10142
10143         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10144
10145         if (!ioa_cfg->ipr_chip) {
10146                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10147                         dev_id->vendor, dev_id->device);
                rc = -ENODEV;
10148                 goto out_scsi_host_put;
10149         }
10150
10151         /* set SIS 32 or SIS 64 */
10152         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10153         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10154         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10155         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10156
10157         if (ipr_transop_timeout)
10158                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10159         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10160                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10161         else
10162                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10163
10164         ioa_cfg->revid = pdev->revision;
10165
10166         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10167
10168         ipr_regs_pci = pci_resource_start(pdev, 0);
10169
10170         rc = pci_request_regions(pdev, IPR_NAME);
10171         if (rc < 0) {
10172                 dev_err(&pdev->dev,
10173                         "Couldn't register memory range of registers\n");
10174                 goto out_scsi_host_put;
10175         }
10176
10177         rc = pci_enable_device(pdev);
10178
10179         if (rc || pci_channel_offline(pdev)) {
10180                 if (pci_channel_offline(pdev)) {
10181                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10182                         rc = pci_enable_device(pdev);
10183                 }
10184
10185                 if (rc) {
10186                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10187                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10188                         goto out_release_regions;
10189                 }
10190         }
10191
10192         ipr_regs = pci_ioremap_bar(pdev, 0);
10193
10194         if (!ipr_regs) {
10195                 dev_err(&pdev->dev,
10196                         "Couldn't map memory range of registers\n");
10197                 rc = -ENOMEM;
10198                 goto out_disable;
10199         }
10200
10201         ioa_cfg->hdw_dma_regs = ipr_regs;
10202         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10203         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10204
10205         ipr_init_regs(ioa_cfg);
10206
10207         if (ioa_cfg->sis64) {
10208                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10209                 if (rc < 0) {
10210                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10211                         rc = dma_set_mask_and_coherent(&pdev->dev,
10212                                                        DMA_BIT_MASK(32));
10213                 }
10214         } else
10215                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10216
10217         if (rc < 0) {
10218                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10219                 goto cleanup_nomem;
10220         }
10221
10222         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10223                                    ioa_cfg->chip_cfg->cache_line_size);
10224
10225         if (rc != PCIBIOS_SUCCESSFUL) {
10226                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10227                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10228                 rc = -EIO;
10229                 goto cleanup_nomem;
10230         }
10231
10232         /* Issue MMIO read to ensure card is not in EEH */
10233         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10234         ipr_wait_for_pci_err_recovery(ioa_cfg);
10235
10236         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10237                 dev_err(&pdev->dev, "The max number of MSI-X vectors is %d\n",
10238                         IPR_MAX_MSIX_VECTORS);
10239                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10240         }
10241
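        /* Request MSI-X or MSI when the chip supports it; otherwise use legacy INTx */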
10242         irq_flag = PCI_IRQ_LEGACY;
10243         if (ioa_cfg->ipr_chip->has_msi)
10244                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10245         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10246         if (rc < 0) {
10247                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10248                 goto cleanup_nomem;
10249         }
10250         ioa_cfg->nvectors = rc;
10251
10252         if (!pdev->msi_enabled && !pdev->msix_enabled)
10253                 ioa_cfg->clear_isr = 1;
10254
10255         pci_set_master(pdev);
10256
10257         if (pci_channel_offline(pdev)) {
10258                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10259                 pci_set_master(pdev);
10260                 if (pci_channel_offline(pdev)) {
10261                         rc = -EIO;
10262                         goto out_msi_disable;
10263                 }
10264         }
10265
10266         if (pdev->msi_enabled || pdev->msix_enabled) {
10267                 rc = ipr_test_msi(ioa_cfg, pdev);
10268                 switch (rc) {
10269                 case 0:
10270                         dev_info(&pdev->dev,
10271                                 "Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
10272                                 pdev->msix_enabled ? "-X" : "");
10273                         break;
10274                 case -EOPNOTSUPP:
10275                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10276                         pci_free_irq_vectors(pdev);
10277
10278                         ioa_cfg->nvectors = 1;
10279                         ioa_cfg->clear_isr = 1;
10280                         break;
10281                 default:
10282                         goto out_msi_disable;
10283                 }
10284         }
10285
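        /* Use no more HRRQs than interrupt vectors, online CPUs, or the adapter maximum */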
10286         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10287                                 (unsigned int)num_online_cpus(),
10288                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10289
10290         rc = ipr_save_pcix_cmd_reg(ioa_cfg);
        if (rc)
10291                 goto out_msi_disable;
10292
10293         rc = ipr_set_pcix_cmd_reg(ioa_cfg);
        if (rc)
10294                 goto out_msi_disable;
10295
10296         rc = ipr_alloc_mem(ioa_cfg);
10297         if (rc < 0) {
10298                 dev_err(&pdev->dev,
10299                         "Couldn't allocate enough memory for device driver!\n");
10300                 goto out_msi_disable;
10301         }
10302
10303         /* Save away PCI config space for use following IOA reset */
10304         rc = pci_save_state(pdev);
10305
10306         if (rc != PCIBIOS_SUCCESSFUL) {
10307                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10308                 rc = -EIO;
10309                 goto cleanup_nolog;
10310         }
10311
10312         /*
10313          * If HRRQ updated interrupt is not masked, or reset alert is set,
10314          * the card is in an unknown state and needs a hard reset
10315          */
10316         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10317         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10318         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10319         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10320                 ioa_cfg->needs_hard_reset = 1;
10321         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10322                 ioa_cfg->needs_hard_reset = 1;
10323         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10324                 ioa_cfg->ioa_unit_checked = 1;
10325
10326         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10327         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10328         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10329
10330         if (pdev->msi_enabled || pdev->msix_enabled) {
10331                 name_msi_vectors(ioa_cfg);
10332                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10333                         ioa_cfg->vectors_info[0].desc,
10334                         &ioa_cfg->hrrq[0]);
10335                 if (!rc)
10336                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10337         } else {
10338                 rc = request_irq(pdev->irq, ipr_isr,
10339                          IRQF_SHARED,
10340                          IPR_NAME, &ioa_cfg->hrrq[0]);
10341         }
10342         if (rc) {
10343                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10344                         pdev->irq, rc);
10345                 goto cleanup_nolog;
10346         }
10347
10348         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10349             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10350                 ioa_cfg->needs_warm_reset = 1;
10351                 ioa_cfg->reset = ipr_reset_slot_reset;
10352
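                /* Warm resets are driven from a dedicated ordered workqueue */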
10353                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10354                                                                 WQ_MEM_RECLAIM, host->host_no);
10355
10356                 if (!ioa_cfg->reset_work_q) {
10357                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10358                         rc = -ENOMEM;
10359                         goto out_free_irq;
10360                 }
10361         } else
10362                 ioa_cfg->reset = ipr_reset_start_bist;
10363
10364         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10365         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10366         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10367
10368         LEAVE;
10369 out:
10370         return rc;
10371
10372 out_free_irq:
10373         ipr_free_irqs(ioa_cfg);
10374 cleanup_nolog:
10375         ipr_free_mem(ioa_cfg);
10376 out_msi_disable:
10377         ipr_wait_for_pci_err_recovery(ioa_cfg);
10378         pci_free_irq_vectors(pdev);
10379 cleanup_nomem:
10380         iounmap(ipr_regs);
10381 out_disable:
10382         pci_disable_device(pdev);
10383 out_release_regions:
10384         pci_release_regions(pdev);
10385 out_scsi_host_put:
10386         scsi_host_put(host);
10387         goto out;
10388 }
10389
10390 /**
10391  * ipr_initiate_ioa_bringdown - Bring down an adapter
10392  * @ioa_cfg:            ioa config struct
10393  * @shutdown_type:      shutdown type
10394  *
10395  * Description: This function will initiate bringing down the adapter.
10396  * This consists of issuing an IOA shutdown to the adapter
10397  * to flush the cache, and running BIST.
10398  * A caller that needs to wait for the reset to complete
10399  * must sleep on the reset_wait_q.
10400  *
10401  * Return value:
10402  *      none
10403  **/
10404 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10405                                        enum ipr_shutdown_type shutdown_type)
10406 {
10407         ENTER;
10408         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10409                 ioa_cfg->sdt_state = ABORT_DUMP;
10410         ioa_cfg->reset_retries = 0;
10411         ioa_cfg->in_ioa_bringdown = 1;
10412         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10413         LEAVE;
10414 }
10415
10416 /**
10417  * __ipr_remove - Remove a single adapter
10418  * @pdev:       pci device struct
10419  *
10420  * Adapter hot plug remove entry point.
10421  *
10422  * Return value:
10423  *      none
10424  **/
10425 static void __ipr_remove(struct pci_dev *pdev)
10426 {
10427         unsigned long host_lock_flags = 0;
10428         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10429         int i;
10430         unsigned long driver_lock_flags;
10431         ENTER;
10432
10433         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10434         while (ioa_cfg->in_reset_reload) {
10435                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10436                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10437                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10438         }
10439
10440         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10441                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10442                 ioa_cfg->hrrq[i].removing_ioa = 1;
10443                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10444         }
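        /* Ensure the removing_ioa stores are visible before the bringdown starts */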
10445         wmb();
10446         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10447
10448         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10449         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10450         flush_work(&ioa_cfg->work_q);
10451         if (ioa_cfg->reset_work_q)
10452                 flush_workqueue(ioa_cfg->reset_work_q);
10453         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10454         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10455
10456         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10457         list_del(&ioa_cfg->queue);
10458         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10459
10460         if (ioa_cfg->sdt_state == ABORT_DUMP)
10461                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10462         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10463
10464         ipr_free_all_resources(ioa_cfg);
10465
10466         LEAVE;
10467 }
10468
10469 /**
10470  * ipr_remove - IOA hot plug remove entry point
10471  * @pdev:       pci device struct
10472  *
10473  * Adapter hot plug remove entry point.
10474  *
10475  * Return value:
10476  *      none
10477  **/
10478 static void ipr_remove(struct pci_dev *pdev)
10479 {
10480         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10481
10482         ENTER;
10483
10484         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10485                               &ipr_trace_attr);
10486         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10487                              &ipr_dump_attr);
10488         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10489                         &ipr_ioa_async_err_log);
10490         scsi_remove_host(ioa_cfg->host);
10491
10492         __ipr_remove(pdev);
10493
10494         LEAVE;
10495 }
10496
10497 /**
10498  * ipr_probe - Adapter hot plug add entry point
10499  * @pdev:               PCI device struct
 * @dev_id:             PCI device id struct
 *
10500  * Return value:
10501  *      0 on success / non-zero on failure
10502  **/
10503 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10504 {
10505         struct ipr_ioa_cfg *ioa_cfg;
10506         unsigned long flags;
10507         int rc, i;
10508
10509         rc = ipr_probe_ioa(pdev, dev_id);
10510
10511         if (rc)
10512                 return rc;
10513
10514         ioa_cfg = pci_get_drvdata(pdev);
10515         rc = ipr_probe_ioa_part2(ioa_cfg);
10516
10517         if (rc) {
10518                 __ipr_remove(pdev);
10519                 return rc;
10520         }
10521
10522         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10523
10524         if (rc) {
10525                 __ipr_remove(pdev);
10526                 return rc;
10527         }
10528
10529         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10530                                    &ipr_trace_attr);
10531
10532         if (rc) {
10533                 scsi_remove_host(ioa_cfg->host);
10534                 __ipr_remove(pdev);
10535                 return rc;
10536         }
10537
10538         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10539                         &ipr_ioa_async_err_log);
10540
10541         if (rc) {
10544                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10545                                 &ipr_trace_attr);
10546                 scsi_remove_host(ioa_cfg->host);
10547                 __ipr_remove(pdev);
10548                 return rc;
10549         }
10550
10551         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10552                                    &ipr_dump_attr);
10553
10554         if (rc) {
10555                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10556                                       &ipr_ioa_async_err_log);
10557                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10558                                       &ipr_trace_attr);
10559                 scsi_remove_host(ioa_cfg->host);
10560                 __ipr_remove(pdev);
10561                 return rc;
10562         }
10563         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10564         ioa_cfg->scan_enabled = 1;
10565         schedule_work(&ioa_cfg->work_q);
10566         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10567
10568         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10569
10570         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10571                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10572                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10573                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10574                 }
10575         }
10576
10577         scsi_scan_host(ioa_cfg->host);
10578
10579         return 0;
10580 }
10581
10582 /**
10583  * ipr_shutdown - Shutdown handler.
10584  * @pdev:       pci device struct
10585  *
10586  * This function is invoked upon system shutdown/reboot. It issues
10587  * a shutdown to the adapter to flush its write cache.
10588  *
10589  * Return value:
10590  *      none
10591  **/
10592 static void ipr_shutdown(struct pci_dev *pdev)
10593 {
10594         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10595         unsigned long lock_flags = 0;
10596         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10597         int i;
10598
10599         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10600         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10601                 ioa_cfg->iopoll_weight = 0;
10602                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10603                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10604         }
10605
10606         while (ioa_cfg->in_reset_reload) {
10607                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10608                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10609                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10610         }
10611
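        /* A fast reboot on SIS64 adapters only needs to quiesce the IOA */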
10612         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10613                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10614
10615         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10616         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10617         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10618         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10619                 ipr_free_irqs(ioa_cfg);
10620                 pci_disable_device(ioa_cfg->pdev);
10621         }
10622 }
10623
10624 static const struct pci_device_id ipr_pci_table[] = {
10625         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10626                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10627         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10628                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10629         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10630                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10631         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10632                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10633         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10634                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10635         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10636                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10637         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10638                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10639         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10640                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10641                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10642         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10643                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10644         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10645                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10646                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10647         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10648                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10649                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10650         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10651                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10652         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10653                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10654                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10655         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10656                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10657                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10658         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10659                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10660                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10661         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10662                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10663         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10664                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10665         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10666                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10667                 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10668         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10669                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10670         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10671                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10672         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10673                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10674                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10675         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10676                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10677                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10678         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10679                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10680         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10681                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10682         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10683                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10684         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10685                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10686         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10687                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10688         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10689                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10690         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10691                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10692         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10693                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10694         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10695                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10696         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10697                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10698         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10699                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10700         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10701                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10702         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10703                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10704         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10705                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10706         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10707                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10708         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10709                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10710         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10711                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10712         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10713                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10714         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10715                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10716         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10717                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10718         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10719                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10720         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10721                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10722         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10723                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10724         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10725                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10726         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10727                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10728         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10729                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10730         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10731                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10732         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10733                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10734         { }
10735 };
10736 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10737
10738 static const struct pci_error_handlers ipr_err_handler = {
10739         .error_detected = ipr_pci_error_detected,
10740         .mmio_enabled = ipr_pci_mmio_enabled,
10741         .slot_reset = ipr_pci_slot_reset,
10742 };
10743
10744 static struct pci_driver ipr_driver = {
10745         .name = IPR_NAME,
10746         .id_table = ipr_pci_table,
10747         .probe = ipr_probe,
10748         .remove = ipr_remove,
10749         .shutdown = ipr_shutdown,
10750         .err_handler = &ipr_err_handler,
10751 };
10752
10753 /**
10754  * ipr_halt_done - Shutdown prepare completion
10755  * @ipr_cmd:    ipr command struct
 *
10756  * Return value:
10757  *      none
10758  **/
10759 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10760 {
10761         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10762 }
10763
10764 /**
10765  * ipr_halt - Issue shutdown prepare to all adapters
10766  * @nb:         notifier block
 * @event:      system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:        unused
 *
10767  * Return value:
10768  *      NOTIFY_OK / NOTIFY_DONE when the event is not handled
10769  **/
10770 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10771 {
10772         struct ipr_cmnd *ipr_cmd;
10773         struct ipr_ioa_cfg *ioa_cfg;
10774         unsigned long flags = 0, driver_lock_flags;
10775
10776         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10777                 return NOTIFY_DONE;
10778
10779         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10780
10781         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10782                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10783                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10784                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10785                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10786                         continue;
10787                 }
10788
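                /* Build and issue a shutdown prepare-for-normal command to the IOA */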
10789                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10790                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10791                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10792                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10793                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10794
10795                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10796                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10797         }
10798         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10799
10800         return NOTIFY_OK;
10801 }
10802
10803 static struct notifier_block ipr_notifier = {
10804         .notifier_call = ipr_halt,
10805 };
10806
10807 /**
10808  * ipr_init - Module entry point
10809  *
10810  * Return value:
10811  *      0 on success / negative value on failure
10812  **/
10813 static int __init ipr_init(void)
10814 {
10815         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10816                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10817
10818         register_reboot_notifier(&ipr_notifier);
10819         return pci_register_driver(&ipr_driver);
10820 }
10821
10822 /**
10823  * ipr_exit - Module unload
10824  *
10825  * Module unload entry point.
10826  *
10827  * Return value:
10828  *      none
10829  **/
10830 static void __exit ipr_exit(void)
10831 {
10832         unregister_reboot_notifier(&ipr_notifier);
10833         pci_unregister_driver(&ipr_driver);
10834 }
10835
10836 module_init(ipr_init);
10837 module_exit(ipr_exit);