drivers/scsi/mvumi.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Marvell UMI driver
4  *
5  * Copyright 2011 Marvell. <jyli@marvell.com>
6 */
7
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/moduleparam.h>
11 #include <linux/init.h>
12 #include <linux/device.h>
13 #include <linux/pci.h>
14 #include <linux/list.h>
15 #include <linux/spinlock.h>
16 #include <linux/interrupt.h>
17 #include <linux/delay.h>
18 #include <linux/ktime.h>
19 #include <linux/blkdev.h>
20 #include <linux/io.h>
21 #include <scsi/scsi.h>
22 #include <scsi/scsi_cmnd.h>
23 #include <scsi/scsi_device.h>
24 #include <scsi/scsi_host.h>
25 #include <scsi/scsi_transport.h>
26 #include <scsi/scsi_eh.h>
27 #include <linux/uaccess.h>
28 #include <linux/kthread.h>
29
30 #include "mvumi.h"
31
32 MODULE_LICENSE("GPL");
33 MODULE_AUTHOR("jyli@marvell.com");
34 MODULE_DESCRIPTION("Marvell UMI Driver");
35
36 static const struct pci_device_id mvumi_pci_table[] = {
37         { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
38         { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
39         { 0 }
40 };
41
42 MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
43
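/*
 * Free tags are kept in a simple LIFO stack: tag_init() fills the stack
 * with all @size tags (tag 0 ends up on top), tag_get_one() pops a tag
 * and tag_release_one() pushes it back.  The BUG_ON()s catch over- and
 * under-flow of the pool.
 */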
44 static void tag_init(struct mvumi_tag *st, unsigned short size)
45 {
46         unsigned short i;
47         BUG_ON(size != st->size);
48         st->top = size;
49         for (i = 0; i < size; i++)
50                 st->stack[i] = size - 1 - i;
51 }
52
53 static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
54 {
55         BUG_ON(st->top <= 0);
56         return st->stack[--st->top];
57 }
58
59 static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
60                                                         unsigned short tag)
61 {
62         BUG_ON(st->top >= st->size);
63         st->stack[st->top++] = tag;
64 }
65
66 static bool tag_is_empty(struct mvumi_tag *st)
67 {
68         if (st->top == 0)
69                 return true;
70         else
71                 return false;
72 }
73
74 static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
75 {
76         int i;
77
78         for (i = 0; i < MAX_BASE_ADDRESS; i++)
79                 if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
80                                                                 addr_array[i])
81                         pci_iounmap(dev, addr_array[i]);
82 }
83
84 static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
85 {
86         int i;
87
88         for (i = 0; i < MAX_BASE_ADDRESS; i++) {
89                 if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
90                         addr_array[i] = pci_iomap(dev, i, 0);
91                         if (!addr_array[i]) {
92                                 dev_err(&dev->dev, "failed to map Bar[%d]\n",
93                                                                         i);
94                                 mvumi_unmap_pci_addr(dev, addr_array);
95                                 return -ENOMEM;
96                         }
97                 } else
98                         addr_array[i] = NULL;
99
100                 dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
101         }
102
103         return 0;
104 }
105
106 static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
107                                 enum resource_type type, unsigned int size)
108 {
109         struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
110
111         if (!res) {
112                 dev_err(&mhba->pdev->dev,
113                         "Failed to allocate memory for resource manager.\n");
114                 return NULL;
115         }
116
117         switch (type) {
118         case RESOURCE_CACHED_MEMORY:
119                 res->virt_addr = kzalloc(size, GFP_ATOMIC);
120                 if (!res->virt_addr) {
121                         dev_err(&mhba->pdev->dev,
122                                 "unable to allocate memory,size = %d.\n", size);
123                         kfree(res);
124                         return NULL;
125                 }
126                 break;
127
128         case RESOURCE_UNCACHED_MEMORY:
129                 size = round_up(size, 8);
130                 res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
131                                                     &res->bus_addr,
132                                                     GFP_KERNEL);
133                 if (!res->virt_addr) {
134                         dev_err(&mhba->pdev->dev,
135                                         "unable to allocate consistent mem,"
136                                                         " size = %d.\n", size);
137                         kfree(res);
138                         return NULL;
139                 }
140                 break;
141
142         default:
143                 dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
144                 kfree(res);
145                 return NULL;
146         }
147
148         res->type = type;
149         res->size = size;
150         INIT_LIST_HEAD(&res->entry);
151         list_add_tail(&res->entry, &mhba->res_list);
152
153         return res;
154 }
155
156 static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
157 {
158         struct mvumi_res *res, *tmp;
159
160         list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
161                 switch (res->type) {
162                 case RESOURCE_UNCACHED_MEMORY:
163                         dma_free_coherent(&mhba->pdev->dev, res->size,
164                                                 res->virt_addr, res->bus_addr);
165                         break;
166                 case RESOURCE_CACHED_MEMORY:
167                         kfree(res->virt_addr);
168                         break;
169                 default:
170                         dev_err(&mhba->pdev->dev,
171                                 "unknown resource type %d\n", res->type);
172                         break;
173                 }
174                 list_del(&res->entry);
175                 kfree(res);
176         }
177         mhba->fw_flag &= ~MVUMI_FW_ALLOC;
178 }
179
180 /**
181  * mvumi_make_sgl -     Prepares  SGL
182  * @mhba:               Adapter soft state
183  * @scmd:               SCSI command from the mid-layer
184  * @sgl_p:              SGL to be filled in
185  * @sg_count:           return the number of SG elements
186  *
187  * If successful, this function returns 0; otherwise, it returns -1.
188  */
189 static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
190                                         void *sgl_p, unsigned char *sg_count)
191 {
192         struct scatterlist *sg;
193         struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
194         unsigned int i;
195         unsigned int sgnum = scsi_sg_count(scmd);
196         dma_addr_t busaddr;
197
198         sg = scsi_sglist(scmd);
199         *sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
200                                scmd->sc_data_direction);
201         if (*sg_count > mhba->max_sge) {
202                 dev_err(&mhba->pdev->dev,
203                         "sg count[0x%x] is bigger than max sg[0x%x].\n",
204                         *sg_count, mhba->max_sge);
205                 dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
206                              scmd->sc_data_direction);
207                 return -1;
208         }
209         for (i = 0; i < *sg_count; i++) {
210                 busaddr = sg_dma_address(&sg[i]);
211                 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
212                 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
213                 m_sg->flags = 0;
214                 sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
215                 if ((i + 1) == *sg_count)
216                         m_sg->flags |= 1U << mhba->eot_flag;
217
218                 sgd_inc(mhba, m_sg);
219         }
220
221         return 0;
222 }
223
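/*
 * Allocate a DMA-coherent data buffer of @size bytes for an internal
 * command and describe it with a single SG entry (marked end-of-table)
 * in the command's frame payload.  A zero @size is a no-op.
 */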
224 static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
225                                                         unsigned int size)
226 {
227         struct mvumi_sgl *m_sg;
228         void *virt_addr;
229         dma_addr_t phy_addr;
230
231         if (size == 0)
232                 return 0;
233
234         virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
235                                        GFP_KERNEL);
236         if (!virt_addr)
237                 return -1;
238
239         m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
240         cmd->frame->sg_counts = 1;
241         cmd->data_buf = virt_addr;
242
243         m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
244         m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
245         m_sg->flags = 1U << mhba->eot_flag;
246         sgd_setsz(mhba, m_sg, cpu_to_le32(size));
247
248         return 0;
249 }
250
251 static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
252                                 unsigned int buf_size)
253 {
254         struct mvumi_cmd *cmd;
255
256         cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
257         if (!cmd) {
258                 dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
259                 return NULL;
260         }
261         INIT_LIST_HEAD(&cmd->queue_pointer);
262
263         cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
264                         &cmd->frame_phys, GFP_KERNEL);
265         if (!cmd->frame) {
266                 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
267                         " frame, size = %d.\n", mhba->ib_max_size);
268                 kfree(cmd);
269                 return NULL;
270         }
271
272         if (buf_size) {
273                 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
274                         dev_err(&mhba->pdev->dev, "failed to allocate memory"
275                                                 " for internal frame\n");
276                         dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
277                                         cmd->frame, cmd->frame_phys);
278                         kfree(cmd);
279                         return NULL;
280                 }
281         } else
282                 cmd->frame->sg_counts = 0;
283
284         return cmd;
285 }
286
287 static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
288                                                 struct mvumi_cmd *cmd)
289 {
290         struct mvumi_sgl *m_sg;
291         unsigned int size;
292         dma_addr_t phy_addr;
293
294         if (cmd && cmd->frame) {
295                 if (cmd->frame->sg_counts) {
296                         m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
297                         sgd_getsz(mhba, m_sg, size);
298
299                         phy_addr = (dma_addr_t) m_sg->baseaddr_l |
300                                 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
301
302                         dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
303                                                                 phy_addr);
304                 }
305                 dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
306                                 cmd->frame, cmd->frame_phys);
307                 kfree(cmd);
308         }
309 }
310
311 /**
312  * mvumi_get_cmd -      Get a command from the free pool
313  * @mhba:               Adapter soft state
314  *
315  * Returns a free command from the pool
316  */
317 static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
318 {
319         struct mvumi_cmd *cmd = NULL;
320
321         if (likely(!list_empty(&mhba->cmd_pool))) {
322                 cmd = list_entry((&mhba->cmd_pool)->next,
323                                 struct mvumi_cmd, queue_pointer);
324                 list_del_init(&cmd->queue_pointer);
325         } else
326                 dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
327
328         return cmd;
329 }
330
331 /**
332  * mvumi_return_cmd -   Return a cmd to free command pool
333  * @mhba:               Adapter soft state
334  * @cmd:                Command packet to be returned to free command pool
335  */
336 static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
337                                                 struct mvumi_cmd *cmd)
338 {
339         cmd->scmd = NULL;
340         list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
341 }
342
343 /**
344  * mvumi_free_cmds -    Free all the cmds in the free cmd pool
345  * @mhba:               Adapter soft state
346  */
347 static void mvumi_free_cmds(struct mvumi_hba *mhba)
348 {
349         struct mvumi_cmd *cmd;
350
351         while (!list_empty(&mhba->cmd_pool)) {
352                 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
353                                                         queue_pointer);
354                 list_del(&cmd->queue_pointer);
355                 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
356                         kfree(cmd->frame);
357                 kfree(cmd);
358         }
359 }
360
361 /**
362  * mvumi_alloc_cmds -   Allocates the command packets
363  * @mhba:               Adapter soft state
364  *
365  */
366 static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
367 {
368         int i;
369         struct mvumi_cmd *cmd;
370
371         for (i = 0; i < mhba->max_io; i++) {
372                 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
373                 if (!cmd)
374                         goto err_exit;
375
376                 INIT_LIST_HEAD(&cmd->queue_pointer);
377                 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
378                 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
379                         cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
380                         cmd->frame_phys = mhba->ib_frame_phys
381                                                 + i * mhba->ib_max_size;
382                 } else
383                         cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
384                 if (!cmd->frame)
385                         goto err_exit;
386         }
387         return 0;
388
389 err_exit:
390         dev_err(&mhba->pdev->dev,
391                         "failed to allocate memory for cmd[0x%x].\n", i);
392         while (!list_empty(&mhba->cmd_pool)) {
393                 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
394                                                 queue_pointer);
395                 list_del(&cmd->queue_pointer);
396                 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
397                         kfree(cmd->frame);
398                 kfree(cmd);
399         }
400         return -ENOMEM;
401 }
402
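/*
 * Return how many inbound list slots are free on 9143 controllers.  The
 * list is full when the hardware read pointer and the driver's current
 * slot have the same slot number but different toggle bits; otherwise
 * the limit is how many commands the firmware can still accept
 * (max_io - fw_outstanding).
 */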
403 static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
404 {
405         unsigned int ib_rp_reg;
406         struct mvumi_hw_regs *regs = mhba->regs;
407
408         ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
409
410         if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
411                         (mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
412                         ((ib_rp_reg & regs->cl_pointer_toggle)
413                          != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
414                 dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
415                 return 0;
416         }
417         if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
418                 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
419                 return 0;
420         } else {
421                 return mhba->max_io - atomic_read(&mhba->fw_outstanding);
422         }
423 }
424
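/*
 * Return how many inbound list slots are free on 9580 controllers: read
 * the available-slot count from the inbound shadow area (0xffff means no
 * slot is currently free), after checking the outstanding I/O limit.
 */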
425 static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
426 {
427         unsigned int count;
428         if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
429                 return 0;
430         count = ioread32(mhba->ib_shadow);
431         if (count == 0xffff)
432                 return 0;
433         return count;
434 }
435
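/*
 * Advance ib_cur_slot to the next inbound entry (flipping the pointer
 * toggle bit on wrap-around), return its address through @ib_entry and
 * count the command as outstanding in the firmware.
 */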
436 static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
437 {
438         unsigned int cur_ib_entry;
439
440         cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
441         cur_ib_entry++;
442         if (cur_ib_entry >= mhba->list_num_io) {
443                 cur_ib_entry -= mhba->list_num_io;
444                 mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
445         }
446         mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
447         mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
448         if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
449                 *ib_entry = mhba->ib_list + cur_ib_entry *
450                                 sizeof(struct mvumi_dyn_list_entry);
451         } else {
452                 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
453         }
454         atomic_inc(&mhba->fw_outstanding);
455 }
456
457 static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
458 {
459         iowrite32(0xffff, mhba->ib_shadow);
460         iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
461 }
462
463 static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
464                 unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
465 {
466         unsigned short tag, request_id;
467
468         udelay(1);
469         p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
470         request_id = p_outb_frame->request_id;
471         tag = p_outb_frame->tag;
472         if (tag > mhba->tag_pool.size) {
473                 dev_err(&mhba->pdev->dev, "ob frame data error\n");
474                 return -1;
475         }
476         if (mhba->tag_cmd[tag] == NULL) {
477                 dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
478                 return -1;
479         } else if (mhba->tag_cmd[tag]->request_id != request_id &&
480                                                 mhba->request_id_enabled) {
481                         dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
482                                         "cmd request ID:0x%x\n", request_id,
483                                         mhba->tag_cmd[tag]->request_id);
484                         return -1;
485         }
486
487         return 0;
488 }
489
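/*
 * Work out the range of outbound entries the firmware has completed on
 * 9143 controllers.  Spin until the outbound copy pointer and its shadow
 * agree, then report the current and end slot indices; the end index is
 * biased by list_num_io when the toggle bits differ, so the caller's
 * subtraction yields the number of new entries.
 */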
490 static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
491                         unsigned int *cur_obf, unsigned int *assign_obf_end)
492 {
493         unsigned int ob_write, ob_write_shadow;
494         struct mvumi_hw_regs *regs = mhba->regs;
495
496         do {
497                 ob_write = ioread32(regs->outb_copy_pointer);
498                 ob_write_shadow = ioread32(mhba->ob_shadow);
499         } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
500
501         *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
502         *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
503
504         if ((ob_write & regs->cl_pointer_toggle) !=
505                         (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
506                 *assign_obf_end += mhba->list_num_io;
507         }
508         return 0;
509 }
510
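/*
 * Same as above for 9580 controllers: read the outbound copy pointer and
 * return the current and end slot indices, or -1 if no new outbound
 * entry has been posted.
 */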
511 static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
512                         unsigned int *cur_obf, unsigned int *assign_obf_end)
513 {
514         unsigned int ob_write;
515         struct mvumi_hw_regs *regs = mhba->regs;
516
517         ob_write = ioread32(regs->outb_read_pointer);
518         ob_write = ioread32(regs->outb_copy_pointer);
519         *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
520         *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
521         if (*assign_obf_end < *cur_obf)
522                 *assign_obf_end += mhba->list_num_io;
523         else if (*assign_obf_end == *cur_obf)
524                 return -1;
525         return 0;
526 }
527
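/*
 * Drain newly completed outbound entries: copy each valid response frame
 * into a buffer taken from ob_data_list, queue it on free_ob_list for
 * later completion, and finally acknowledge the consumed entries by
 * updating the outbound read pointer.
 */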
528 static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
529 {
530         unsigned int cur_obf, assign_obf_end, i;
531         struct mvumi_ob_data *ob_data;
532         struct mvumi_rsp_frame *p_outb_frame;
533         struct mvumi_hw_regs *regs = mhba->regs;
534
535         if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
536                 return;
537
538         for (i = (assign_obf_end - cur_obf); i != 0; i--) {
539                 cur_obf++;
540                 if (cur_obf >= mhba->list_num_io) {
541                         cur_obf -= mhba->list_num_io;
542                         mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
543                 }
544
545                 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
546
547                 /* Copy pointer may point to an entry in the outbound list
548                  * before that entry has valid data.
549                  */
550                 if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
551                         mhba->tag_cmd[p_outb_frame->tag] == NULL ||
552                         p_outb_frame->request_id !=
553                                 mhba->tag_cmd[p_outb_frame->tag]->request_id))
554                         if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
555                                 continue;
556
557                 if (!list_empty(&mhba->ob_data_list)) {
558                         ob_data = (struct mvumi_ob_data *)
559                                 list_first_entry(&mhba->ob_data_list,
560                                         struct mvumi_ob_data, list);
561                         list_del_init(&ob_data->list);
562                 } else {
563                         ob_data = NULL;
564                         if (cur_obf == 0) {
565                                 cur_obf = mhba->list_num_io - 1;
566                                 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
567                         } else
568                                 cur_obf -= 1;
569                         break;
570                 }
571
572                 memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
573                 p_outb_frame->tag = 0xff;
574
575                 list_add_tail(&ob_data->list, &mhba->free_ob_list);
576         }
577         mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
578         mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
579         iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
580 }
581
582 static void mvumi_reset(struct mvumi_hba *mhba)
583 {
584         struct mvumi_hw_regs *regs = mhba->regs;
585
586         iowrite32(0, regs->enpointa_mask_reg);
587         if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
588                 return;
589
590         iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
591 }
592
593 static unsigned char mvumi_start(struct mvumi_hba *mhba);
594
595 static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
596 {
597         mhba->fw_state = FW_STATE_ABORT;
598         mvumi_reset(mhba);
599
600         if (mvumi_start(mhba))
601                 return FAILED;
602         else
603                 return SUCCESS;
604 }
605
606 static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
607 {
608         struct mvumi_hw_regs *regs = mhba->regs;
609         u32 tmp;
610         unsigned long before;
611         before = jiffies;
612
613         iowrite32(0, regs->enpointa_mask_reg);
614         tmp = ioread32(regs->arm_to_pciea_msg1);
615         while (tmp != HANDSHAKE_READYSTATE) {
616                 iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
617                 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
618                         dev_err(&mhba->pdev->dev,
619                                 "FW reset failed [0x%x].\n", tmp);
620                         return FAILED;
621                 }
622
623                 msleep(500);
624                 rmb();
625                 tmp = ioread32(regs->arm_to_pciea_msg1);
626         }
627
628         return SUCCESS;
629 }
630
631 static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
632 {
633         unsigned char i;
634
635         for (i = 0; i < MAX_BASE_ADDRESS; i++) {
636                 pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
637                                                 &mhba->pci_base[i]);
638         }
639 }
640
641 static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
642 {
643         unsigned char i;
644
645         for (i = 0; i < MAX_BASE_ADDRESS; i++) {
646                 if (mhba->pci_base[i])
647                         pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
648                                                 mhba->pci_base[i]);
649         }
650 }
651
652 static int mvumi_pci_set_master(struct pci_dev *pdev)
653 {
654         int ret = 0;
655
656         pci_set_master(pdev);
657
658         if (IS_DMA64) {
659                 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
660                         ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
661         } else
662                 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
663
664         return ret;
665 }
666
667 static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
668 {
669         mhba->fw_state = FW_STATE_ABORT;
670
671         iowrite32(0, mhba->regs->reset_enable);
672         iowrite32(0xf, mhba->regs->reset_request);
673
674         iowrite32(0x10, mhba->regs->reset_enable);
675         iowrite32(0x10, mhba->regs->reset_request);
676         msleep(100);
677         pci_disable_device(mhba->pdev);
678
679         if (pci_enable_device(mhba->pdev)) {
680                 dev_err(&mhba->pdev->dev, "enable device failed\n");
681                 return FAILED;
682         }
683         if (mvumi_pci_set_master(mhba->pdev)) {
684                 dev_err(&mhba->pdev->dev, "set master failed\n");
685                 return FAILED;
686         }
687         mvumi_restore_bar_addr(mhba);
688         if (mvumi_wait_for_fw(mhba) == FAILED)
689                 return FAILED;
690
691         return mvumi_wait_for_outstanding(mhba);
692 }
693
694 static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
695 {
696         return mvumi_wait_for_outstanding(mhba);
697 }
698
699 static int mvumi_host_reset(struct scsi_cmnd *scmd)
700 {
701         struct mvumi_hba *mhba;
702
703         mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
704
705         scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
706                         scmd->request->tag, scmd->cmnd[0], scmd->retries);
707
708         return mhba->instancet->reset_host(mhba);
709 }
710
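/*
 * Fire an internal command and sleep on int_cmd_wait_q until it
 * completes or MVUMI_INTERNAL_CMD_WAIT_TIME expires; on timeout the tag
 * and list bookkeeping for the command are rolled back.
 */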
711 static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
712                                                 struct mvumi_cmd *cmd)
713 {
714         unsigned long flags;
715
716         cmd->cmd_status = REQ_STATUS_PENDING;
717
718         if (atomic_read(&cmd->sync_cmd)) {
719                 dev_err(&mhba->pdev->dev,
720                         "last blocked cmd not finished, sync_cmd = %d\n",
721                                                 atomic_read(&cmd->sync_cmd));
722                 BUG_ON(1);
723                 return -1;
724         }
725         atomic_inc(&cmd->sync_cmd);
726         spin_lock_irqsave(mhba->shost->host_lock, flags);
727         mhba->instancet->fire_cmd(mhba, cmd);
728         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
729
730         wait_event_timeout(mhba->int_cmd_wait_q,
731                 (cmd->cmd_status != REQ_STATUS_PENDING),
732                 MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
733
734         /* command timeout */
735         if (atomic_read(&cmd->sync_cmd)) {
736                 spin_lock_irqsave(mhba->shost->host_lock, flags);
737                 atomic_dec(&cmd->sync_cmd);
738                 if (mhba->tag_cmd[cmd->frame->tag]) {
739                         mhba->tag_cmd[cmd->frame->tag] = NULL;
740                         dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
741                                                         cmd->frame->tag);
742                         tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
743                 }
744                 if (!list_empty(&cmd->queue_pointer)) {
745                         dev_warn(&mhba->pdev->dev,
746                                 "TIMEOUT: an internal command was not sent!\n");
747                         list_del_init(&cmd->queue_pointer);
748                 } else
749                         atomic_dec(&mhba->fw_outstanding);
750
751                 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
752         }
753         return 0;
754 }
755
756 static void mvumi_release_fw(struct mvumi_hba *mhba)
757 {
758         mvumi_free_cmds(mhba);
759         mvumi_release_mem_resource(mhba);
760         mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
761         dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
762                 mhba->handshake_page, mhba->handshake_page_phys);
763         kfree(mhba->regs);
764         pci_release_regions(mhba->pdev);
765 }
766
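/*
 * Send a Marvell-specific core-module shutdown CDB to every device
 * marked present in target_map, so that each one flushes its cache
 * before the controller is stopped.
 */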
767 static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
768 {
769         struct mvumi_cmd *cmd;
770         struct mvumi_msg_frame *frame;
771         unsigned char device_id, retry = 0;
772         unsigned char bitcount = sizeof(unsigned char) * 8;
773
774         for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
775                 if (!(mhba->target_map[device_id / bitcount] &
776                                 (1 << (device_id % bitcount))))
777                         continue;
778 get_cmd:        cmd = mvumi_create_internal_cmd(mhba, 0);
779                 if (!cmd) {
780                         if (retry++ >= 5) {
781                                 dev_err(&mhba->pdev->dev, "failed to get memory"
782                                         " for internal flush cache cmd for "
783                                         "device %d\n", device_id);
784                                 retry = 0;
785                                 continue;
786                         } else
787                                 goto get_cmd;
788                 }
789                 cmd->scmd = NULL;
790                 cmd->cmd_status = REQ_STATUS_PENDING;
791                 atomic_set(&cmd->sync_cmd, 0);
792                 frame = cmd->frame;
793                 frame->req_function = CL_FUN_SCSI_CMD;
794                 frame->device_id = device_id;
795                 frame->cmd_flag = CMD_FLAG_NON_DATA;
796                 frame->data_transfer_length = 0;
797                 frame->cdb_length = MAX_COMMAND_SIZE;
798                 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
799                 frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
800                 frame->cdb[1] = CDB_CORE_MODULE;
801                 frame->cdb[2] = CDB_CORE_SHUTDOWN;
802
803                 mvumi_issue_blocked_cmd(mhba, cmd);
804                 if (cmd->cmd_status != SAM_STAT_GOOD) {
805                         dev_err(&mhba->pdev->dev,
806                                 "device %d flush cache failed, status=0x%x.\n",
807                                 device_id, cmd->cmd_status);
808                 }
809
810                 mvumi_delete_internal_cmd(mhba, cmd);
811         }
812         return 0;
813 }
814
815 static unsigned char
816 mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
817                                                         unsigned short len)
818 {
819         unsigned char *ptr;
820         unsigned char ret = 0, i;
821
822         ptr = (unsigned char *) p_header->frame_content;
823         for (i = 0; i < len; i++) {
824                 ret ^= *ptr;
825                 ptr++;
826         }
827
828         return ret;
829 }
830
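/*
 * Fill in one handshake page before it is sent to the firmware: host
 * information (HS_PAGE_HOST_INFO), firmware control (HS_PAGE_FIRM_CTL)
 * or communication-list configuration (HS_PAGE_CL_INFO), each finished
 * with a checksum over the page contents.
 */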
831 static void mvumi_hs_build_page(struct mvumi_hba *mhba,
832                                 struct mvumi_hs_header *hs_header)
833 {
834         struct mvumi_hs_page2 *hs_page2;
835         struct mvumi_hs_page4 *hs_page4;
836         struct mvumi_hs_page3 *hs_page3;
837         u64 time;
838         u64 local_time;
839
840         switch (hs_header->page_code) {
841         case HS_PAGE_HOST_INFO:
842                 hs_page2 = (struct mvumi_hs_page2 *) hs_header;
843                 hs_header->frame_length = sizeof(*hs_page2) - 4;
844                 memset(hs_header->frame_content, 0, hs_header->frame_length);
845                 hs_page2->host_type = 3; /* 3 means Linux */
846                 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
847                         hs_page2->host_cap = 0x08;/* host dynamic source mode */
848                 hs_page2->host_ver.ver_major = VER_MAJOR;
849                 hs_page2->host_ver.ver_minor = VER_MINOR;
850                 hs_page2->host_ver.ver_oem = VER_OEM;
851                 hs_page2->host_ver.ver_build = VER_BUILD;
852                 hs_page2->system_io_bus = 0;
853                 hs_page2->slot_number = 0;
854                 hs_page2->intr_level = 0;
855                 hs_page2->intr_vector = 0;
856                 time = ktime_get_real_seconds();
857                 local_time = (time - (sys_tz.tz_minuteswest * 60));
858                 hs_page2->seconds_since1970 = local_time;
859                 hs_header->checksum = mvumi_calculate_checksum(hs_header,
860                                                 hs_header->frame_length);
861                 break;
862
863         case HS_PAGE_FIRM_CTL:
864                 hs_page3 = (struct mvumi_hs_page3 *) hs_header;
865                 hs_header->frame_length = sizeof(*hs_page3) - 4;
866                 memset(hs_header->frame_content, 0, hs_header->frame_length);
867                 hs_header->checksum = mvumi_calculate_checksum(hs_header,
868                                                 hs_header->frame_length);
869                 break;
870
871         case HS_PAGE_CL_INFO:
872                 hs_page4 = (struct mvumi_hs_page4 *) hs_header;
873                 hs_header->frame_length = sizeof(*hs_page4) - 4;
874                 memset(hs_header->frame_content, 0, hs_header->frame_length);
875                 hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
876                 hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
877
878                 hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
879                 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
880                 hs_page4->ib_entry_size = mhba->ib_max_size_setting;
881                 hs_page4->ob_entry_size = mhba->ob_max_size_setting;
882                 if (mhba->hba_capability
883                         & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
884                         hs_page4->ob_depth = find_first_bit((unsigned long *)
885                                                             &mhba->list_num_io,
886                                                             BITS_PER_LONG);
887                         hs_page4->ib_depth = find_first_bit((unsigned long *)
888                                                             &mhba->list_num_io,
889                                                             BITS_PER_LONG);
890                 } else {
891                         hs_page4->ob_depth = (u8) mhba->list_num_io;
892                         hs_page4->ib_depth = (u8) mhba->list_num_io;
893                 }
894                 hs_header->checksum = mvumi_calculate_checksum(hs_header,
895                                                 hs_header->frame_length);
896                 break;
897
898         default:
899                 dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
900                         hs_header->page_code);
901                 break;
902         }
903 }
904
905 /**
906  * mvumi_init_data -    Initialize requested data for FW
907  * @mhba:                       Adapter soft state
908  */
909 static int mvumi_init_data(struct mvumi_hba *mhba)
910 {
911         struct mvumi_ob_data *ob_pool;
912         struct mvumi_res *res_mgnt;
913         unsigned int tmp_size, offset, i;
914         void *virmem, *v;
915         dma_addr_t p;
916
917         if (mhba->fw_flag & MVUMI_FW_ALLOC)
918                 return 0;
919
920         tmp_size = mhba->ib_max_size * mhba->max_io;
921         if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
922                 tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
923
924         tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
925         tmp_size += 8 + sizeof(u32)*2 + 16;
926
927         res_mgnt = mvumi_alloc_mem_resource(mhba,
928                                         RESOURCE_UNCACHED_MEMORY, tmp_size);
929         if (!res_mgnt) {
930                 dev_err(&mhba->pdev->dev,
931                         "failed to allocate memory for inbound list\n");
932                 goto fail_alloc_dma_buf;
933         }
934
935         p = res_mgnt->bus_addr;
936         v = res_mgnt->virt_addr;
937         /* ib_list */
938         offset = round_up(p, 128) - p;
939         p += offset;
940         v += offset;
941         mhba->ib_list = v;
942         mhba->ib_list_phys = p;
943         if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
944                 v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
945                 p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
946                 mhba->ib_frame = v;
947                 mhba->ib_frame_phys = p;
948         }
949         v += mhba->ib_max_size * mhba->max_io;
950         p += mhba->ib_max_size * mhba->max_io;
951
952         /* ib shadow */
953         offset = round_up(p, 8) - p;
954         p += offset;
955         v += offset;
956         mhba->ib_shadow = v;
957         mhba->ib_shadow_phys = p;
958         p += sizeof(u32)*2;
959         v += sizeof(u32)*2;
960         /* ob shadow */
961         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
962                 offset = round_up(p, 8) - p;
963                 p += offset;
964                 v += offset;
965                 mhba->ob_shadow = v;
966                 mhba->ob_shadow_phys = p;
967                 p += 8;
968                 v += 8;
969         } else {
970                 offset = round_up(p, 4) - p;
971                 p += offset;
972                 v += offset;
973                 mhba->ob_shadow = v;
974                 mhba->ob_shadow_phys = p;
975                 p += 4;
976                 v += 4;
977         }
978
979         /* ob list */
980         offset = round_up(p, 128) - p;
981         p += offset;
982         v += offset;
983
984         mhba->ob_list = v;
985         mhba->ob_list_phys = p;
986
987         /* ob data pool */
988         tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
989         tmp_size = round_up(tmp_size, 8);
990
991         res_mgnt = mvumi_alloc_mem_resource(mhba,
992                                 RESOURCE_CACHED_MEMORY, tmp_size);
993         if (!res_mgnt) {
994                 dev_err(&mhba->pdev->dev,
995                         "failed to allocate memory for outbound data buffer\n");
996                 goto fail_alloc_dma_buf;
997         }
998         virmem = res_mgnt->virt_addr;
999
1000         for (i = mhba->max_io; i != 0; i--) {
1001                 ob_pool = (struct mvumi_ob_data *) virmem;
1002                 list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1003                 virmem += mhba->ob_max_size + sizeof(*ob_pool);
1004         }
1005
1006         tmp_size = sizeof(unsigned short) * mhba->max_io +
1007                                 sizeof(struct mvumi_cmd *) * mhba->max_io;
1008         tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1009                                                 (sizeof(unsigned char) * 8);
1010
1011         res_mgnt = mvumi_alloc_mem_resource(mhba,
1012                                 RESOURCE_CACHED_MEMORY, tmp_size);
1013         if (!res_mgnt) {
1014                 dev_err(&mhba->pdev->dev,
1015                         "failed to allocate memory for tag and target map\n");
1016                 goto fail_alloc_dma_buf;
1017         }
1018
1019         virmem = res_mgnt->virt_addr;
1020         mhba->tag_pool.stack = virmem;
1021         mhba->tag_pool.size = mhba->max_io;
1022         tag_init(&mhba->tag_pool, mhba->max_io);
1023         virmem += sizeof(unsigned short) * mhba->max_io;
1024
1025         mhba->tag_cmd = virmem;
1026         virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1027
1028         mhba->target_map = virmem;
1029
1030         mhba->fw_flag |= MVUMI_FW_ALLOC;
1031         return 0;
1032
1033 fail_alloc_dma_buf:
1034         mvumi_release_mem_resource(mhba);
1035         return -1;
1036 }
1037
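/*
 * Validate the checksum of a handshake page received from the firmware
 * and, for the firmware-capability page, cache the reported limits
 * (max_io, list depth, transfer size, entry sizes, capability flags).
 */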
1038 static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1039                                 struct mvumi_hs_header *hs_header)
1040 {
1041         struct mvumi_hs_page1 *hs_page1;
1042         unsigned char page_checksum;
1043
1044         page_checksum = mvumi_calculate_checksum(hs_header,
1045                                                 hs_header->frame_length);
1046         if (page_checksum != hs_header->checksum) {
1047                 dev_err(&mhba->pdev->dev, "checksum error\n");
1048                 return -1;
1049         }
1050
1051         switch (hs_header->page_code) {
1052         case HS_PAGE_FIRM_CAP:
1053                 hs_page1 = (struct mvumi_hs_page1 *) hs_header;
1054
1055                 mhba->max_io = hs_page1->max_io_support;
1056                 mhba->list_num_io = hs_page1->cl_inout_list_depth;
1057                 mhba->max_transfer_size = hs_page1->max_transfer_size;
1058                 mhba->max_target_id = hs_page1->max_devices_support;
1059                 mhba->hba_capability = hs_page1->capability;
1060                 mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1061                 mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1062
1063                 mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1064                 mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1065
1066                 dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1067                                                 hs_page1->fw_ver.ver_build);
1068
1069                 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1070                         mhba->eot_flag = 22;
1071                 else
1072                         mhba->eot_flag = 27;
1073                 if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1074                         mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1075                 break;
1076         default:
1077                 dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1078                 return -1;
1079         }
1080         return 0;
1081 }
1082
1083 /**
1084  * mvumi_handshake -    Move the FW to READY state
1085  * @mhba:                               Adapter soft state
1086  *
1087  * During initialization, the FW can be in any one of several possible
1088  * states. If the FW is in the operational or waiting-for-handshake state,
1089  * the driver must take steps to bring it to the ready state. Otherwise, it
1090  * has to wait for the ready state.
1091  */
1092 static int mvumi_handshake(struct mvumi_hba *mhba)
1093 {
1094         unsigned int hs_state, tmp, hs_fun;
1095         struct mvumi_hs_header *hs_header;
1096         struct mvumi_hw_regs *regs = mhba->regs;
1097
1098         if (mhba->fw_state == FW_STATE_STARTING)
1099                 hs_state = HS_S_START;
1100         else {
1101                 tmp = ioread32(regs->arm_to_pciea_msg0);
1102                 hs_state = HS_GET_STATE(tmp);
1103                 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1104                 if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
1105                         mhba->fw_state = FW_STATE_STARTING;
1106                         return -1;
1107                 }
1108         }
1109
1110         hs_fun = 0;
1111         switch (hs_state) {
1112         case HS_S_START:
1113                 mhba->fw_state = FW_STATE_HANDSHAKING;
1114                 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1115                 HS_SET_STATE(hs_fun, HS_S_RESET);
1116                 iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
1117                 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1118                 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1119                 break;
1120
1121         case HS_S_RESET:
1122                 iowrite32(lower_32_bits(mhba->handshake_page_phys),
1123                                         regs->pciea_to_arm_msg1);
1124                 iowrite32(upper_32_bits(mhba->handshake_page_phys),
1125                                         regs->arm_to_pciea_msg1);
1126                 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1127                 HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
1128                 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1129                 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1130                 break;
1131
1132         case HS_S_PAGE_ADDR:
1133         case HS_S_QUERY_PAGE:
1134         case HS_S_SEND_PAGE:
1135                 hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1136                 if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
1137                         mhba->hba_total_pages =
1138                         ((struct mvumi_hs_page1 *) hs_header)->total_pages;
1139
1140                         if (mhba->hba_total_pages == 0)
1141                                 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1142                 }
1143
1144                 if (hs_state == HS_S_QUERY_PAGE) {
1145                         if (mvumi_hs_process_page(mhba, hs_header)) {
1146                                 HS_SET_STATE(hs_fun, HS_S_ABORT);
1147                                 return -1;
1148                         }
1149                         if (mvumi_init_data(mhba)) {
1150                                 HS_SET_STATE(hs_fun, HS_S_ABORT);
1151                                 return -1;
1152                         }
1153                 } else if (hs_state == HS_S_PAGE_ADDR) {
1154                         hs_header->page_code = 0;
1155                         mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1156                 }
1157
1158                 if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1159                         hs_header->page_code++;
1160                         if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
1161                                 mvumi_hs_build_page(mhba, hs_header);
1162                                 HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
1163                         } else
1164                                 HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
1165                 } else
1166                         HS_SET_STATE(hs_fun, HS_S_END);
1167
1168                 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1169                 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1170                 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1171                 break;
1172
1173         case HS_S_END:
1174                 /* Set communication list ISR */
1175                 tmp = ioread32(regs->enpointa_mask_reg);
1176                 tmp |= regs->int_comaout | regs->int_comaerr;
1177                 iowrite32(tmp, regs->enpointa_mask_reg);
1178                 iowrite32(mhba->list_num_io, mhba->ib_shadow);
1179                 /* Set InBound List Available count shadow */
1180                 iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1181                                         regs->inb_aval_count_basel);
1182                 iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1183                                         regs->inb_aval_count_baseh);
1184
1185                 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1186                         /* Set OutBound List Available count shadow */
1187                         iowrite32((mhba->list_num_io-1) |
1188                                                         regs->cl_pointer_toggle,
1189                                                         mhba->ob_shadow);
1190                         iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1191                                                         regs->outb_copy_basel);
1192                         iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1193                                                         regs->outb_copy_baseh);
1194                 }
1195
1196                 mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1197                                                         regs->cl_pointer_toggle;
1198                 mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1199                                                         regs->cl_pointer_toggle;
1200                 mhba->fw_state = FW_STATE_STARTED;
1201
1202                 break;
1203         default:
1204                 dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1205                                                                 hs_state);
1206                 return -1;
1207         }
1208         return 0;
1209 }
1210
1211 static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1212 {
1213         unsigned int isr_status;
1214         unsigned long before;
1215
1216         before = jiffies;
1217         mvumi_handshake(mhba);
1218         do {
1219                 isr_status = mhba->instancet->read_fw_status_reg(mhba);
1220
1221                 if (mhba->fw_state == FW_STATE_STARTED)
1222                         return 0;
1223                 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1224                         dev_err(&mhba->pdev->dev,
1225                                 "no handshake response at state 0x%x.\n",
1226                                   mhba->fw_state);
1227                         dev_err(&mhba->pdev->dev,
1228                                 "isr : global=0x%x,status=0x%x.\n",
1229                                         mhba->global_isr, isr_status);
1230                         return -1;
1231                 }
1232                 rmb();
1233                 usleep_range(1000, 2000);
1234         } while (!(isr_status & DRBL_HANDSHAKE_ISR));
1235
1236         return 0;
1237 }
1238
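/*
 * Wait (up to FW_MAX_DELAY seconds) for the firmware signature to reach
 * the ready or done state, issuing MU resets while polling, then step
 * through the handshake state machine until the firmware is started.
 */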
1239 static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1240 {
1241         unsigned int tmp;
1242         unsigned long before;
1243
1244         before = jiffies;
1245         tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1246         while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1247                 if (tmp != HANDSHAKE_READYSTATE)
1248                         iowrite32(DRBL_MU_RESET,
1249                                         mhba->regs->pciea_to_arm_drbl_reg);
1250                 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1251                         dev_err(&mhba->pdev->dev,
1252                                 "invalid signature [0x%x].\n", tmp);
1253                         return -1;
1254                 }
1255                 usleep_range(1000, 2000);
1256                 rmb();
1257                 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1258         }
1259
1260         mhba->fw_state = FW_STATE_STARTING;
1261         dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1262         do {
1263                 if (mvumi_handshake_event(mhba)) {
1264                         dev_err(&mhba->pdev->dev,
1265                                         "handshake failed at state 0x%x.\n",
1266                                                 mhba->fw_state);
1267                         return -1;
1268                 }
1269         } while (mhba->fw_state != FW_STATE_STARTED);
1270
1271         dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1272
1273         return 0;
1274 }
1275
1276 static unsigned char mvumi_start(struct mvumi_hba *mhba)
1277 {
1278         unsigned int tmp;
1279         struct mvumi_hw_regs *regs = mhba->regs;
1280
1281         /* clear Door bell */
1282         tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1283         iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1284
1285         iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1286         tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1287         iowrite32(tmp, regs->enpointa_mask_reg);
1288         msleep(100);
1289         if (mvumi_check_handshake(mhba))
1290                 return -1;
1291
1292         return 0;
1293 }
1294
1295 /**
1296  * mvumi_complete_cmd - Completes a command
1297  * @mhba:                       Adapter soft state
1298  * @cmd:                        Command to be completed
1299  */
1300 static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1301                                         struct mvumi_rsp_frame *ob_frame)
1302 {
1303         struct scsi_cmnd *scmd = cmd->scmd;
1304
1305         cmd->scmd->SCp.ptr = NULL;
1306         scmd->result = ob_frame->req_status;
1307
1308         switch (ob_frame->req_status) {
1309         case SAM_STAT_GOOD:
1310                 scmd->result |= DID_OK << 16;
1311                 break;
1312         case SAM_STAT_BUSY:
1313                 scmd->result |= DID_BUS_BUSY << 16;
1314                 break;
1315         case SAM_STAT_CHECK_CONDITION:
1316                 scmd->result |= (DID_OK << 16);
1317                 if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1318                         memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1319                                 sizeof(struct mvumi_sense_data));
1320                         scmd->result |=  (DRIVER_SENSE << 24);
1321                 }
1322                 break;
1323         default:
1324                 scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1325                 break;
1326         }
1327
1328         if (scsi_bufflen(scmd))
1329                 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
1330                              scsi_sg_count(scmd),
1331                              scmd->sc_data_direction);
1332         cmd->scmd->scsi_done(scmd);
1333         mvumi_return_cmd(mhba, cmd);
1334 }
1335
1336 static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1337                                                 struct mvumi_cmd *cmd,
1338                                         struct mvumi_rsp_frame *ob_frame)
1339 {
1340         if (atomic_read(&cmd->sync_cmd)) {
1341                 cmd->cmd_status = ob_frame->req_status;
1342
1343                 if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1344                                 (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1345                                 cmd->data_buf) {
1346                         memcpy(cmd->data_buf, ob_frame->payload,
1347                                         sizeof(struct mvumi_sense_data));
1348                 }
1349                 atomic_dec(&cmd->sync_cmd);
1350                 wake_up(&mhba->int_cmd_wait_q);
1351         }
1352 }
1353
1354 static void mvumi_show_event(struct mvumi_hba *mhba,
1355                         struct mvumi_driver_event *ptr)
1356 {
1357         unsigned int i;
1358
1359         dev_warn(&mhba->pdev->dev,
1360                 "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1361                 ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1362         if (ptr->param_count) {
1363                 printk(KERN_WARNING "Event param(len 0x%x): ",
1364                                                 ptr->param_count);
1365                 for (i = 0; i < ptr->param_count; i++)
1366                         printk(KERN_WARNING "0x%x ", ptr->params[i]);
1367
1368                 printk(KERN_WARNING "\n");
1369         }
1370
1371         if (ptr->sense_data_length) {
1372                 printk(KERN_WARNING "Event sense data(len 0x%x): ",
1373                                                 ptr->sense_data_length);
1374                 for (i = 0; i < ptr->sense_data_length; i++)
1375                         printk(KERN_CONT "0x%x ", ptr->sense_data[i]);
1376                 printk(KERN_CONT "\n");
1377         }
1378 }
1379
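/**
 * mvumi_handle_hotplug -       Adds or removes a SCSI device
 * @mhba:                       Adapter soft state
 * @devid:                      Target id of the device
 * @status:                     DEVICE_ONLINE to add, DEVICE_OFFLINE to remove
 *
 * Returns 0 if the device was added or removed, -1 otherwise.
 */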
1380 static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1381 {
1382         struct scsi_device *sdev;
1383         int ret = -1;
1384
1385         if (status == DEVICE_OFFLINE) {
1386                 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1387                 if (sdev) {
1388                         dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1389                                                                 sdev->id, 0);
1390                         scsi_remove_device(sdev);
1391                         scsi_device_put(sdev);
1392                         ret = 0;
1393                 } else
1394                         dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1395                                                                         devid);
1396         } else if (status == DEVICE_ONLINE) {
1397                 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1398                 if (!sdev) {
1399                         scsi_add_device(mhba->shost, 0, devid, 0);
1400                         dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1401                                                                 devid, 0);
1402                         ret = 0;
1403                 } else {
1404                         dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1405                                                                 0, devid, 0);
1406                         scsi_device_put(sdev);
1407                 }
1408         }
1409         return ret;
1410 }
1411
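/**
 * mvumi_inquiry -      Issues an INQUIRY to a target and derives its wwid
 * @mhba:               Adapter soft state
 * @id:                 Target id to query
 * @cmd:                Internal command to reuse; if NULL one is allocated here
 *
 * Returns the wwid of the device, or 0 if the device did not respond.
 * On MV9143 the wwid is simply id + 1; on other chips it is taken from the
 * INQUIRY response data.
 */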
1412 static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1413         unsigned int id, struct mvumi_cmd *cmd)
1414 {
1415         struct mvumi_msg_frame *frame;
1416         u64 wwid = 0;
1417         int cmd_alloc = 0;
1418         int data_buf_len = 64;
1419
1420         if (!cmd) {
1421                 cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1422                 if (cmd)
1423                         cmd_alloc = 1;
1424                 else
1425                         return 0;
1426         } else {
1427                 memset(cmd->data_buf, 0, data_buf_len);
1428         }
1429         cmd->scmd = NULL;
1430         cmd->cmd_status = REQ_STATUS_PENDING;
1431         atomic_set(&cmd->sync_cmd, 0);
1432         frame = cmd->frame;
1433         frame->device_id = (u16) id;
1434         frame->cmd_flag = CMD_FLAG_DATA_IN;
1435         frame->req_function = CL_FUN_SCSI_CMD;
1436         frame->cdb_length = 6;
1437         frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1438         memset(frame->cdb, 0, frame->cdb_length);
1439         frame->cdb[0] = INQUIRY;
1440         frame->cdb[4] = frame->data_transfer_length;
1441
1442         mvumi_issue_blocked_cmd(mhba, cmd);
1443
1444         if (cmd->cmd_status == SAM_STAT_GOOD) {
1445                 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1446                         wwid = id + 1;
1447                 else
1448                         memcpy((void *)&wwid,
1449                                (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1450                                MVUMI_INQUIRY_UUID_LEN);
1451                 dev_dbg(&mhba->pdev->dev,
1452                         "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1453         } else {
1454                 wwid = 0;
1455         }
1456         if (cmd_alloc)
1457                 mvumi_delete_internal_cmd(mhba, cmd);
1458
1459         return wwid;
1460 }
1461
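/**
 * mvumi_detach_devices -       Removes all devices attached to the host
 * @mhba:                       Adapter soft state
 *
 * Walks the per-host and per-hba device lists, removes the SCSI devices and
 * frees the bookkeeping structures; on MV9580 the trailing virtual device is
 * removed as well.
 */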
1462 static void mvumi_detach_devices(struct mvumi_hba *mhba)
1463 {
1464         struct mvumi_device *mv_dev = NULL, *dev_next;
1465         struct scsi_device *sdev = NULL;
1466
1467         mutex_lock(&mhba->device_lock);
1468
1469         /* detach Hard Disk */
1470         list_for_each_entry_safe(mv_dev, dev_next,
1471                 &mhba->shost_dev_list, list) {
1472                 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1473                 list_del_init(&mv_dev->list);
1474                 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1475                         mv_dev->id, mv_dev->wwid);
1476                 kfree(mv_dev);
1477         }
1478         list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1479                 list_del_init(&mv_dev->list);
1480                 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1481                         mv_dev->id, mv_dev->wwid);
1482                 kfree(mv_dev);
1483         }
1484
1485         /* detach virtual device */
1486         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1487                 sdev = scsi_device_lookup(mhba->shost, 0,
1488                                                 mhba->max_target_id - 1, 0);
1489
1490         if (sdev) {
1491                 scsi_remove_device(sdev);
1492                 scsi_device_put(sdev);
1493         }
1494
1495         mutex_unlock(&mhba->device_lock);
1496 }
1497
1498 static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1499 {
1500         struct scsi_device *sdev;
1501
1502         sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1503         if (sdev) {
1504                 scsi_rescan_device(&sdev->sdev_gendev);
1505                 scsi_device_put(sdev);
1506         }
1507 }
1508
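/**
 * mvumi_match_devices -        Checks whether a probed wwid is already attached
 * @mhba:                       Adapter soft state
 * @id:                         Target id reported for the wwid
 * @wwid:                       World wide id returned by the inquiry
 *
 * Returns 1 if the wwid is already attached with the same id, -1 if it is
 * attached under a different id, and 0 if it is not attached at all.
 */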
1509 static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1510 {
1511         struct mvumi_device *mv_dev = NULL;
1512
1513         list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1514                 if (mv_dev->wwid == wwid) {
1515                         if (mv_dev->id != id) {
1516                                 dev_err(&mhba->pdev->dev,
1517                                         "%s has same wwid[%llx]"
1518                                         " but different id[%d %d]\n",
1519                                         __func__, mv_dev->wwid, mv_dev->id, id);
1520                                 return -1;
1521                         } else {
1522                                 if (mhba->pdev->device ==
1523                                                 PCI_DEVICE_ID_MARVELL_MV9143)
1524                                         mvumi_rescan_devices(mhba, id);
1525                                 return 1;
1526                         }
1527                 }
1528         }
1529         return 0;
1530 }
1531
1532 static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1533 {
1534         struct mvumi_device *mv_dev = NULL, *dev_next;
1535
1536         list_for_each_entry_safe(mv_dev, dev_next,
1537                                 &mhba->shost_dev_list, list) {
1538                 if (mv_dev->id == id) {
1539                         dev_dbg(&mhba->pdev->dev,
1540                                 "detach device(0:%d:0) wwid(%llx) from HOST\n",
1541                                 mv_dev->id, mv_dev->wwid);
1542                         mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1543                         list_del_init(&mv_dev->list);
1544                         kfree(mv_dev);
1545                 }
1546         }
1547 }
1548
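/**
 * mvumi_probe_devices -        Rescans all target ids on the adapter
 * @mhba:                       Adapter soft state
 *
 * Inquires every target id; targets that no longer respond are removed and
 * newly discovered wwids are queued on mhba_dev_list for the rescan thread
 * to attach.
 */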
1549 static int mvumi_probe_devices(struct mvumi_hba *mhba)
1550 {
1551         int id, maxid;
1552         u64 wwid = 0;
1553         struct mvumi_device *mv_dev = NULL;
1554         struct mvumi_cmd *cmd = NULL;
1555         int found = 0;
1556
1557         cmd = mvumi_create_internal_cmd(mhba, 64);
1558         if (!cmd)
1559                 return -1;
1560
1561         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1562                 maxid = mhba->max_target_id;
1563         else
1564                 maxid = mhba->max_target_id - 1;
1565
1566         for (id = 0; id < maxid; id++) {
1567                 wwid = mvumi_inquiry(mhba, id, cmd);
1568                 if (!wwid) {
1569                         /* device no response, remove it */
1570                         mvumi_remove_devices(mhba, id);
1571                 } else {
1572                         /* device response, add it */
1573                         found = mvumi_match_devices(mhba, id, wwid);
1574                         if (!found) {
1575                                 mvumi_remove_devices(mhba, id);
1576                                 mv_dev = kzalloc(sizeof(struct mvumi_device),
1577                                                                 GFP_KERNEL);
1578                                 if (!mv_dev) {
1579                                         dev_err(&mhba->pdev->dev,
1580                                                 "%s alloc mv_dev failed\n",
1581                                                 __func__);
1582                                         continue;
1583                                 }
1584                                 mv_dev->id = id;
1585                                 mv_dev->wwid = wwid;
1586                                 mv_dev->sdev = NULL;
1587                                 INIT_LIST_HEAD(&mv_dev->list);
1588                                 list_add_tail(&mv_dev->list,
1589                                               &mhba->mhba_dev_list);
1590                                 dev_dbg(&mhba->pdev->dev,
1591                                         "probe a new device(0:%d:0)"
1592                                         " wwid(%llx)\n", id, mv_dev->wwid);
1593                         } else if (found == -1) {
                                /* duplicate wwid under a different id: free the
                                 * internal cmd before bailing out */
                                mvumi_delete_internal_cmd(mhba, cmd);
1594                                 return -1;
1595                         } else
1596                                 continue;
1597                 }
1598         }
1599
1600         if (cmd)
1601                 mvumi_delete_internal_cmd(mhba, cmd);
1602
1603         return 0;
1604 }
1605
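/**
 * mvumi_rescan_bus -   Device-scan kernel thread
 * @data:               Adapter soft state
 *
 * Sleeps until a bus-change doorbell bumps pnp_count, then re-probes the
 * adapter and attaches every newly found device to the SCSI midlayer.
 */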
1606 static int mvumi_rescan_bus(void *data)
1607 {
1608         int ret = 0;
1609         struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1610         struct mvumi_device *mv_dev = NULL, *dev_next;
1611
1612         while (!kthread_should_stop()) {
1613
1614                 set_current_state(TASK_INTERRUPTIBLE);
1615                 if (!atomic_read(&mhba->pnp_count))
1616                         schedule();
1617                 msleep(1000);
1618                 atomic_set(&mhba->pnp_count, 0);
1619                 __set_current_state(TASK_RUNNING);
1620
1621                 mutex_lock(&mhba->device_lock);
1622                 ret = mvumi_probe_devices(mhba);
1623                 if (!ret) {
1624                         list_for_each_entry_safe(mv_dev, dev_next,
1625                                                  &mhba->mhba_dev_list, list) {
1626                                 if (mvumi_handle_hotplug(mhba, mv_dev->id,
1627                                                          DEVICE_ONLINE)) {
1628                                         dev_err(&mhba->pdev->dev,
1629                                                 "%s add device(0:%d:0) failed,"
1630                                                 " wwid(%llx) already exists\n",
1631                                                 __func__,
1632                                                 mv_dev->id, mv_dev->wwid);
1633                                         list_del_init(&mv_dev->list);
1634                                         kfree(mv_dev);
1635                                 } else {
1636                                         list_move_tail(&mv_dev->list,
1637                                                        &mhba->shost_dev_list);
1638                                 }
1639                         }
1640                 }
1641                 mutex_unlock(&mhba->device_lock);
1642         }
1643         return 0;
1644 }
1645
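/**
 * mvumi_proc_msg -     Handles a hotplug bitmap reported by the firmware
 * @mhba:               Adapter soft state
 * @param:              Hotplug event containing an attach and a remove bitmap
 *
 * Each clear bit in the first bitmap marks a target to bring online and each
 * clear bit in the second bitmap marks a target to take offline.
 */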
1646 static void mvumi_proc_msg(struct mvumi_hba *mhba,
1647                                         struct mvumi_hotplug_event *param)
1648 {
1649         u16 size = param->size;
1650         const unsigned long *ar_bitmap;
1651         const unsigned long *re_bitmap;
1652         int index;
1653
1654         if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1655                 index = -1;
1656                 ar_bitmap = (const unsigned long *) param->bitmap;
1657                 re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1658
1659                 mutex_lock(&mhba->sas_discovery_mutex);
1660                 do {
1661                         index = find_next_zero_bit(ar_bitmap, size, index + 1);
1662                         if (index >= size)
1663                                 break;
1664                         mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1665                 } while (1);
1666
1667                 index = -1;
1668                 do {
1669                         index = find_next_zero_bit(re_bitmap, size, index + 1);
1670                         if (index >= size)
1671                                 break;
1672                         mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1673                 } while (1);
1674                 mutex_unlock(&mhba->sas_discovery_mutex);
1675         }
1676 }
1677
1678 static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1679 {
1680         if (msg == APICDB1_EVENT_GETEVENT) {
1681                 int i, count;
1682                 struct mvumi_driver_event *param = NULL;
1683                 struct mvumi_event_req *er = buffer;
1684                 count = er->count;
1685                 if (count > MAX_EVENTS_RETURNED) {
1686                         dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1687                                         " than max event count[0x%x].\n",
1688                                         count, MAX_EVENTS_RETURNED);
1689                         return;
1690                 }
1691                 for (i = 0; i < count; i++) {
1692                         param = &er->events[i];
1693                         mvumi_show_event(mhba, param);
1694                 }
1695         } else if (msg == APICDB1_HOST_GETEVENT) {
1696                 mvumi_proc_msg(mhba, buffer);
1697         }
1698 }
1699
1700 static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1701 {
1702         struct mvumi_cmd *cmd;
1703         struct mvumi_msg_frame *frame;
1704
1705         cmd = mvumi_create_internal_cmd(mhba, 512);
1706         if (!cmd)
1707                 return -1;
1708         cmd->scmd = NULL;
1709         cmd->cmd_status = REQ_STATUS_PENDING;
1710         atomic_set(&cmd->sync_cmd, 0);
1711         frame = cmd->frame;
1712         frame->device_id = 0;
1713         frame->cmd_flag = CMD_FLAG_DATA_IN;
1714         frame->req_function = CL_FUN_SCSI_CMD;
1715         frame->cdb_length = MAX_COMMAND_SIZE;
1716         frame->data_transfer_length = sizeof(struct mvumi_event_req);
1717         memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1718         frame->cdb[0] = APICDB0_EVENT;
1719         frame->cdb[1] = msg;
1720         mvumi_issue_blocked_cmd(mhba, cmd);
1721
1722         if (cmd->cmd_status != SAM_STAT_GOOD)
1723                 dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1724                                                         cmd->cmd_status);
1725         else
1726                 mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1727
1728         mvumi_delete_internal_cmd(mhba, cmd);
1729         return 0;
1730 }
1731
1732 static void mvumi_scan_events(struct work_struct *work)
1733 {
1734         struct mvumi_events_wq *mu_ev =
1735                 container_of(work, struct mvumi_events_wq, work_q);
1736
1737         mvumi_get_event(mu_ev->mhba, mu_ev->event);
1738         kfree(mu_ev);
1739 }
1740
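/**
 * mvumi_launch_events -        Dispatches doorbell events to their handlers
 * @mhba:                       Adapter soft state
 * @isr_status:                 Doorbell status captured by the interrupt handler
 *
 * A bus-change doorbell wakes the rescan thread; an event-notify doorbell
 * schedules a work item that fetches the events from the firmware.
 */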
1741 static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1742 {
1743         struct mvumi_events_wq *mu_ev;
1744
1745         while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1746                 if (isr_status & DRBL_BUS_CHANGE) {
1747                         atomic_inc(&mhba->pnp_count);
1748                         wake_up_process(mhba->dm_thread);
1749                         isr_status &= ~(DRBL_BUS_CHANGE);
1750                         continue;
1751                 }
1752
1753                 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1754                 if (mu_ev) {
1755                         INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1756                         mu_ev->mhba = mhba;
1757                         mu_ev->event = APICDB1_EVENT_GETEVENT;
1759                         mu_ev->param = NULL;
1760                         schedule_work(&mu_ev->work_q);
1761                 }
                /* clear the flag even if the allocation failed,
                 * otherwise this loop would never terminate */
                isr_status &= ~(DRBL_EVENT_NOTIFY);
1762         }
1763 }
1764
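/**
 * mvumi_handle_clob -  Completes all pending outbound list entries
 * @mhba:               Adapter soft state
 *
 * Pops every response frame from free_ob_list, releases its tag, completes
 * the matching SCSI or internal command, and then kicks off any queued
 * requests.
 */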
1765 static void mvumi_handle_clob(struct mvumi_hba *mhba)
1766 {
1767         struct mvumi_rsp_frame *ob_frame;
1768         struct mvumi_cmd *cmd;
1769         struct mvumi_ob_data *pool;
1770
1771         while (!list_empty(&mhba->free_ob_list)) {
1772                 pool = list_first_entry(&mhba->free_ob_list,
1773                                                 struct mvumi_ob_data, list);
1774                 list_del_init(&pool->list);
1775                 list_add_tail(&pool->list, &mhba->ob_data_list);
1776
1777                 ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1778                 cmd = mhba->tag_cmd[ob_frame->tag];
1779
1780                 atomic_dec(&mhba->fw_outstanding);
1781                 mhba->tag_cmd[ob_frame->tag] = NULL;
1782                 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1783                 if (cmd->scmd)
1784                         mvumi_complete_cmd(mhba, cmd, ob_frame);
1785                 else
1786                         mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1787         }
1788         mhba->instancet->fire_cmd(mhba, NULL);
1789 }
1790
1791 static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1792 {
1793         struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1794         unsigned long flags;
1795
1796         spin_lock_irqsave(mhba->shost->host_lock, flags);
1797         if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1798                 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1799                 return IRQ_NONE;
1800         }
1801
1802         if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1803                 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1804                         mvumi_launch_events(mhba, mhba->isr_status);
1805                 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1806                         dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1807                         mvumi_handshake(mhba);
1808                 }
1809
1810         }
1811
1812         if (mhba->global_isr & mhba->regs->int_comaout)
1813                 mvumi_receive_ob_list_entry(mhba);
1814
1815         mhba->global_isr = 0;
1816         mhba->isr_status = 0;
1817         if (mhba->fw_state == FW_STATE_STARTED)
1818                 mvumi_handle_clob(mhba);
1819         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1820         return IRQ_HANDLED;
1821 }
1822
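/**
 * mvumi_send_command - Copies one command into the inbound list
 * @mhba:               Adapter soft state
 * @cmd:                Command to be sent
 *
 * Allocates a tag and an inbound list entry for the command.  On controllers
 * with dynamic source support only a descriptor pointing at the frame is
 * written; otherwise the whole frame is copied into the entry.
 */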
1823 static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1824                                                 struct mvumi_cmd *cmd)
1825 {
1826         void *ib_entry;
1827         struct mvumi_msg_frame *ib_frame;
1828         unsigned int frame_len;
1829
1830         ib_frame = cmd->frame;
1831         if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1832                 dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1833                 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1834         }
1835         if (tag_is_empty(&mhba->tag_pool)) {
1836                 dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1837                 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1838         }
1839         mvumi_get_ib_list_entry(mhba, &ib_entry);
1840
1841         cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1842         cmd->frame->request_id = mhba->io_seq++;
1843         cmd->request_id = cmd->frame->request_id;
1844         mhba->tag_cmd[cmd->frame->tag] = cmd;
1845         frame_len = sizeof(*ib_frame) - 4 +
1846                                 ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1847         if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1848                 struct mvumi_dyn_list_entry *dle;
1849                 dle = ib_entry;
1850                 dle->src_low_addr =
1851                         cpu_to_le32(lower_32_bits(cmd->frame_phys));
1852                 dle->src_high_addr =
1853                         cpu_to_le32(upper_32_bits(cmd->frame_phys));
1854                 dle->if_length = (frame_len >> 2) & 0xFFF;
1855         } else {
1856                 memcpy(ib_entry, ib_frame, frame_len);
1857         }
1858         return MV_QUEUE_COMMAND_RESULT_SENT;
1859 }
1860
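/**
 * mvumi_fire_cmd -     Queues a command and flushes the waiting list
 * @mhba:               Adapter soft state
 * @cmd:                Command to queue, or NULL to only flush pending ones
 *
 * Sends as many waiting commands as the inbound list currently has room for
 * and then notifies the hardware once for the whole batch.
 */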
1861 static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1862 {
1863         unsigned short num_of_cl_sent = 0;
1864         unsigned int count;
1865         enum mvumi_qc_result result;
1866
1867         if (cmd)
1868                 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1869         count = mhba->instancet->check_ib_list(mhba);
1870         if (list_empty(&mhba->waiting_req_list) || !count)
1871                 return;
1872
1873         do {
1874                 cmd = list_first_entry(&mhba->waiting_req_list,
1875                                        struct mvumi_cmd, queue_pointer);
1876                 list_del_init(&cmd->queue_pointer);
1877                 result = mvumi_send_command(mhba, cmd);
1878                 switch (result) {
1879                 case MV_QUEUE_COMMAND_RESULT_SENT:
1880                         num_of_cl_sent++;
1881                         break;
1882                 case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1883                         list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1884                         if (num_of_cl_sent > 0)
1885                                 mvumi_send_ib_list_entry(mhba);
1886
1887                         return;
1888                 }
1889         } while (!list_empty(&mhba->waiting_req_list) && count--);
1890
1891         if (num_of_cl_sent > 0)
1892                 mvumi_send_ib_list_entry(mhba);
1893 }
1894
1895 /**
1896  * mvumi_enable_intr -  Enables interrupts
1897  * @mhba:               Adapter soft state
1898  */
1899 static void mvumi_enable_intr(struct mvumi_hba *mhba)
1900 {
1901         unsigned int mask;
1902         struct mvumi_hw_regs *regs = mhba->regs;
1903
1904         iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1905         mask = ioread32(regs->enpointa_mask_reg);
1906         mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1907         iowrite32(mask, regs->enpointa_mask_reg);
1908 }
1909
1910 /**
1911  * mvumi_disable_intr - Disables interrupts
1912  * @mhba:               Adapter soft state
1913  */
1914 static void mvumi_disable_intr(struct mvumi_hba *mhba)
1915 {
1916         unsigned int mask;
1917         struct mvumi_hw_regs *regs = mhba->regs;
1918
1919         iowrite32(0, regs->arm_to_pciea_mask_reg);
1920         mask = ioread32(regs->enpointa_mask_reg);
1921         mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1922                                                         regs->int_comaerr);
1923         iowrite32(mask, regs->enpointa_mask_reg);
1924 }
1925
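/**
 * mvumi_clear_intr -   Reads and acknowledges the interrupt cause
 * @extend:             Adapter soft state
 *
 * Returns 1 if the interrupt was not raised by this unit; otherwise clears
 * the outbound and doorbell causes and stashes them in global_isr/isr_status
 * for the interrupt handler.
 */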
1926 static int mvumi_clear_intr(void *extend)
1927 {
1928         struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1929         unsigned int status, isr_status = 0, tmp = 0;
1930         struct mvumi_hw_regs *regs = mhba->regs;
1931
1932         status = ioread32(regs->main_int_cause_reg);
1933         if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1934                 return 1;
1935         if (unlikely(status & regs->int_comaerr)) {
1936                 tmp = ioread32(regs->outb_isr_cause);
1937                 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1938                         if (tmp & regs->clic_out_err) {
1939                                 iowrite32(tmp & regs->clic_out_err,
1940                                                         regs->outb_isr_cause);
1941                         }
1942                 } else {
1943                         if (tmp & (regs->clic_in_err | regs->clic_out_err))
1944                                 iowrite32(tmp & (regs->clic_in_err |
1945                                                 regs->clic_out_err),
1946                                                 regs->outb_isr_cause);
1947                 }
1948                 status ^= mhba->regs->int_comaerr;
1949                 /* inbound or outbound parity error, command will timeout */
1950         }
1951         if (status & regs->int_comaout) {
1952                 tmp = ioread32(regs->outb_isr_cause);
1953                 if (tmp & regs->clic_irq)
1954                         iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1955         }
1956         if (status & regs->int_dl_cpu2pciea) {
1957                 isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1958                 if (isr_status)
1959                         iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1960         }
1961
1962         mhba->global_isr = status;
1963         mhba->isr_status = isr_status;
1964
1965         return 0;
1966 }
1967
1968 /**
1969  * mvumi_read_fw_status_reg - returns the current FW status value
1970  * @mhba:               Adapter soft state
1971  */
1972 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
1973 {
1974         unsigned int status;
1975
1976         status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
1977         if (status)
1978                 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
1979         return status;
1980 }
1981
1982 static struct mvumi_instance_template mvumi_instance_9143 = {
1983         .fire_cmd = mvumi_fire_cmd,
1984         .enable_intr = mvumi_enable_intr,
1985         .disable_intr = mvumi_disable_intr,
1986         .clear_intr = mvumi_clear_intr,
1987         .read_fw_status_reg = mvumi_read_fw_status_reg,
1988         .check_ib_list = mvumi_check_ib_list_9143,
1989         .check_ob_list = mvumi_check_ob_list_9143,
1990         .reset_host = mvumi_reset_host_9143,
1991 };
1992
1993 static struct mvumi_instance_template mvumi_instance_9580 = {
1994         .fire_cmd = mvumi_fire_cmd,
1995         .enable_intr = mvumi_enable_intr,
1996         .disable_intr = mvumi_disable_intr,
1997         .clear_intr = mvumi_clear_intr,
1998         .read_fw_status_reg = mvumi_read_fw_status_reg,
1999         .check_ib_list = mvumi_check_ib_list_9580,
2000         .check_ob_list = mvumi_check_ob_list_9580,
2001         .reset_host = mvumi_reset_host_9580,
2002 };
2003
2004 static int mvumi_slave_configure(struct scsi_device *sdev)
2005 {
2006         struct mvumi_hba *mhba;
2007         unsigned char bitcount = sizeof(unsigned char) * 8;
2008
2009         mhba = (struct mvumi_hba *) sdev->host->hostdata;
2010         if (sdev->id >= mhba->max_target_id)
2011                 return -EINVAL;
2012
2013         mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2014         return 0;
2015 }
2016
2017 /**
2018  * mvumi_build_frame -  Prepares a direct cdb (DCDB) command
2019  * @mhba:               Adapter soft state
2020  * @scmd:               SCSI command
2021  * @cmd:                Command frame to prepare the request in
2022  *
2023  * This function prepares CDB commands. These are typically pass-through
2024  * commands to the devices.
2025  */
2026 static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2027                                 struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2028 {
2029         struct mvumi_msg_frame *pframe;
2030
2031         cmd->scmd = scmd;
2032         cmd->cmd_status = REQ_STATUS_PENDING;
2033         pframe = cmd->frame;
2034         pframe->device_id = ((unsigned short) scmd->device->id) |
2035                                 (((unsigned short) scmd->device->lun) << 8);
2036         pframe->cmd_flag = 0;
2037
2038         switch (scmd->sc_data_direction) {
2039         case DMA_NONE:
2040                 pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2041                 break;
2042         case DMA_FROM_DEVICE:
2043                 pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2044                 break;
2045         case DMA_TO_DEVICE:
2046                 pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2047                 break;
2048         case DMA_BIDIRECTIONAL:
2049         default:
2050                 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2051                         "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2052                 goto error;
2053         }
2054
2055         pframe->cdb_length = scmd->cmd_len;
2056         memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2057         pframe->req_function = CL_FUN_SCSI_CMD;
2058         if (scsi_bufflen(scmd)) {
2059                 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2060                         &pframe->sg_counts))
2061                         goto error;
2062
2063                 pframe->data_transfer_length = scsi_bufflen(scmd);
2064         } else {
2065                 pframe->sg_counts = 0;
2066                 pframe->data_transfer_length = 0;
2067         }
2068         return 0;
2069
2070 error:
2071         scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2072                 SAM_STAT_CHECK_CONDITION;
2073         scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2074                                                                         0);
2075         return -1;
2076 }
2077
2078 /**
2079  * mvumi_queue_command -        Queue entry point
2080  * @shost:                      Scsi_Host the command is queued on
2081  * @scmd:                       SCSI command to be queued
2082  */
2083 static int mvumi_queue_command(struct Scsi_Host *shost,
2084                                         struct scsi_cmnd *scmd)
2085 {
2086         struct mvumi_cmd *cmd;
2087         struct mvumi_hba *mhba;
2088         unsigned long irq_flags;
2089
2090         spin_lock_irqsave(shost->host_lock, irq_flags);
2091
2092         mhba = (struct mvumi_hba *) shost->hostdata;
2093         scmd->result = 0;
2094         cmd = mvumi_get_cmd(mhba);
2095         if (unlikely(!cmd)) {
2096                 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2097                 return SCSI_MLQUEUE_HOST_BUSY;
2098         }
2099
2100         if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2101                 goto out_return_cmd;
2102
2103         cmd->scmd = scmd;
2104         scmd->SCp.ptr = (char *) cmd;
2105         mhba->instancet->fire_cmd(mhba, cmd);
2106         spin_unlock_irqrestore(shost->host_lock, irq_flags);
2107         return 0;
2108
2109 out_return_cmd:
2110         mvumi_return_cmd(mhba, cmd);
2111         scmd->scsi_done(scmd);
2112         spin_unlock_irqrestore(shost->host_lock, irq_flags);
2113         return 0;
2114 }
2115
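/**
 * mvumi_timed_out -    Command timeout handler
 * @scmd:               SCSI command that timed out
 *
 * Releases the tag and any DMA mapping held by the command, marks it aborted
 * and returns it to the pool; the block layer is told the command is done.
 */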
2116 static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2117 {
2118         struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2119         struct Scsi_Host *host = scmd->device->host;
2120         struct mvumi_hba *mhba = shost_priv(host);
2121         unsigned long flags;
2122
2123         spin_lock_irqsave(mhba->shost->host_lock, flags);
2124
2125         if (mhba->tag_cmd[cmd->frame->tag]) {
2126                 mhba->tag_cmd[cmd->frame->tag] = NULL;
2127                 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2128         }
2129         if (!list_empty(&cmd->queue_pointer))
2130                 list_del_init(&cmd->queue_pointer);
2131         else
2132                 atomic_dec(&mhba->fw_outstanding);
2133
2134         scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2135         scmd->SCp.ptr = NULL;
2136         if (scsi_bufflen(scmd)) {
2137                 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
2138                              scsi_sg_count(scmd),
2139                              scmd->sc_data_direction);
2140         }
2141         mvumi_return_cmd(mhba, cmd);
2142         spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2143
2144         return BLK_EH_DONE;
2145 }
2146
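/*
 * Report a 64 heads / 32 sectors geometry for small disks and switch to
 * 255 heads / 63 sectors once the capacity reaches 0x200000 sectors (1 GiB).
 */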
2147 static int
2148 mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2149                         sector_t capacity, int geom[])
2150 {
2151         int heads, sectors;
2152         sector_t cylinders;
2153         unsigned long tmp;
2154
2155         heads = 64;
2156         sectors = 32;
2157         tmp = heads * sectors;
2158         cylinders = capacity;
2159         sector_div(cylinders, tmp);
2160
2161         if (capacity >= 0x200000) {
2162                 heads = 255;
2163                 sectors = 63;
2164                 tmp = heads * sectors;
2165                 cylinders = capacity;
2166                 sector_div(cylinders, tmp);
2167         }
2168         geom[0] = heads;
2169         geom[1] = sectors;
2170         geom[2] = cylinders;
2171
2172         return 0;
2173 }
2174
2175 static struct scsi_host_template mvumi_template = {
2176
2177         .module = THIS_MODULE,
2178         .name = "Marvell Storage Controller",
2179         .slave_configure = mvumi_slave_configure,
2180         .queuecommand = mvumi_queue_command,
2181         .eh_timed_out = mvumi_timed_out,
2182         .eh_host_reset_handler = mvumi_host_reset,
2183         .bios_param = mvumi_bios_param,
2184         .dma_boundary = PAGE_SIZE - 1,
2185         .this_id = -1,
2186 };
2187
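/**
 * mvumi_cfg_hw_reg -   Fills in the per-chip register map
 * @mhba:               Adapter soft state
 *
 * Allocates mhba->regs if necessary and records the MMIO offsets and
 * doorbell/interrupt bit layout for the MV9143 or MV9580.
 */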
2188 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2189 {
2190         void *base = NULL;
2191         struct mvumi_hw_regs *regs;
2192
2193         switch (mhba->pdev->device) {
2194         case PCI_DEVICE_ID_MARVELL_MV9143:
2195                 mhba->mmio = mhba->base_addr[0];
2196                 base = mhba->mmio;
2197                 if (!mhba->regs) {
2198                         mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2199                         if (mhba->regs == NULL)
2200                                 return -ENOMEM;
2201                 }
2202                 regs = mhba->regs;
2203
2204                 /* For Arm */
2205                 regs->ctrl_sts_reg          = base + 0x20104;
2206                 regs->rstoutn_mask_reg      = base + 0x20108;
2207                 regs->sys_soft_rst_reg      = base + 0x2010C;
2208                 regs->main_int_cause_reg    = base + 0x20200;
2209                 regs->enpointa_mask_reg     = base + 0x2020C;
2210                 regs->rstoutn_en_reg        = base + 0xF1400;
2211                 /* For Doorbell */
2212                 regs->pciea_to_arm_drbl_reg = base + 0x20400;
2213                 regs->arm_to_pciea_drbl_reg = base + 0x20408;
2214                 regs->arm_to_pciea_mask_reg = base + 0x2040C;
2215                 regs->pciea_to_arm_msg0     = base + 0x20430;
2216                 regs->pciea_to_arm_msg1     = base + 0x20434;
2217                 regs->arm_to_pciea_msg0     = base + 0x20438;
2218                 regs->arm_to_pciea_msg1     = base + 0x2043C;
2219
2220                 /* For Message Unit */
2221
2222                 regs->inb_aval_count_basel  = base + 0x508;
2223                 regs->inb_aval_count_baseh  = base + 0x50C;
2224                 regs->inb_write_pointer     = base + 0x518;
2225                 regs->inb_read_pointer      = base + 0x51C;
2226                 regs->outb_coal_cfg         = base + 0x568;
2227                 regs->outb_copy_basel       = base + 0x5B0;
2228                 regs->outb_copy_baseh       = base + 0x5B4;
2229                 regs->outb_copy_pointer     = base + 0x544;
2230                 regs->outb_read_pointer     = base + 0x548;
2231                 regs->outb_isr_cause        = base + 0x560;
2233                 /* Bit setting for HW */
2234                 regs->int_comaout           = 1 << 8;
2235                 regs->int_comaerr           = 1 << 6;
2236                 regs->int_dl_cpu2pciea      = 1 << 1;
2237                 regs->cl_pointer_toggle     = 1 << 12;
2238                 regs->clic_irq              = 1 << 1;
2239                 regs->clic_in_err           = 1 << 8;
2240                 regs->clic_out_err          = 1 << 12;
2241                 regs->cl_slot_num_mask      = 0xFFF;
2242                 regs->int_drbl_int_mask     = 0x3FFFFFFF;
2243                 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2244                                                         regs->int_comaerr;
2245                 break;
2246         case PCI_DEVICE_ID_MARVELL_MV9580:
2247                 mhba->mmio = mhba->base_addr[2];
2248                 base = mhba->mmio;
2249                 if (!mhba->regs) {
2250                         mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2251                         if (mhba->regs == NULL)
2252                                 return -ENOMEM;
2253                 }
2254                 regs = mhba->regs;
2255                 /* For Arm */
2256                 regs->ctrl_sts_reg          = base + 0x20104;
2257                 regs->rstoutn_mask_reg      = base + 0x1010C;
2258                 regs->sys_soft_rst_reg      = base + 0x10108;
2259                 regs->main_int_cause_reg    = base + 0x10200;
2260                 regs->enpointa_mask_reg     = base + 0x1020C;
2261                 regs->rstoutn_en_reg        = base + 0xF1400;
2262
2263                 /* For Doorbell */
2264                 regs->pciea_to_arm_drbl_reg = base + 0x10460;
2265                 regs->arm_to_pciea_drbl_reg = base + 0x10480;
2266                 regs->arm_to_pciea_mask_reg = base + 0x10484;
2267                 regs->pciea_to_arm_msg0     = base + 0x10400;
2268                 regs->pciea_to_arm_msg1     = base + 0x10404;
2269                 regs->arm_to_pciea_msg0     = base + 0x10420;
2270                 regs->arm_to_pciea_msg1     = base + 0x10424;
2271
2272                 /* For reset*/
2273                 regs->reset_request         = base + 0x10108;
2274                 regs->reset_enable          = base + 0x1010c;
2275
2276                 /* For Message Unit */
2277                 regs->inb_aval_count_basel  = base + 0x4008;
2278                 regs->inb_aval_count_baseh  = base + 0x400C;
2279                 regs->inb_write_pointer     = base + 0x4018;
2280                 regs->inb_read_pointer      = base + 0x401C;
2281                 regs->outb_copy_basel       = base + 0x4058;
2282                 regs->outb_copy_baseh       = base + 0x405C;
2283                 regs->outb_copy_pointer     = base + 0x406C;
2284                 regs->outb_read_pointer     = base + 0x4070;
2285                 regs->outb_coal_cfg         = base + 0x4080;
2286                 regs->outb_isr_cause        = base + 0x4088;
2287                 /* Bit setting for HW */
2288                 regs->int_comaout           = 1 << 4;
2289                 regs->int_dl_cpu2pciea      = 1 << 12;
2290                 regs->int_comaerr           = 1 << 29;
2291                 regs->cl_pointer_toggle     = 1 << 14;
2292                 regs->cl_slot_num_mask      = 0x3FFF;
2293                 regs->clic_irq              = 1 << 0;
2294                 regs->clic_out_err          = 1 << 1;
2295                 regs->int_drbl_int_mask     = 0x3FFFFFFF;
2296                 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2297                 break;
2298         default:
2299                 return -1;
2301         }
2302
2303         return 0;
2304 }
2305
2306 /**
2307  * mvumi_init_fw -      Initializes the FW
2308  * @mhba:               Adapter soft state
2309  *
2310  * This is the main function for initializing firmware.
2311  */
2312 static int mvumi_init_fw(struct mvumi_hba *mhba)
2313 {
2314         int ret = 0;
2315
2316         if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2317                 dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2318                 return -EBUSY;
2319         }
2320         ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2321         if (ret)
2322                 goto fail_ioremap;
2323
2324         switch (mhba->pdev->device) {
2325         case PCI_DEVICE_ID_MARVELL_MV9143:
2326                 mhba->instancet = &mvumi_instance_9143;
2327                 mhba->io_seq = 0;
2328                 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2329                 mhba->request_id_enabled = 1;
2330                 break;
2331         case PCI_DEVICE_ID_MARVELL_MV9580:
2332                 mhba->instancet = &mvumi_instance_9580;
2333                 mhba->io_seq = 0;
2334                 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2335                 break;
2336         default:
2337                 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2338                                                         mhba->pdev->device);
2339                 mhba->instancet = NULL;
2340                 ret = -EINVAL;
2341                 goto fail_alloc_mem;
2342         }
2343         dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
2344                                                         mhba->pdev->device);
2345         ret = mvumi_cfg_hw_reg(mhba);
2346         if (ret) {
2347                 dev_err(&mhba->pdev->dev,
2348                         "failed to allocate memory for reg\n");
2349                 ret = -ENOMEM;
2350                 goto fail_alloc_mem;
2351         }
2352         mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
2353                         HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
2354         if (!mhba->handshake_page) {
2355                 dev_err(&mhba->pdev->dev,
2356                         "failed to allocate memory for handshake\n");
2357                 ret = -ENOMEM;
2358                 goto fail_alloc_page;
2359         }
2360
2361         if (mvumi_start(mhba)) {
2362                 ret = -EINVAL;
2363                 goto fail_ready_state;
2364         }
2365         ret = mvumi_alloc_cmds(mhba);
2366         if (ret)
2367                 goto fail_ready_state;
2368
2369         return 0;
2370
2371 fail_ready_state:
2372         mvumi_release_mem_resource(mhba);
2373         dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
2374                 mhba->handshake_page, mhba->handshake_page_phys);
2375 fail_alloc_page:
2376         kfree(mhba->regs);
2377 fail_alloc_mem:
2378         mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2379 fail_ioremap:
2380         pci_release_regions(mhba->pdev);
2381
2382         return ret;
2383 }
2384
2385 /**
2386  * mvumi_io_attach -    Attaches this driver to SCSI mid-layer
2387  * @mhba:               Adapter soft state
2388  */
2389 static int mvumi_io_attach(struct mvumi_hba *mhba)
2390 {
2391         struct Scsi_Host *host = mhba->shost;
2392         struct scsi_device *sdev = NULL;
2393         int ret;
2394         unsigned int max_sg = (mhba->ib_max_size + 4 -
2395                 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2396
2397         host->irq = mhba->pdev->irq;
2398         host->unique_id = mhba->unique_id;
2399         host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2400         host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2401         host->max_sectors = mhba->max_transfer_size / 512;
2402         host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2403         host->max_id = mhba->max_target_id;
2404         host->max_cmd_len = MAX_COMMAND_SIZE;
2405
2406         ret = scsi_add_host(host, &mhba->pdev->dev);
2407         if (ret) {
2408                 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2409                 return ret;
2410         }
2411         mhba->fw_flag |= MVUMI_FW_ATTACH;
2412
2413         mutex_lock(&mhba->sas_discovery_mutex);
2414         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2415                 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2416         else
2417                 ret = 0;
2418         if (ret) {
2419                 dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2420                 mutex_unlock(&mhba->sas_discovery_mutex);
2421                 goto fail_add_device;
2422         }
2423
2424         mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2425                                                 mhba, "mvumi_scanthread");
2426         if (IS_ERR(mhba->dm_thread)) {
2427                 dev_err(&mhba->pdev->dev,
2428                         "failed to create device scan thread\n");
                ret = PTR_ERR(mhba->dm_thread);
2429                 mutex_unlock(&mhba->sas_discovery_mutex);
2430                 goto fail_create_thread;
2431         }
2432         atomic_set(&mhba->pnp_count, 1);
2433         wake_up_process(mhba->dm_thread);
2434
2435         mutex_unlock(&mhba->sas_discovery_mutex);
2436         return 0;
2437
2438 fail_create_thread:
2439         if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2440                 sdev = scsi_device_lookup(mhba->shost, 0,
2441                                                 mhba->max_target_id - 1, 0);
2442         if (sdev) {
2443                 scsi_remove_device(sdev);
2444                 scsi_device_put(sdev);
2445         }
2446 fail_add_device:
2447         scsi_remove_host(mhba->shost);
2448         return ret;
2449 }
2450
2451 /**
2452  * mvumi_probe_one -    PCI hotplug entry point
2453  * @pdev:               PCI device structure
2454  * @id:                 PCI ids of supported hotplugged adapter
2455  */
2456 static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2457 {
2458         struct Scsi_Host *host;
2459         struct mvumi_hba *mhba;
2460         int ret;
2461
2462         dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2463                         pdev->vendor, pdev->device, pdev->subsystem_vendor,
2464                         pdev->subsystem_device);
2465
2466         ret = pci_enable_device(pdev);
2467         if (ret)
2468                 return ret;
2469
2470         ret = mvumi_pci_set_master(pdev);
2471         if (ret)
2472                 goto fail_set_dma_mask;
2473
2474         host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2475         if (!host) {
2476                 dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2477                 ret = -ENOMEM;
2478                 goto fail_alloc_instance;
2479         }
2480         mhba = shost_priv(host);
2481
2482         INIT_LIST_HEAD(&mhba->cmd_pool);
2483         INIT_LIST_HEAD(&mhba->ob_data_list);
2484         INIT_LIST_HEAD(&mhba->free_ob_list);
2485         INIT_LIST_HEAD(&mhba->res_list);
2486         INIT_LIST_HEAD(&mhba->waiting_req_list);
2487         mutex_init(&mhba->device_lock);
2488         INIT_LIST_HEAD(&mhba->mhba_dev_list);
2489         INIT_LIST_HEAD(&mhba->shost_dev_list);
2490         atomic_set(&mhba->fw_outstanding, 0);
2491         init_waitqueue_head(&mhba->int_cmd_wait_q);
2492         mutex_init(&mhba->sas_discovery_mutex);
2493
2494         mhba->pdev = pdev;
2495         mhba->shost = host;
2496         mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2497
2498         ret = mvumi_init_fw(mhba);
2499         if (ret)
2500                 goto fail_init_fw;
2501
2502         ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2503                                 "mvumi", mhba);
2504         if (ret) {
2505                 dev_err(&pdev->dev, "failed to register IRQ\n");
2506                 goto fail_init_irq;
2507         }
2508
2509         mhba->instancet->enable_intr(mhba);
2510         pci_set_drvdata(pdev, mhba);
2511
2512         ret = mvumi_io_attach(mhba);
2513         if (ret)
2514                 goto fail_io_attach;
2515
2516         mvumi_backup_bar_addr(mhba);
2517         dev_dbg(&pdev->dev, "mvumi driver probed successfully.\n");
2518
2519         return 0;
2520
2521 fail_io_attach:
2522         mhba->instancet->disable_intr(mhba);
2523         free_irq(mhba->pdev->irq, mhba);
2524 fail_init_irq:
2525         mvumi_release_fw(mhba);
2526 fail_init_fw:
2527         scsi_host_put(host);
2528
2529 fail_alloc_instance:
2530 fail_set_dma_mask:
2531         pci_disable_device(pdev);
2532
2533         return ret;
2534 }
2535
2536 static void mvumi_detach_one(struct pci_dev *pdev)
2537 {
2538         struct Scsi_Host *host;
2539         struct mvumi_hba *mhba;
2540
2541         mhba = pci_get_drvdata(pdev);
2542         if (mhba->dm_thread) {
2543                 kthread_stop(mhba->dm_thread);
2544                 mhba->dm_thread = NULL;
2545         }
2546
2547         mvumi_detach_devices(mhba);
2548         host = mhba->shost;
2549         scsi_remove_host(mhba->shost);
2550         mvumi_flush_cache(mhba);
2551
2552         mhba->instancet->disable_intr(mhba);
2553         free_irq(mhba->pdev->irq, mhba);
2554         mvumi_release_fw(mhba);
2555         scsi_host_put(host);
2556         pci_disable_device(pdev);
2557         dev_dbg(&pdev->dev, "driver is removed!\n");
2558 }
2559
2560 /**
2561  * mvumi_shutdown -     Shutdown entry point
2562  * @pdev:               PCI device structure
2563  */
2564 static void mvumi_shutdown(struct pci_dev *pdev)
2565 {
2566         struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2567
2568         mvumi_flush_cache(mhba);
2569 }
2570
2571 static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2572 {
2573         struct mvumi_hba *mhba = NULL;
2574
2575         mhba = pci_get_drvdata(pdev);
2576         mvumi_flush_cache(mhba);
2577
2578         pci_set_drvdata(pdev, mhba);
2579         mhba->instancet->disable_intr(mhba);
2580         free_irq(mhba->pdev->irq, mhba);
2581         mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2582         pci_release_regions(pdev);
2583         pci_save_state(pdev);
2584         pci_disable_device(pdev);
2585         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2586
2587         return 0;
2588 }
2589
2590 static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2591 {
2592         int ret;
2593         struct mvumi_hba *mhba = NULL;
2594
2595         mhba = pci_get_drvdata(pdev);
2596
2597         pci_set_power_state(pdev, PCI_D0);
2598         pci_enable_wake(pdev, PCI_D0, 0);
2599         pci_restore_state(pdev);
2600
2601         ret = pci_enable_device(pdev);
2602         if (ret) {
2603                 dev_err(&pdev->dev, "enable device failed\n");
2604                 return ret;
2605         }
2606
2607         ret = mvumi_pci_set_master(pdev);
        if (ret)
                goto fail;
2608         ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2609         if (ret)
2610                 goto fail;
2611         ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2612         if (ret)
2613                 goto fail;
2614         ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2615         if (ret)
2616                 goto release_regions;
2617
2618         if (mvumi_cfg_hw_reg(mhba)) {
2619                 ret = -EINVAL;
2620                 goto unmap_pci_addr;
2621         }
2622
2623         mhba->mmio = mhba->base_addr[0];
2624         mvumi_reset(mhba);
2625
2626         if (mvumi_start(mhba)) {
2627                 ret = -EINVAL;
2628                 goto unmap_pci_addr;
2629         }
2630
2631         ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2632                                 "mvumi", mhba);
2633         if (ret) {
2634                 dev_err(&pdev->dev, "failed to register IRQ\n");
2635                 goto unmap_pci_addr;
2636         }
2637         mhba->instancet->enable_intr(mhba);
2638
2639         return 0;
2640
2641 unmap_pci_addr:
2642         mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2643 release_regions:
2644         pci_release_regions(pdev);
2645 fail:
2646         pci_disable_device(pdev);
2647
2648         return ret;
2649 }
2650
2651 static struct pci_driver mvumi_pci_driver = {
2652
2653         .name = MV_DRIVER_NAME,
2654         .id_table = mvumi_pci_table,
2655         .probe = mvumi_probe_one,
2656         .remove = mvumi_detach_one,
2657         .shutdown = mvumi_shutdown,
2658 #ifdef CONFIG_PM
2659         .suspend = mvumi_suspend,
2660         .resume = mvumi_resume,
2661 #endif
2662 };
2663
2664 module_pci_driver(mvumi_pci_driver);