]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/scsi/hisi_sas/hisi_sas_main.c
1391f2dd8102e7ff172f5f67ad3d1d2b320e54c4
[linux.git] / drivers / scsi / hisi_sas / hisi_sas_main.c
1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14
/*
 * True when there is no LLDD device attached or libsas has already
 * marked the device type as unused.  Parenthesize the macro argument
 * so expression arguments (e.g. a pointer chase) expand safely.
 */
#define DEV_IS_GONE(dev) \
	((!(dev)) || ((dev)->dev_type == SAS_PHY_UNUSED))
17
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19                                 u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22                              struct domain_device *device,
23                              int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25
/* Map a libsas domain device back to its owning HBA (stored in lldd_ha). */
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}
30
/*
 * Convert a libsas asd_sas_port to the enclosing hisi_sas_port.
 * Pure pointer arithmetic via container_of(); does not dereference.
 */
struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
36
37 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
38 {
39         void *bitmap = hisi_hba->slot_index_tags;
40
41         clear_bit(slot_idx, bitmap);
42 }
43
/* Release a slot tag back to the pool (thin wrapper over the bitmap clear). */
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}
48
49 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
50 {
51         void *bitmap = hisi_hba->slot_index_tags;
52
53         set_bit(slot_idx, bitmap);
54 }
55
56 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
57 {
58         unsigned int index;
59         void *bitmap = hisi_hba->slot_index_tags;
60
61         index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
62         if (index >= hisi_hba->slot_index_count)
63                 return -SAS_QUEUE_FULL;
64         hisi_sas_slot_index_set(hisi_hba, index);
65         *slot_idx = index;
66         return 0;
67 }
68
69 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
70 {
71         int i;
72
73         for (i = 0; i < hisi_hba->slot_index_count; ++i)
74                 hisi_sas_slot_index_clear(hisi_hba, i);
75 }
76
/*
 * Release all per-slot resources for a completed or aborted task.
 *
 * Unmaps the scatter list (non-ATA protocols only), returns the command
 * table, status buffer and SGE page to their DMA pools, unlinks the slot
 * from the device's list and frees its tag.  Safe to call on a slot that
 * was already torn down (slot->task NULL).
 *
 * NOTE(review): callers appear to hold hisi_hba->lock around this call
 * (see hisi_sas_slot_abort()) - confirm the locking contract.
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct device *dev = &hisi_hba->pdev->dev;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	/* Slot already freed - nothing to do. */
	if (!slot->task)
		return;

	/* ATA tasks are not mapped through the scatter list here. */
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(dev, task->scatter, slot->n_elem,
				     task->data_dir);

	if (slot->command_table)
		dma_pool_free(hisi_hba->command_table_pool,
			      slot->command_table, slot->command_table_dma);

	if (slot->status_buffer)
		dma_pool_free(hisi_hba->status_buffer_pool,
			      slot->status_buffer, slot->status_buffer_dma);

	if (slot->sge_page)
		dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
			      slot->sge_page_dma);

	/* Detach slot from task and device, then return its tag. */
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);
	if (sas_dev)
		atomic64_dec(&sas_dev->running_req);
	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
114
/* Dispatch SMP command preparation to the hw-generation-specific hook. */
static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}
120
/* Dispatch SSP command preparation (optionally a TMF) to the hw hook. */
static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}
127
/* Dispatch SATA/STP command preparation to the hw-specific hook. */
static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}
133
/* Dispatch internal-abort command preparation to the hw-specific hook. */
static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}
141
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 *
 * Runs from the slot's abort_slot work item (see hisi_sas_task_prep()).
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = &hisi_hba->pdev->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	/* ABORT TASK is only issued for SSP; other protocols just clean up. */
	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	/* The TMF names the slot tag of the task being aborted. */
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (task->task_done)
		task->task_done(task);
}
178
/*
 * Build a delivery-queue slot for @task.
 *
 * Validates port/device state, maps the scatter list (non-ATA only),
 * allocates a slot tag plus a free delivery-queue entry, fills in the
 * slot and DMA buffers, and dispatches to the protocol-specific prep
 * hook.  On success *@pass is incremented so the caller knows a command
 * is pending delivery.
 *
 * Called with hisi_hba->lock held (see hisi_sas_task_exec()).
 *
 * Returns 0 on success, SAS_PHY_DOWN when the device/port is unusable,
 * or a negative errno on resource failure.  On failure all resources
 * acquired so far are unwound via the goto ladder below.
 */
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
			      int is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = &hisi_hba->pdev->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return SAS_PHY_DOWN;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %llu not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return SAS_PHY_DOWN;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (sas_protocol_ata(task->task_proto)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return SAS_PHY_DOWN;
	}

	/* Map the scatter list; ATA frames carry their count as-is. */
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	/* Prefer the hw-specific tag allocator when the generation has one. */
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc)
		goto err_out;
	rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
					&dlvry_queue, &dlvry_queue_slot);
	if (rc)
		goto err_out_tag;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	/* GFP_ATOMIC: we are under hisi_hba->lock here. */
	slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
					     GFP_ATOMIC,
					     &slot->status_buffer_dma);
	if (!slot->status_buffer) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);

	slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
					     GFP_ATOMIC,
					     &slot->command_table_dma);
	if (!slot->command_table) {
		rc = -ENOMEM;
		goto err_out_status_buf;
	}
	memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		/* The prep hook may have allocated an SGE page; free it too. */
		if (slot->sge_page)
			goto err_out_sge;
		goto err_out_command_table;
	}

	list_add_tail(&slot->entry, &sas_dev->list);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	hisi_hba->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);
	++(*pass);

	return 0;

err_out_sge:
	dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
		slot->sge_page_dma);
err_out_command_table:
	dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
		slot->command_table_dma);
err_out_status_buf:
	dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
		slot->status_buffer_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}
345
346 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
347                               int is_tmf, struct hisi_sas_tmf_task *tmf)
348 {
349         u32 rc;
350         u32 pass = 0;
351         unsigned long flags;
352         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
353         struct device *dev = &hisi_hba->pdev->dev;
354
355         if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
356                 return -EINVAL;
357
358         /* protect task_prep and start_delivery sequence */
359         spin_lock_irqsave(&hisi_hba->lock, flags);
360         rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
361         if (rc)
362                 dev_err(dev, "task exec: failed[%d]!\n", rc);
363
364         if (likely(pass))
365                 hisi_hba->hw->start_delivery(hisi_hba);
366         spin_unlock_irqrestore(&hisi_hba->lock, flags);
367
368         return rc;
369 }
370
/*
 * Notify libsas that a PHY has come up and its identify/signature frame
 * has been received.
 *
 * Fills in the sas_phy link-rate fields, copies identify data for SAS
 * PHYs (SATA PHYs carry the FIS in frame_rcvd as-is), then raises
 * PHYE_OOB_DONE and PORTE_BYTES_DMAED events.  No-op if the PHY is not
 * attached.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		/* Preserve any user-configured min/max link rates. */
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/*Nothing*/
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
411
412 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
413 {
414         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
415         struct hisi_sas_device *sas_dev = NULL;
416         int i;
417
418         spin_lock(&hisi_hba->lock);
419         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
420                 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
421                         hisi_hba->devices[i].device_id = i;
422                         sas_dev = &hisi_hba->devices[i];
423                         sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
424                         sas_dev->dev_type = device->dev_type;
425                         sas_dev->hisi_hba = hisi_hba;
426                         sas_dev->sas_device = device;
427                         INIT_LIST_HEAD(&hisi_hba->devices[i].list);
428                         break;
429                 }
430         }
431         spin_unlock(&hisi_hba->lock);
432
433         return sas_dev;
434 }
435
/*
 * libsas dev_found callback: bind an LLDD device to @device.
 *
 * Allocates a hisi_sas_device (hw-specific allocator when available),
 * programs the ITCT entry, and for expander-attached devices records
 * which expander PHY leads to the device.
 *
 * NOTE(review): the expander error path returns -EINVAL without
 * releasing the sas_dev/ITCT just set up - confirm whether the entry
 * should be torn down here.
 */
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = &hisi_hba->pdev->dev;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		/* Find which expander PHY is attached to this device. */
		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	return 0;
}
481
/*
 * SCSI host slave_configure hook: run the libsas default configuration,
 * then cap the queue depth at 64 for non-SATA devices.
 */
static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int rc = sas_slave_configure(sdev);

	if (rc)
		return rc;

	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
494
/* SCSI host scan_start hook: bring up the PHYs to begin discovery. */
static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
501
502 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
503 {
504         struct hisi_hba *hisi_hba = shost_priv(shost);
505         struct sas_ha_struct *sha = &hisi_hba->sha;
506
507         /* Wait for PHY up interrupt to occur */
508         if (time < HZ)
509                 return 0;
510
511         sas_drain_work(sha);
512         return 1;
513 }
514
/*
 * Deferred PHY-up handling: runs from phy->phyup_ws because sl_notify()
 * needs to sleep, which is not allowed in the interrupt path that
 * queues this work.
 */
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, struct hisi_sas_phy, phyup_ws);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
526
/*
 * Initialize the per-PHY state and the embedded libsas asd_sas_phy for
 * PHY @phy_no, and set up its deferred PHY-up work item.
 */
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	init_timer(&phy->timer);
	/* Only PHYs actually present on this controller are enabled. */
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
}
551
552 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
553 {
554         struct sas_ha_struct *sas_ha = sas_phy->ha;
555         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
556         struct hisi_sas_phy *phy = sas_phy->lldd_phy;
557         struct asd_sas_port *sas_port = sas_phy->port;
558         struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
559         unsigned long flags;
560
561         if (!sas_port)
562                 return;
563
564         spin_lock_irqsave(&hisi_hba->lock, flags);
565         port->port_attached = 1;
566         port->id = phy->port_id;
567         phy->port = port;
568         sas_port->lldd_port = port;
569         spin_unlock_irqrestore(&hisi_hba->lock, flags);
570 }
571
/*
 * Complete @task as aborted and free its slot.
 *
 * Marks the task status SAS_TASK_COMPLETE/SAS_ABORTED_TASK, updates the
 * task state flags under task_state_lock, then releases the slot's
 * resources.  No-op when @task is NULL.
 */
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba,
				     struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts;
	unsigned long flags;

	if (!task)
		return;

	ts = &task->task_status;

	ts->resp = SAS_TASK_COMPLETE;
	ts->stat = SAS_ABORTED_TASK;
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}
594
/* hisi_hba.lock should be locked */
/*
 * Abort-complete every outstanding slot on @device's list.  Uses the
 * _safe iterator because hisi_sas_do_release_task() unlinks each slot.
 */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
605
606 static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
607 {
608         struct hisi_sas_device *sas_dev;
609         struct domain_device *device;
610         int i;
611
612         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
613                 sas_dev = &hisi_hba->devices[i];
614                 device = sas_dev->sas_device;
615
616                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
617                     !device)
618                         continue;
619
620                 hisi_sas_release_task(hisi_hba, device);
621         }
622 }
623
/*
 * libsas dev_gone callback: tear down the LLDD device state.
 *
 * Issues an internal abort for all commands to the device, frees the
 * hw device context, and resets the table entry to SAS_PHY_UNUSED while
 * preserving its device_id for reuse.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = &hisi_hba->pdev->dev;
	u64 dev_id = sas_dev->device_id;

	dev_info(dev, "found dev[%lld:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

	hisi_hba->hw->free_device(hisi_hba, sas_dev);
	device->lldd_dev = NULL;
	/* Keep device_id: the zeroed entry stays bound to the same slot. */
	memset(sas_dev, 0, sizeof(*sas_dev));
	sas_dev->device_id = dev_id;
	sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
}
644
/* libsas execute-task entry point: a plain (non-TMF) task submission. */
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
649
/*
 * libsas PHY control callback.
 *
 * Dispatches the requested PHY function to the hw-specific hooks; a
 * link reset is implemented as disable, 100 ms settle, re-enable.
 * Spin-up hold release (and anything unrecognized) is unsupported.
 *
 * Returns 0 on success or -EOPNOTSUPP.
 */
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_enable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		/* funcdata carries the requested sas_phy_linkrates. */
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;

	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
682
/*
 * Completion callback for internal slow tasks.  del_timer() returning 0
 * means the timeout handler already fired (or the timer was never
 * pending), in which case the timeout path owns the completion.
 */
static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}
689
/*
 * Timer callback: the internal TMF task timed out.  Flag it aborted and
 * wake the waiter in hisi_sas_exec_internal_tmf_task().
 *
 * NOTE(review): task_state_flags is updated here without taking
 * task_state_lock, unlike other writers in this file - confirm this
 * race with the completion path is benign.
 */
static void hisi_sas_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}
697
698 #define TASK_TIMEOUT 20
699 #define TASK_RETRY 3
/*
 * Execute an internal TMF (or internal ATA command when @device is
 * SATA) and wait for it to complete, retrying up to TASK_RETRY times
 * with a TASK_TIMEOUT-second timer per attempt.
 *
 * @parameter/@para_len hold the SSP task or the host-to-device FIS to
 * send.  Returns TMF_RESP_FUNC_COMPLETE/SUCC on success, a residual
 * byte count on data underrun, -EMSGSIZE on overrun, or
 * TMF_RESP_FUNC_FAILED / a negative errno on failure.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = &hisi_hba->pdev->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		/* Arm the per-attempt timeout before submitting. */
		task->slow_task->timer.data = (unsigned long) task;
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				dev_err(dev, "abort tmf: TMF task timeout\n");
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		/* Unrecognized status: free the task and retry. */
		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
796
797 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
798                 bool reset, int pmp, u8 *fis)
799 {
800         struct ata_taskfile tf;
801
802         ata_tf_init(dev, &tf);
803         if (reset)
804                 tf.ctl |= ATA_SRST;
805         else
806                 tf.ctl &= ~ATA_SRST;
807         tf.command = ATA_CMD_DEV_RESET;
808         ata_tf_to_fis(&tf, pmp, 0, fis);
809 }
810
/*
 * Software-reset an ATA disk: assert SRST on every edge link, and only
 * if all asserts completed, de-assert SRST on each link.  On a fully
 * successful reset, abort-complete all outstanding tasks for @device
 * under hisi_hba->lock.
 *
 * Returns TMF_RESP_FUNC_COMPLETE on success or the failing exec code.
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = &hisi_hba->pdev->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}
853
854 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
855                                 u8 *lun, struct hisi_sas_tmf_task *tmf)
856 {
857         struct sas_ssp_task ssp_task;
858
859         if (!(device->tproto & SAS_PROTOCOL_SSP))
860                 return TMF_RESP_FUNC_ESUPP;
861
862         memcpy(ssp_task.LUN, lun, 8);
863
864         return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
865                                 sizeof(ssp_task), tmf);
866 }
867
/*
 * Perform a host controller soft reset, guarded by HISI_SAS_RESET_BIT so
 * only one reset runs at a time.  The SCSI host is blocked for the
 * duration; on success all outstanding tasks are released and libsas is
 * notified with HAE_RESET.
 *
 * Returns 0 on success, -1 if unsupported or a reset is already in
 * progress, or the hw->soft_reset() error code.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	/* test_and_set_bit makes this a single-owner critical region */
	if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		struct device *dev = &hisi_hba->pdev->dev;
		struct sas_ha_struct *sas_ha = &hisi_hba->sha;
		unsigned long flags;

		dev_dbg(dev, "controller reset begins!\n");
		/* Hold off new requests from the midlayer during the reset */
		scsi_block_requests(hisi_hba->shost);
		rc = hisi_hba->hw->soft_reset(hisi_hba);
		if (rc) {
			dev_warn(dev, "controller reset failed (%d)\n", rc);
			goto out;
		}
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_tasks(hisi_hba);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);

		sas_ha->notify_ha_event(sas_ha, HAE_RESET);
		dev_dbg(dev, "controller reset successful!\n");
	} else
		return -1;

out:
	scsi_unblock_requests(hisi_hba->shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
	return rc;
}
901
902 static int hisi_sas_abort_task(struct sas_task *task)
903 {
904         struct scsi_lun lun;
905         struct hisi_sas_tmf_task tmf_task;
906         struct domain_device *device = task->dev;
907         struct hisi_sas_device *sas_dev = device->lldd_dev;
908         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
909         struct device *dev = &hisi_hba->pdev->dev;
910         int rc = TMF_RESP_FUNC_FAILED;
911         unsigned long flags;
912
913         if (!sas_dev) {
914                 dev_warn(dev, "Device has been removed\n");
915                 return TMF_RESP_FUNC_FAILED;
916         }
917
918         if (task->task_state_flags & SAS_TASK_STATE_DONE) {
919                 rc = TMF_RESP_FUNC_COMPLETE;
920                 goto out;
921         }
922
923         sas_dev->dev_status = HISI_SAS_DEV_EH;
924         if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
925                 struct scsi_cmnd *cmnd = task->uldd_task;
926                 struct hisi_sas_slot *slot = task->lldd_task;
927                 u32 tag = slot->idx;
928                 int rc2;
929
930                 int_to_scsilun(cmnd->device->lun, &lun);
931                 tmf_task.tmf = TMF_ABORT_TASK;
932                 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
933
934                 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
935                                                   &tmf_task);
936
937                 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
938                                                    HISI_SAS_INT_ABT_CMD, tag);
939                 /*
940                  * If the TMF finds that the IO is not in the device and also
941                  * the internal abort does not succeed, then it is safe to
942                  * free the slot.
943                  * Note: if the internal abort succeeds then the slot
944                  * will have already been completed
945                  */
946                 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
947                         if (task->lldd_task) {
948                                 spin_lock_irqsave(&hisi_hba->lock, flags);
949                                 hisi_sas_do_release_task(hisi_hba, task, slot);
950                                 spin_unlock_irqrestore(&hisi_hba->lock, flags);
951                         }
952                 }
953         } else if (task->task_proto & SAS_PROTOCOL_SATA ||
954                 task->task_proto & SAS_PROTOCOL_STP) {
955                 if (task->dev->dev_type == SAS_SATA_DEV) {
956                         hisi_sas_internal_task_abort(hisi_hba, device,
957                                                      HISI_SAS_INT_ABT_DEV, 0);
958                         rc = hisi_sas_softreset_ata_disk(device);
959                 }
960         } else if (task->task_proto & SAS_PROTOCOL_SMP) {
961                 /* SMP */
962                 struct hisi_sas_slot *slot = task->lldd_task;
963                 u32 tag = slot->idx;
964
965                 rc = hisi_sas_internal_task_abort(hisi_hba, device,
966                              HISI_SAS_INT_ABT_CMD, tag);
967                 if (rc == TMF_RESP_FUNC_FAILED) {
968                         spin_lock_irqsave(&hisi_hba->lock, flags);
969                         hisi_sas_do_release_task(hisi_hba, task, slot);
970                         spin_unlock_irqrestore(&hisi_hba->lock, flags);
971                 }
972         }
973
974 out:
975         if (rc != TMF_RESP_FUNC_COMPLETE)
976                 dev_notice(dev, "abort task: rc=%d\n", rc);
977         return rc;
978 }
979
980 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
981 {
982         struct hisi_sas_tmf_task tmf_task;
983         int rc = TMF_RESP_FUNC_FAILED;
984
985         tmf_task.tmf = TMF_ABORT_TASK_SET;
986         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
987
988         return rc;
989 }
990
991 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
992 {
993         int rc = TMF_RESP_FUNC_FAILED;
994         struct hisi_sas_tmf_task tmf_task;
995
996         tmf_task.tmf = TMF_CLEAR_ACA;
997         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
998
999         return rc;
1000 }
1001
1002 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1003 {
1004         struct sas_phy *phy = sas_get_local_phy(device);
1005         int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1006                         (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1007         rc = sas_phy_reset(phy, reset_type);
1008         sas_put_local_phy(phy);
1009         msleep(2000);
1010         return rc;
1011 }
1012
1013 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1014 {
1015         struct hisi_sas_device *sas_dev = device->lldd_dev;
1016         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1017         unsigned long flags;
1018         int rc = TMF_RESP_FUNC_FAILED;
1019
1020         if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1021                 return TMF_RESP_FUNC_FAILED;
1022         sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1023
1024         rc = hisi_sas_debug_I_T_nexus_reset(device);
1025
1026         if (rc == TMF_RESP_FUNC_COMPLETE) {
1027                 spin_lock_irqsave(&hisi_hba->lock, flags);
1028                 hisi_sas_release_task(hisi_hba, device);
1029                 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1030         }
1031         return rc;
1032 }
1033
/*
 * libsas lldd_lu_reset handler: reset a single logical unit.
 *
 * For SATA devices the LU reset is emulated by aborting all internal IO
 * for the device and then hard-resetting the local phy; for SSP devices
 * a LOGICAL UNIT RESET TMF is issued.  On success, all outstanding
 * tasks for the device are released back to libsas.
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = &hisi_hba->pdev->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc == TMF_RESP_FUNC_FAILED)
			goto out;

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		/* sas_phy_reset() reports success as 0, not as a TMF code */
		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
		sas_dev->device_id, rc);
	return rc;
}
1077
1078 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1079 {
1080         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1081
1082         return hisi_sas_controller_reset(hisi_hba);
1083 }
1084
1085 static int hisi_sas_query_task(struct sas_task *task)
1086 {
1087         struct scsi_lun lun;
1088         struct hisi_sas_tmf_task tmf_task;
1089         int rc = TMF_RESP_FUNC_FAILED;
1090
1091         if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1092                 struct scsi_cmnd *cmnd = task->uldd_task;
1093                 struct domain_device *device = task->dev;
1094                 struct hisi_sas_slot *slot = task->lldd_task;
1095                 u32 tag = slot->idx;
1096
1097                 int_to_scsilun(cmnd->device->lun, &lun);
1098                 tmf_task.tmf = TMF_QUERY_TASK;
1099                 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1100
1101                 rc = hisi_sas_debug_issue_ssp_tmf(device,
1102                                                   lun.scsi_lun,
1103                                                   &tmf_task);
1104                 switch (rc) {
1105                 /* The task is still in Lun, release it then */
1106                 case TMF_RESP_FUNC_SUCC:
1107                 /* The task is not in Lun or failed, reset the phy */
1108                 case TMF_RESP_FUNC_FAILED:
1109                 case TMF_RESP_FUNC_COMPLETE:
1110                         break;
1111                 default:
1112                         rc = TMF_RESP_FUNC_FAILED;
1113                         break;
1114                 }
1115         }
1116         return rc;
1117 }
1118
/*
 * Build and deliver an internal abort command for @task's device.
 *
 * Allocates a slot and a delivery-queue entry, lets the hw layer fill
 * in the abort command header, then kicks off delivery.  The caller
 * holds hisi_hba->lock around this function to serialise slot
 * allocation (see hisi_sas_internal_task_abort()).
 *
 * Returns 0 on success, -EINVAL while a controller reset is in
 * progress, -1 if the device has no port, or a slot/prep error code.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = &hisi_hba->pdev->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	/* No new internal commands while the controller is resetting */
	if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc)
		goto err_out;
	rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
					&dlvry_queue, &dlvry_queue_slot);
	if (rc)
		goto err_out_tag;

	/* Initialise the slot; the abort carries no data (n_elem == 0) */
	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));

	/* Let the hw layer fill in the abort-specific command header */
	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_tag;


	list_add_tail(&slot->entry, &sas_dev->list);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	hisi_hba->slot_prep = slot;

	atomic64_inc(&sas_dev->running_req);

	/* send abort command to our chip */
	hisi_hba->hw->start_delivery(hisi_hba);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
1193
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 *
 * Allocates a slow task with a 20 second timeout, queues the abort via
 * hisi_sas_internal_abort_task_exec() and waits for it to complete.
 *
 * Return: TMF_RESP_FUNC_COMPLETE or TMF_RESP_FUNC_SUCC on success,
 * TMF_RESP_FUNC_FAILED or a negative errno otherwise.
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = &hisi_hba->pdev->dev;
	int res;
	unsigned long flags;

	if (!hisi_hba->hw->prep_abort)
		return -EOPNOTSUPP;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	/* Arm a 20 second timeout for the abort to complete */
	task->slow_task->timer.data = (unsigned long)task;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + 20*HZ;
	add_timer(&task->slow_task->timer);

	/* Lock as we are alloc'ing a slot, which cannot be interrupted */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			dev_err(dev, "internal task abort: timeout.\n");
		}
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
1273
/* libsas port-formed callback: forward to the common notify helper */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}
1278
1279 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1280 {
1281         phy->phy_attached = 0;
1282         phy->phy_type = 0;
1283         phy->port = NULL;
1284 }
1285
/**
 * hisi_sas_phy_down - handle a phy-down event
 * @hisi_hba: host controller struct
 * @phy_no: index of the phy that went down
 * @rdy: non-zero if the phy is down but the link is still ready
 *
 * When @rdy, re-report the attached device and re-form the port.
 * Otherwise notify libsas of the loss of signal and tear down the
 * driver's phy/port attachment state.
 */
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port  = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			/* A SAS port only detaches once no phy remains in it */
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1317
/**
 * hisi_sas_rescan_topology - re-sync libsas after phy state changes
 * @hisi_hba: host controller struct
 * @old_state: phy-up bitmap before the change
 * @state: current phy-up bitmap
 *
 * Report any phy that was up and is now down, and raise a broadcast
 * event on ports attached to expanders so libsas revalidates the domain.
 */
void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		struct domain_device *dev;

		if (sas_phy->enabled) {
			/* Report PHY state change to libsas */
			if (state & (1 << phy_no))
				continue;

			if (old_state & (1 << phy_no))
				/* PHY down but was up before */
				hisi_sas_phy_down(hisi_hba, phy_no, 0);
		}
		if (!sas_port)
			continue;
		dev = sas_port->port_dev;

		/* Expander-attached: let libsas rediscover behind it */
		if (DEV_IS_EXPANDER(dev->dev_type))
			sas_ha->notify_phy_event(sas_phy, PORTE_BROADCAST_RCVD);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology);
1348
/* SAS transport class template, registered at module init */
static struct scsi_transport_template *hisi_sas_stt;

/* SCSI host template: generic libsas entry points plus driver scan hooks */
static struct scsi_host_template hisi_sas_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= hisi_sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
1371
1372 static struct sas_domain_function_template hisi_sas_transport_ops = {
1373         .lldd_dev_found         = hisi_sas_dev_found,
1374         .lldd_dev_gone          = hisi_sas_dev_gone,
1375         .lldd_execute_task      = hisi_sas_queue_command,
1376         .lldd_control_phy       = hisi_sas_control_phy,
1377         .lldd_abort_task        = hisi_sas_abort_task,
1378         .lldd_abort_task_set    = hisi_sas_abort_task_set,
1379         .lldd_clear_aca         = hisi_sas_clear_aca,
1380         .lldd_I_T_nexus_reset   = hisi_sas_I_T_nexus_reset,
1381         .lldd_lu_reset          = hisi_sas_lu_reset,
1382         .lldd_query_task        = hisi_sas_query_task,
1383         .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1384         .lldd_port_formed       = hisi_sas_port_formed,
1385 };
1386
1387 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1388 {
1389         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1390
1391         for (i = 0; i < hisi_hba->queue_count; i++) {
1392                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1393                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1394
1395                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1396                 memset(hisi_hba->cmd_hdr[i], 0, s);
1397                 dq->wr_point = 0;
1398
1399                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1400                 memset(hisi_hba->complete_hdr[i], 0, s);
1401                 cq->rd_point = 0;
1402         }
1403
1404         s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1405         memset(hisi_hba->initial_fis, 0, s);
1406
1407         s = max_command_entries * sizeof(struct hisi_sas_iost);
1408         memset(hisi_hba->iost, 0, s);
1409
1410         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1411         memset(hisi_hba->breakpoint, 0, s);
1412
1413         s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1414         memset(hisi_hba->sata_breakpoint, 0, s);
1415 }
1416 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1417
/*
 * Allocate all per-controller memories: delivery/completion queues,
 * DMA pools, ITCT, slot bookkeeping, IOST, breakpoints, initial FIS
 * buffers and the reset workqueue.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.  Partially
 * allocated resources are left for the caller to tear down via
 * hisi_sas_free() (coherent buffers are freed there; devm_* areas are
 * released with the device).
 */
static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct platform_device *pdev = hisi_hba->pdev;
	struct device *dev = &pdev->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	/* Per-phy and per-port state starts detached */
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	/* All device slots start unused */
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	/* Per-command status buffers, handed out per slot */
	s = HISI_SAS_STATUS_BUF_SZ;
	hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
						       dev, s, 16, 0);
	if (!hisi_hba->status_buffer_pool)
		goto err_out;

	s = HISI_SAS_COMMAND_TABLE_SZ;
	hisi_hba->command_table_pool = dma_pool_create("command_table",
						       dev, s, 16, 0);
	if (!hisi_hba->command_table_pool)
		goto err_out;

	/* ITCT: per-device context table used by the hardware */
	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					    GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	/*
	 * NOTE(review): integer division rounds down; this assumes
	 * max_command_entries is a multiple of BITS_PER_BYTE, otherwise
	 * the tag bitmap is short by up to one byte -- confirm.
	 */
	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
				sizeof(struct hisi_sas_sge_page), 16, 0);
	if (!hisi_hba->sge_page_pool)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
1538
/*
 * Free everything hisi_sas_alloc() obtained.  Safe to call on a
 * partially allocated hisi_hba: each coherent buffer is checked for
 * NULL before freeing, and dma_pool_destroy()/NULL is a no-op.
 * devm_* allocations are released with the device and not freed here.
 */
static void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = &hisi_hba->pdev->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	/* Per-queue delivery and completion buffers */
	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->status_buffer_pool);
	dma_pool_destroy(hisi_hba->command_table_pool);
	dma_pool_destroy(hisi_hba->sge_page_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);


	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
1594
1595 static void hisi_sas_rst_work_handler(struct work_struct *work)
1596 {
1597         struct hisi_hba *hisi_hba =
1598                 container_of(work, struct hisi_hba, rst_work);
1599
1600         hisi_sas_controller_reset(hisi_hba);
1601 }
1602
1603 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1604                                               const struct hisi_sas_hw *hw)
1605 {
1606         struct resource *res;
1607         struct Scsi_Host *shost;
1608         struct hisi_hba *hisi_hba;
1609         struct device *dev = &pdev->dev;
1610         struct device_node *np = pdev->dev.of_node;
1611         struct clk *refclk;
1612
1613         shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
1614         if (!shost) {
1615                 dev_err(dev, "scsi host alloc failed\n");
1616                 return NULL;
1617         }
1618         hisi_hba = shost_priv(shost);
1619
1620         INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
1621         hisi_hba->hw = hw;
1622         hisi_hba->pdev = pdev;
1623         hisi_hba->shost = shost;
1624         SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
1625
1626         init_timer(&hisi_hba->timer);
1627
1628         if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
1629                                           SAS_ADDR_SIZE))
1630                 goto err_out;
1631
1632         if (np) {
1633                 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
1634                                         "hisilicon,sas-syscon");
1635                 if (IS_ERR(hisi_hba->ctrl))
1636                         goto err_out;
1637
1638                 if (device_property_read_u32(dev, "ctrl-reset-reg",
1639                                              &hisi_hba->ctrl_reset_reg))
1640                         goto err_out;
1641
1642                 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
1643                                              &hisi_hba->ctrl_reset_sts_reg))
1644                         goto err_out;
1645
1646                 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
1647                                              &hisi_hba->ctrl_clock_ena_reg))
1648                         goto err_out;
1649         }
1650
1651         refclk = devm_clk_get(&pdev->dev, NULL);
1652         if (IS_ERR(refclk))
1653                 dev_dbg(dev, "no ref clk property\n");
1654         else
1655                 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
1656
1657         if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
1658                 goto err_out;
1659
1660         if (device_property_read_u32(dev, "queue-count",
1661                                      &hisi_hba->queue_count))
1662                 goto err_out;
1663
1664         if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
1665             dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
1666                 dev_err(dev, "No usable DMA addressing method\n");
1667                 goto err_out;
1668         }
1669
1670         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1671         hisi_hba->regs = devm_ioremap_resource(dev, res);
1672         if (IS_ERR(hisi_hba->regs))
1673                 goto err_out;
1674
1675         if (hisi_sas_alloc(hisi_hba, shost)) {
1676                 hisi_sas_free(hisi_hba);
1677                 goto err_out;
1678         }
1679
1680         return shost;
1681 err_out:
1682         kfree(shost);
1683         dev_err(dev, "shost alloc failed\n");
1684         return NULL;
1685 }
1686
1687 static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
1688 {
1689         int i;
1690
1691         for (i = 0; i < hisi_hba->n_phy; i++)
1692                 memcpy(&hisi_hba->phy[i].dev_sas_addr,
1693                        hisi_hba->sas_addr,
1694                        SAS_ADDR_SIZE);
1695 }
1696
1697 int hisi_sas_probe(struct platform_device *pdev,
1698                          const struct hisi_sas_hw *hw)
1699 {
1700         struct Scsi_Host *shost;
1701         struct hisi_hba *hisi_hba;
1702         struct device *dev = &pdev->dev;
1703         struct asd_sas_phy **arr_phy;
1704         struct asd_sas_port **arr_port;
1705         struct sas_ha_struct *sha;
1706         int rc, phy_nr, port_nr, i;
1707
1708         shost = hisi_sas_shost_alloc(pdev, hw);
1709         if (!shost)
1710                 return -ENOMEM;
1711
1712         sha = SHOST_TO_SAS_HA(shost);
1713         hisi_hba = shost_priv(shost);
1714         platform_set_drvdata(pdev, sha);
1715
1716         phy_nr = port_nr = hisi_hba->n_phy;
1717
1718         arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
1719         arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
1720         if (!arr_phy || !arr_port) {
1721                 rc = -ENOMEM;
1722                 goto err_out_ha;
1723         }
1724
1725         sha->sas_phy = arr_phy;
1726         sha->sas_port = arr_port;
1727         sha->lldd_ha = hisi_hba;
1728
1729         shost->transportt = hisi_sas_stt;
1730         shost->max_id = HISI_SAS_MAX_DEVICES;
1731         shost->max_lun = ~0;
1732         shost->max_channel = 1;
1733         shost->max_cmd_len = 16;
1734         shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
1735         shost->can_queue = hisi_hba->hw->max_command_entries;
1736         shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
1737
1738         sha->sas_ha_name = DRV_NAME;
1739         sha->dev = &hisi_hba->pdev->dev;
1740         sha->lldd_module = THIS_MODULE;
1741         sha->sas_addr = &hisi_hba->sas_addr[0];
1742         sha->num_phys = hisi_hba->n_phy;
1743         sha->core.shost = hisi_hba->shost;
1744
1745         for (i = 0; i < hisi_hba->n_phy; i++) {
1746                 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
1747                 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
1748         }
1749
1750         hisi_sas_init_add(hisi_hba);
1751
1752         rc = scsi_add_host(shost, &pdev->dev);
1753         if (rc)
1754                 goto err_out_ha;
1755
1756         rc = sas_register_ha(sha);
1757         if (rc)
1758                 goto err_out_register_ha;
1759
1760         rc = hisi_hba->hw->hw_init(hisi_hba);
1761         if (rc)
1762                 goto err_out_register_ha;
1763
1764         scsi_scan_host(shost);
1765
1766         return 0;
1767
1768 err_out_register_ha:
1769         scsi_remove_host(shost);
1770 err_out_ha:
1771         hisi_sas_free(hisi_hba);
1772         kfree(shost);
1773         return rc;
1774 }
1775 EXPORT_SYMBOL_GPL(hisi_sas_probe);
1776
1777 int hisi_sas_remove(struct platform_device *pdev)
1778 {
1779         struct sas_ha_struct *sha = platform_get_drvdata(pdev);
1780         struct hisi_hba *hisi_hba = sha->lldd_ha;
1781         struct Scsi_Host *shost = sha->core.shost;
1782
1783         scsi_remove_host(sha->core.shost);
1784         sas_unregister_ha(sha);
1785         sas_remove_host(sha->core.shost);
1786
1787         hisi_sas_free(hisi_hba);
1788         kfree(shost);
1789         return 0;
1790 }
1791 EXPORT_SYMBOL_GPL(hisi_sas_remove);
1792
1793 static __init int hisi_sas_init(void)
1794 {
1795         pr_info("hisi_sas: driver version %s\n", DRV_VERSION);
1796
1797         hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
1798         if (!hisi_sas_stt)
1799                 return -ENOMEM;
1800
1801         return 0;
1802 }
1803
static __exit void hisi_sas_exit(void)
{
	/* Release the transport template attached in hisi_sas_init(). */
	sas_release_transport(hisi_sas_stt);
}
1808
1809 module_init(hisi_sas_init);
1810 module_exit(hisi_sas_exit);
1811
1812 MODULE_VERSION(DRV_VERSION);
1813 MODULE_LICENSE("GPL");
1814 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
1815 MODULE_DESCRIPTION("HISILICON SAS controller driver");
1816 MODULE_ALIAS("platform:" DRV_NAME);