]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/scsi/hisi_sas/hisi_sas_main.c
scsi: hisi_sas: remove some unneeded structure members
[linux.git] / drivers / scsi / hisi_sas / hisi_sas_main.c
1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14
15 #define DEV_IS_GONE(dev) \
16         ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19                                 u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22                              struct domain_device *device,
23                              int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26                                 void *funcdata);
27
/*
 * Classify an ATA command into the SATA transfer protocol the hardware
 * must use to issue it.
 *
 * @fis:       host-to-device FIS holding the command opcode (and the
 *             features field, used to disambiguate ATA_CMD_SET_MAX)
 * @direction: DMA data direction; only consulted for commands not
 *             explicitly listed below
 *
 * Returns one of HISI_SAS_SATA_PROTOCOL_{FPDMA,PIO,DMA,NONDATA}.
 */
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	/* NCQ (first-party DMA) commands */
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	/* PIO data-in/data-out commands */
	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	/* Legacy (non-NCQ) DMA commands */
	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	/* Commands that move no data */
	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	/* SET MAX: protocol depends on the sub-command in the features field */
	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		/* Unknown command: fall back to the DMA direction hint */
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
105
106 void hisi_sas_sata_done(struct sas_task *task,
107                             struct hisi_sas_slot *slot)
108 {
109         struct task_status_struct *ts = &task->task_status;
110         struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
111         struct hisi_sas_status_buffer *status_buf =
112                         hisi_sas_status_buf_addr_mem(slot);
113         u8 *iu = &status_buf->iu[0];
114         struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;
115
116         resp->frame_len = sizeof(struct dev_to_host_fis);
117         memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
118
119         ts->buf_valid_size = sizeof(*resp);
120 }
121 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
122
123 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
124 {
125         struct ata_queued_cmd *qc = task->uldd_task;
126
127         if (qc) {
128                 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
129                         qc->tf.command == ATA_CMD_FPDMA_READ) {
130                         *tag = qc->tag;
131                         return 1;
132                 }
133         }
134         return 0;
135 }
136 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
137
138 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
139 {
140         return device->port->ha->lldd_ha;
141 }
142
/* Convert a libsas asd_sas_port to its containing hisi_sas_port. */
struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
148
149 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
150 {
151         int phy_no;
152
153         for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
154                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
155 }
156 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
157
158 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
159 {
160         void *bitmap = hisi_hba->slot_index_tags;
161
162         clear_bit(slot_idx, bitmap);
163 }
164
/* Return a slot tag to the free pool (wrapper kept for symmetry with alloc). */
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}
169
170 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
171 {
172         void *bitmap = hisi_hba->slot_index_tags;
173
174         set_bit(slot_idx, bitmap);
175 }
176
177 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
178 {
179         unsigned int index;
180         void *bitmap = hisi_hba->slot_index_tags;
181
182         index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
183         if (index >= hisi_hba->slot_index_count)
184                 return -SAS_QUEUE_FULL;
185         hisi_sas_slot_index_set(hisi_hba, index);
186         *slot_idx = index;
187         return 0;
188 }
189
190 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
191 {
192         int i;
193
194         for (i = 0; i < hisi_hba->slot_index_count; ++i)
195                 hisi_sas_slot_index_clear(hisi_hba, i);
196 }
197
/*
 * Tear down a slot after its task has completed or been aborted:
 * unmap the scatterlist for non-ATA tasks, return the DMA buffer to
 * the pool, unlink the slot from its device list and release its tag.
 *
 * Callers in this file invoke this under hisi_hba->lock (see
 * hisi_sas_slot_abort and the comment above hisi_sas_release_task).
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{

	if (task) {
		struct device *dev = hisi_hba->dev;

		/* Already freed by another completion path */
		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		/* ATA scatterlists are managed by libata/libsas, not here */
		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}

	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	list_del_init(&slot->entry);
	slot->buf = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
229
/* Build an SMP command in @slot via the hw-specific hook. */
static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}
235
/* Build an SSP command (optionally a TMF) in @slot via the hw hook. */
static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}
242
/* Build a SATA/STP command in @slot via the hw-specific hook. */
static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}
248
/* Build an internal abort command in @slot via the hw-specific hook. */
static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}
256
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	/* ABORT TASK is an SSP TMF; other protocols just get cleaned up */
	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	/* Target the TMF at the slot's tag on the command's LUN */
	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (task->task_done)
		task->task_done(task);
}
293
/*
 * Allocate a slot and delivery-queue entry for @task and build the
 * command in it according to the task protocol (SMP/SSP/STP).
 *
 * Called with dq->lock held (see hisi_sas_task_exec).  On success the
 * slot is queued on the device list, dq->slot_prep is set and *pass is
 * incremented so the caller starts delivery.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far (DMA mapping, tag, buffer) are unwound via the goto
 * chain at the bottom.
 */
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
		*dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
		int *pass)
{
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;

	/* No port: the device cannot be reached at all */
	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	/* Map the scatterlist; ATA tasks are already mapped by libata */
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	/* Grab a slot tag (hw may provide its own allocator) */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	/* Reserve the next delivery-queue entry */
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;
	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	/* DMA buffer holding command table + status buffer for this slot */
	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	/* Build the protocol-specific command in the slot */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		goto err_out_buf;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;
	++(*pass);

	return 0;

	/* Unwind in reverse order of acquisition */
err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		slot->buf_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter,
				     task->num_scatter,
				     task->data_dir);
prep_out:
	return rc;
}
453
454 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
455                               int is_tmf, struct hisi_sas_tmf_task *tmf)
456 {
457         u32 rc;
458         u32 pass = 0;
459         unsigned long flags;
460         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
461         struct device *dev = hisi_hba->dev;
462         struct domain_device *device = task->dev;
463         struct hisi_sas_device *sas_dev = device->lldd_dev;
464         struct hisi_sas_dq *dq = sas_dev->dq;
465
466         if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
467                 return -EINVAL;
468
469         /* protect task_prep and start_delivery sequence */
470         spin_lock_irqsave(&dq->lock, flags);
471         rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
472         if (rc)
473                 dev_err(dev, "task exec: failed[%d]!\n", rc);
474
475         if (likely(pass))
476                 hisi_hba->hw->start_delivery(dq);
477         spin_unlock_irqrestore(&dq->lock, flags);
478
479         return rc;
480 }
481
/*
 * Report a link-up/identify completion on @phy_no to libsas: refresh
 * the sas_phy link-rate fields, fill in the identify frame for SAS
 * phys and raise the PHYE_OOB_DONE and PORTE_BYTES_DMAED events.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		/* Only set min/max once; afterwards user settings stick */
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing: the received FIS in frame_rcvd is passed up as-is */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
522
523 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
524 {
525         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
526         struct hisi_sas_device *sas_dev = NULL;
527         unsigned long flags;
528         int i;
529
530         spin_lock_irqsave(&hisi_hba->lock, flags);
531         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
532                 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
533                         int queue = i % hisi_hba->queue_count;
534                         struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
535
536                         hisi_hba->devices[i].device_id = i;
537                         sas_dev = &hisi_hba->devices[i];
538                         sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
539                         sas_dev->dev_type = device->dev_type;
540                         sas_dev->hisi_hba = hisi_hba;
541                         sas_dev->sas_device = device;
542                         sas_dev->dq = dq;
543                         INIT_LIST_HEAD(&hisi_hba->devices[i].list);
544                         break;
545                 }
546         }
547         spin_unlock_irqrestore(&hisi_hba->lock, flags);
548
549         return sas_dev;
550 }
551
552 static int hisi_sas_dev_found(struct domain_device *device)
553 {
554         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
555         struct domain_device *parent_dev = device->parent;
556         struct hisi_sas_device *sas_dev;
557         struct device *dev = hisi_hba->dev;
558
559         if (hisi_hba->hw->alloc_dev)
560                 sas_dev = hisi_hba->hw->alloc_dev(device);
561         else
562                 sas_dev = hisi_sas_alloc_dev(device);
563         if (!sas_dev) {
564                 dev_err(dev, "fail alloc dev: max support %d devices\n",
565                         HISI_SAS_MAX_DEVICES);
566                 return -EINVAL;
567         }
568
569         device->lldd_dev = sas_dev;
570         hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
571
572         if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
573                 int phy_no;
574                 u8 phy_num = parent_dev->ex_dev.num_phys;
575                 struct ex_phy *phy;
576
577                 for (phy_no = 0; phy_no < phy_num; phy_no++) {
578                         phy = &parent_dev->ex_dev.ex_phy[phy_no];
579                         if (SAS_ADDR(phy->attached_sas_addr) ==
580                                 SAS_ADDR(device->sas_addr))
581                                 break;
582                 }
583
584                 if (phy_no == phy_num) {
585                         dev_info(dev, "dev found: no attached "
586                                  "dev:%016llx at ex:%016llx\n",
587                                  SAS_ADDR(device->sas_addr),
588                                  SAS_ADDR(parent_dev->sas_addr));
589                         return -EINVAL;
590                 }
591         }
592
593         dev_info(dev, "dev[%d:%x] found\n",
594                 sas_dev->device_id, sas_dev->dev_type);
595
596         return 0;
597 }
598
/*
 * SCSI host template slave_configure hook: run the generic libsas
 * configuration, then cap the queue depth of non-SATA devices at 64.
 */
static int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int rc = sas_slave_configure(sdev);

	if (rc)
		return rc;

	/* SATA devices keep the depth libata/libsas configured */
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
611
/* Kick off discovery by bringing up all PHYs on the controller. */
static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
618
619 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
620 {
621         struct hisi_hba *hisi_hba = shost_priv(shost);
622         struct sas_ha_struct *sha = &hisi_hba->sha;
623
624         /* Wait for PHY up interrupt to occur */
625         if (time < HZ)
626                 return 0;
627
628         sas_drain_work(sha);
629         return 1;
630 }
631
/*
 * Deferred PHY-up handler (runs from hisi_hba->wq): notify the link
 * layer and report the received identify/FIS bytes to libsas.
 */
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
643
/* Deferred link-reset handler: issue PHY_FUNC_LINK_RESET on the phy. */
static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
652
/* Work handlers, indexed by enum hisi_sas_phy_event */
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};
657
658 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
659                                 enum hisi_sas_phy_event event)
660 {
661         struct hisi_hba *hisi_hba = phy->hisi_hba;
662
663         if (WARN_ON(event >= HISI_PHYES_NUM))
664                 return false;
665
666         return queue_work(hisi_hba->wq, &phy->works[event]);
667 }
668 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
669
670 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
671 {
672         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
673         struct asd_sas_phy *sas_phy = &phy->sas_phy;
674         int i;
675
676         phy->hisi_hba = hisi_hba;
677         phy->port = NULL;
678         phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
679         phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
680         sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
681         sas_phy->class = SAS;
682         sas_phy->iproto = SAS_PROTOCOL_ALL;
683         sas_phy->tproto = 0;
684         sas_phy->type = PHY_TYPE_PHYSICAL;
685         sas_phy->role = PHY_ROLE_INITIATOR;
686         sas_phy->oob_mode = OOB_NOT_CONNECTED;
687         sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
688         sas_phy->id = phy_no;
689         sas_phy->sas_addr = &hisi_hba->sas_addr[0];
690         sas_phy->frame_rcvd = &phy->frame_rcvd[0];
691         sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
692         sas_phy->lldd_phy = phy;
693
694         for (i = 0; i < HISI_PHYES_NUM; i++)
695                 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
696 }
697
698 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
699 {
700         struct sas_ha_struct *sas_ha = sas_phy->ha;
701         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
702         struct hisi_sas_phy *phy = sas_phy->lldd_phy;
703         struct asd_sas_port *sas_port = sas_phy->port;
704         struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
705         unsigned long flags;
706
707         if (!sas_port)
708                 return;
709
710         spin_lock_irqsave(&hisi_hba->lock, flags);
711         port->port_attached = 1;
712         port->id = phy->port_id;
713         phy->port = port;
714         sas_port->lldd_port = port;
715         spin_unlock_irqrestore(&hisi_hba->lock, flags);
716 }
717
/*
 * Complete @task as aborted (SAS_TASK_COMPLETE / SAS_ABORTED_TASK),
 * mark it done under task_state_lock, then free its slot.
 */
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}
738
/*
 * Abort-complete every outstanding slot of @device.
 * hisi_hba.lock should be locked by the caller.
 */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	/* _safe: hisi_sas_do_release_task unlinks the slot while we walk */
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
749
750 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
751 {
752         struct hisi_sas_device *sas_dev;
753         struct domain_device *device;
754         int i;
755
756         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
757                 sas_dev = &hisi_hba->devices[i];
758                 device = sas_dev->sas_device;
759
760                 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
761                     !device)
762                         continue;
763
764                 hisi_sas_release_task(hisi_hba, device);
765         }
766 }
767 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
768
/* Deregister @device in hardware, if this hw variant provides the hook. */
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}
775
/*
 * libsas hook: @device has gone away.  Unless a controller reset is
 * in flight (which tears state down itself), abort the device's
 * commands in hardware, deregister it and clear its ITCT, then mark
 * the device slot free for reuse.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
				     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
799
/* libsas execute-task entry point: deliver a normal (non-TMF) command. */
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
804
/*
 * libsas phy-control hook.
 *
 * @func:     requested operation (hard reset, link reset, disable,
 *            set link rate, get events, ...)
 * @funcdata: operation argument (e.g. the sas_phy_linkrates for
 *            PHY_FUNC_SET_LINK_RATE)
 *
 * May sleep (link reset msleeps 100ms between disable and start).
 * Returns 0 on success or -EOPNOTSUPP for unsupported operations.
 */
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
842
843 static void hisi_sas_task_done(struct sas_task *task)
844 {
845         if (!del_timer(&task->slow_task->timer))
846                 return;
847         complete(&task->slow_task->completion);
848 }
849
/*
 * Timer callback for TMF/internal-abort slow tasks: mark the task as
 * aborted (unless it has already completed) and wake the waiter in
 * hisi_sas_exec_internal_tmf_task()/hisi_sas_internal_task_abort().
 */
static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	/* task_state_lock serialises against the normal completion path. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}
863
864 #define TASK_TIMEOUT 20
865 #define TASK_RETRY 3
866 #define INTERNAL_ABORT_TIMEOUT 6
/*
 * hisi_sas_exec_internal_tmf_task - issue a TMF frame (SSP) or a reset
 * FIS (SATA) as an internal slow task and wait for its completion.
 * @device: target domain device
 * @parameter: SSP task or host-to-dev FIS to copy into the task
 * @para_len: length of @parameter in bytes
 * @tmf: TMF descriptor (NULL for the SATA soft-reset path)
 *
 * Retries up to TASK_RETRY times.  Returns TMF_RESP_FUNC_COMPLETE or
 * TMF_RESP_FUNC_SUCC on success, TMF_RESP_FUNC_FAILED on failure or
 * timeout, a negative errno if the task could not be executed, the
 * residual byte count on data underrun, or -EMSGSIZE on data overrun.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		/* Arm the timeout; hisi_sas_tmf_timedout() completes us. */
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				/*
				 * Detach the slot from the (about to be
				 * freed) task so a late completion does
				 * not touch stale memory.
				 */
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		/* Any other status: free this attempt and retry. */
		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
968
969 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
970                 bool reset, int pmp, u8 *fis)
971 {
972         struct ata_taskfile tf;
973
974         ata_tf_init(dev, &tf);
975         if (reset)
976                 tf.ctl |= ATA_SRST;
977         else
978                 tf.ctl &= ~ATA_SRST;
979         tf.command = ATA_CMD_DEV_RESET;
980         ata_tf_to_fis(&tf, pmp, 0, fis);
981 }
982
/*
 * Soft-reset a SATA disk: send an SRST-assert FIS and then an
 * SRST-deassert FIS on every edge link of the device's ATA port.  On
 * full success, all tasks outstanding against the device are released.
 * Returns a TMF_RESP_* code from the underlying internal task execution.
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	unsigned long flags;

	/* Phase 1: assert SRST on each link; stop at the first failure. */
	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	/* Phase 2: de-assert SRST, only if every assert succeeded. */
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}
1025
1026 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1027                                 u8 *lun, struct hisi_sas_tmf_task *tmf)
1028 {
1029         struct sas_ssp_task ssp_task;
1030
1031         if (!(device->tproto & SAS_PROTOCOL_SSP))
1032                 return TMF_RESP_FUNC_ESUPP;
1033
1034         memcpy(ssp_task.LUN, lun, 8);
1035
1036         return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1037                                 sizeof(ssp_task), tmf);
1038 }
1039
/*
 * After a controller reset, refresh each registered device's hw port id
 * from a phy of its libsas port that is still up; if no phy of the port
 * is up, mark the port id invalid (0xff).  Devices whose port is alive
 * get their ITCT re-programmed.
 */
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		/* Skip unused slots and devices without a libsas port. */
		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		/* Find any phy of this port that came back up. */
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}
1078
/*
 * Compare the per-phy link state bitmaps from before (@old_state) and
 * after (@state) a controller reset and report changes to libsas: a phy
 * still up whose port leads to an expander triggers a broadcast event
 * (forcing rediscovery); a phy that went down is reported via
 * hisi_sas_phy_down().
 */
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		/* Only notify once per port (phys may share a wide port). */
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}
}
1112
/*
 * Full controller soft reset: block new requests, reset the hw, fail
 * back all outstanding tasks, re-init the phys, then report resulting
 * topology changes to libsas.  Serialised by HISI_SAS_RESET_BIT so only
 * one reset runs at a time.  Returns 0 on success, negative if the hw
 * does not support soft reset, a reset is already running, or the hw
 * reset itself failed.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	unsigned long flags;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	/* Only one reset at a time. */
	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	/* Snapshot phy state so the rescan can diff before/after. */
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		goto out;
	}
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_tasks(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	scsi_unblock_requests(shost);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

out:
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}
1160
/*
 * libsas abort-task handler.  Strategy depends on the task protocol:
 * SSP: issue an ABORT TASK TMF plus an internal abort for the tag, and
 *      release the slot if the IO is confirmed gone from the device;
 * SATA/STP: internally abort all IO to the device, then soft-reset it;
 * SMP: internally abort the single command and release its slot if the
 *      abort failed.  Returns a TMF_RESP_* code.
 */
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev) {
		dev_warn(dev, "Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	/* Nothing to abort if the task already completed. */
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task) {
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_sas_do_release_task(hisi_hba, task, slot);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
			     HISI_SAS_INT_ABT_CMD, tag);
		/* Internal abort failed: reclaim the slot ourselves. */
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_do_release_task(hisi_hba, task, slot);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}
1249
/*
 * libsas abort-task-set handler: internally abort all IO to the device,
 * de-register it, then issue an ABORT TASK SET TMF to @lun; release all
 * tasks for the device if the TMF completed.  Returns a TMF_RESP_* code.
 */
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	return rc;
}
1277
1278 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1279 {
1280         int rc = TMF_RESP_FUNC_FAILED;
1281         struct hisi_sas_tmf_task tmf_task;
1282
1283         tmf_task.tmf = TMF_CLEAR_ACA;
1284         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1285
1286         return rc;
1287 }
1288
1289 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1290 {
1291         struct sas_phy *phy = sas_get_local_phy(device);
1292         int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1293                         (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1294         rc = sas_phy_reset(phy, reset_type);
1295         sas_put_local_phy(phy);
1296         msleep(2000);
1297         return rc;
1298 }
1299
/*
 * libsas I_T nexus reset handler: abort all internal IO to the device,
 * de-register it, then reset the local phy; release all tasks for the
 * device if the reset completed (or the device is gone, -ENODEV).  Only
 * valid while the device is in error-handling state.
 */
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
	return rc;
}
1329
/*
 * libsas LU reset handler.  For SATA devices: abort internal IO, then
 * hard-reset the local phy.  For SSP devices: abort internal IO, then
 * issue a LU_RESET TMF to @lun.  In both cases all tasks outstanding
 * against the device are released on success.  Returns a TMF_RESP_*
 * code or a negative errno from sas_phy_reset().
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			spin_lock_irqsave(&hisi_hba->lock, flags);
			hisi_sas_release_task(hisi_hba, device);
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			     sas_dev->device_id, rc);
	return rc;
}
1385
/*
 * libsas clear-nexus-HA handler: queue the driver's on-stack reset work
 * on the driver workqueue and wait for it; success is reported only if
 * the work signalled completion via r.done.
 */
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (r.done)
		return TMF_RESP_FUNC_COMPLETE;

	return TMF_RESP_FUNC_FAILED;
}
1398
/*
 * libsas query-task handler: for an SSP command still owned by this
 * LLDD, issue a QUERY TASK TMF for its tag.  Recognised TMF results are
 * passed through; anything else is mapped to TMF_RESP_FUNC_FAILED.
 */
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
1432
/*
 * Build and deliver one internal abort command for @task.  Allocates a
 * slot index and a delivery-queue entry, fills the command header via
 * hisi_sas_task_prep_abort() and starts delivery to the chip.  Returns
 * 0 on success; -EINVAL if the controller is currently rejecting
 * commands, -1 if the device has no port, or a negative errno/slot
 * allocation failure code otherwise.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq;

	/* Refuse new commands while a controller reset is in flight. */
	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	/* dq->lock is held from here until delivery is kicked below. */
	spin_lock_irqsave(&dq->lock, flags_dq);
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (rc)
		goto err_out_tag;

	dlvry_queue = dq->id;
	dlvry_queue_slot = dq->wr_point;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
			GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	/* Clear header and command/status buffers before hw sees them. */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);
	if (rc)
		goto err_out_buf;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	dq->slot_prep = slot;

	/* send abort command to the chip */
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
1531
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 *
 * Return: TMF_RESP_FUNC_COMPLETE/SUCC if the abort completed,
 * TMF_RESP_FUNC_FAILED if it failed or the hw has no prep_abort hook,
 * -ENOMEM/-EIO or the exec error code otherwise.
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	/* Arm the timeout; hisi_sas_tmf_timedout() completes us. */
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			/*
			 * Detach the slot from the (about to be freed)
			 * task so a late completion is harmless.
			 */
			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
1619
/* libsas lldd_port_formed hook: forward to the driver's port-formed handling. */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
        hisi_sas_port_notify_formed(sas_phy);
}
1624
/*
 * libsas lldd_port_deformed hook: intentionally a no-op; phy/port
 * teardown is handled in hisi_sas_phy_down() instead.
 */
static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}
1628
1629 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1630                         u8 reg_index, u8 reg_count, u8 *write_data)
1631 {
1632         struct hisi_hba *hisi_hba = sha->lldd_ha;
1633
1634         if (!hisi_hba->hw->write_gpio)
1635                 return -EOPNOTSUPP;
1636
1637         return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1638                                 reg_index, reg_count, write_data);
1639 }
1640
1641 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1642 {
1643         phy->phy_attached = 0;
1644         phy->phy_type = 0;
1645         phy->port = NULL;
1646 }
1647
1648 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1649 {
1650         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1651         struct asd_sas_phy *sas_phy = &phy->sas_phy;
1652         struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1653
1654         if (rdy) {
1655                 /* Phy down but ready */
1656                 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1657                 hisi_sas_port_notify_formed(sas_phy);
1658         } else {
1659                 struct hisi_sas_port *port  = phy->port;
1660
1661                 /* Phy down and not ready */
1662                 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1663                 sas_phy_disconnected(sas_phy);
1664
1665                 if (port) {
1666                         if (phy->phy_type & PORT_TYPE_SAS) {
1667                                 int port_id = port->id;
1668
1669                                 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1670                                                                        port_id))
1671                                         port->port_attached = 0;
1672                         } else if (phy->phy_type & PORT_TYPE_SATA)
1673                                 port->port_attached = 0;
1674                 }
1675                 hisi_sas_phy_disconnected(phy);
1676         }
1677 }
1678 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1679
1680 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1681 {
1682         int i;
1683
1684         for (i = 0; i < hisi_hba->queue_count; i++) {
1685                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1686
1687                 tasklet_kill(&cq->tasklet);
1688         }
1689 }
1690 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1691
/* SAS transport template; set up once in hisi_sas_init(). */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

/* sysfs attributes exposed on the Scsi_Host (NULL-terminated array). */
static struct device_attribute *host_attrs[] = {
        &dev_attr_phy_event_threshold,
        NULL,
};
1699
/*
 * SCSI host template shared by all hisi_sas HW versions; most hooks go
 * straight to libsas.  can_queue/sg_tablesize are raised per-HBA in
 * hisi_sas_probe() once the HW capabilities are known.
 */
static struct scsi_host_template _hisi_sas_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .queuecommand           = sas_queuecommand,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = hisi_sas_slave_configure,
        .scan_finished          = hisi_sas_scan_finished,
        .scan_start             = hisi_sas_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
        .bios_param             = sas_bios_param,
        .can_queue              = 1,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_device_reset_handler = sas_eh_device_reset_handler,
        .eh_target_reset_handler = sas_eh_target_reset_handler,
        .target_destroy         = sas_target_destroy,
        .ioctl                  = sas_ioctl,
        .shost_attrs            = host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
1723
1724 static struct sas_domain_function_template hisi_sas_transport_ops = {
1725         .lldd_dev_found         = hisi_sas_dev_found,
1726         .lldd_dev_gone          = hisi_sas_dev_gone,
1727         .lldd_execute_task      = hisi_sas_queue_command,
1728         .lldd_control_phy       = hisi_sas_control_phy,
1729         .lldd_abort_task        = hisi_sas_abort_task,
1730         .lldd_abort_task_set    = hisi_sas_abort_task_set,
1731         .lldd_clear_aca         = hisi_sas_clear_aca,
1732         .lldd_I_T_nexus_reset   = hisi_sas_I_T_nexus_reset,
1733         .lldd_lu_reset          = hisi_sas_lu_reset,
1734         .lldd_query_task        = hisi_sas_query_task,
1735         .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1736         .lldd_port_formed       = hisi_sas_port_formed,
1737         .lldd_port_deformed = hisi_sas_port_deformed,
1738         .lldd_write_gpio = hisi_sas_write_gpio,
1739 };
1740
1741 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1742 {
1743         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1744
1745         for (i = 0; i < hisi_hba->queue_count; i++) {
1746                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1747                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1748
1749                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1750                 memset(hisi_hba->cmd_hdr[i], 0, s);
1751                 dq->wr_point = 0;
1752
1753                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1754                 memset(hisi_hba->complete_hdr[i], 0, s);
1755                 cq->rd_point = 0;
1756         }
1757
1758         s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1759         memset(hisi_hba->initial_fis, 0, s);
1760
1761         s = max_command_entries * sizeof(struct hisi_sas_iost);
1762         memset(hisi_hba->iost, 0, s);
1763
1764         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1765         memset(hisi_hba->breakpoint, 0, s);
1766
1767         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1768         memset(hisi_hba->sata_breakpoint, 0, s);
1769 }
1770 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1771
1772 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1773 {
1774         struct device *dev = hisi_hba->dev;
1775         int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1776
1777         spin_lock_init(&hisi_hba->lock);
1778         for (i = 0; i < hisi_hba->n_phy; i++) {
1779                 hisi_sas_phy_init(hisi_hba, i);
1780                 hisi_hba->port[i].port_attached = 0;
1781                 hisi_hba->port[i].id = -1;
1782         }
1783
1784         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1785                 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1786                 hisi_hba->devices[i].device_id = i;
1787                 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1788         }
1789
1790         for (i = 0; i < hisi_hba->queue_count; i++) {
1791                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1792                 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1793
1794                 /* Completion queue structure */
1795                 cq->id = i;
1796                 cq->hisi_hba = hisi_hba;
1797
1798                 /* Delivery queue structure */
1799                 spin_lock_init(&dq->lock);
1800                 dq->id = i;
1801                 dq->hisi_hba = hisi_hba;
1802
1803                 /* Delivery queue */
1804                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1805                 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1806                                         &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1807                 if (!hisi_hba->cmd_hdr[i])
1808                         goto err_out;
1809
1810                 /* Completion queue */
1811                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1812                 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1813                                 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1814                 if (!hisi_hba->complete_hdr[i])
1815                         goto err_out;
1816         }
1817
1818         s = sizeof(struct hisi_sas_slot_buf_table);
1819         hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1820         if (!hisi_hba->buffer_pool)
1821                 goto err_out;
1822
1823         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1824         hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
1825                                             GFP_KERNEL);
1826         if (!hisi_hba->itct)
1827                 goto err_out;
1828
1829         hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1830                                            sizeof(struct hisi_sas_slot),
1831                                            GFP_KERNEL);
1832         if (!hisi_hba->slot_info)
1833                 goto err_out;
1834
1835         s = max_command_entries * sizeof(struct hisi_sas_iost);
1836         hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1837                                             GFP_KERNEL);
1838         if (!hisi_hba->iost)
1839                 goto err_out;
1840
1841         s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1842         hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1843                                 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1844         if (!hisi_hba->breakpoint)
1845                 goto err_out;
1846
1847         hisi_hba->slot_index_count = max_command_entries;
1848         s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1849         hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1850         if (!hisi_hba->slot_index_tags)
1851                 goto err_out;
1852
1853         s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1854         hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1855                                 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1856         if (!hisi_hba->initial_fis)
1857                 goto err_out;
1858
1859         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1860         hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1861                                 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1862         if (!hisi_hba->sata_breakpoint)
1863                 goto err_out;
1864         hisi_sas_init_mem(hisi_hba);
1865
1866         hisi_sas_slot_index_init(hisi_hba);
1867
1868         hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1869         if (!hisi_hba->wq) {
1870                 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1871                 goto err_out;
1872         }
1873
1874         return 0;
1875 err_out:
1876         return -ENOMEM;
1877 }
1878 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1879
/*
 * hisi_sas_free() - release everything hisi_sas_alloc() set up.
 *
 * Safe to call on a partially-allocated HBA: each pointer is checked
 * before being freed.  devm_* allocations (slot_info, slot_index_tags)
 * are released by the driver core and are not touched here.
 */
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

        for (i = 0; i < hisi_hba->queue_count; i++) {
                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
                if (hisi_hba->cmd_hdr[i])
                        dma_free_coherent(dev, s,
                                          hisi_hba->cmd_hdr[i],
                                          hisi_hba->cmd_hdr_dma[i]);

                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                if (hisi_hba->complete_hdr[i])
                        dma_free_coherent(dev, s,
                                          hisi_hba->complete_hdr[i],
                                          hisi_hba->complete_hdr_dma[i]);
        }

        /* dma_pool_destroy() tolerates a NULL pool. */
        dma_pool_destroy(hisi_hba->buffer_pool);

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
        if (hisi_hba->itct)
                dma_free_coherent(dev, s,
                                  hisi_hba->itct, hisi_hba->itct_dma);

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        if (hisi_hba->iost)
                dma_free_coherent(dev, s,
                                  hisi_hba->iost, hisi_hba->iost_dma);

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        if (hisi_hba->breakpoint)
                dma_free_coherent(dev, s,
                                  hisi_hba->breakpoint,
                                  hisi_hba->breakpoint_dma);


        s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
        if (hisi_hba->initial_fis)
                dma_free_coherent(dev, s,
                                  hisi_hba->initial_fis,
                                  hisi_hba->initial_fis_dma);

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
        if (hisi_hba->sata_breakpoint)
                dma_free_coherent(dev, s,
                                  hisi_hba->sata_breakpoint,
                                  hisi_hba->sata_breakpoint_dma);

        if (hisi_hba->wq)
                destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
1934
1935 void hisi_sas_rst_work_handler(struct work_struct *work)
1936 {
1937         struct hisi_hba *hisi_hba =
1938                 container_of(work, struct hisi_hba, rst_work);
1939
1940         hisi_sas_controller_reset(hisi_hba);
1941 }
1942 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
1943
1944 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
1945 {
1946         struct hisi_sas_rst *rst =
1947                 container_of(work, struct hisi_sas_rst, work);
1948
1949         if (!hisi_sas_controller_reset(rst->hisi_hba))
1950                 rst->done = true;
1951         complete(rst->completion);
1952 }
1953 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
1954
/*
 * hisi_sas_get_fw_info() - read controller configuration from firmware
 * device properties (DT or ACPI) into @hisi_hba.
 *
 * "sas-addr", "phy-count" and "queue-count" are mandatory for every
 * firmware type; the syscon and ctrl-*-reg properties are required only
 * for DT-based platform devices.  The reference clock is optional.
 *
 * Returns 0 on success, -ENOENT when a required property is missing.
 */
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        struct platform_device *pdev = hisi_hba->platform_dev;
        struct device_node *np = pdev ? pdev->dev.of_node : NULL;
        struct clk *refclk;

        if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
                                          SAS_ADDR_SIZE)) {
                dev_err(dev, "could not get property sas-addr\n");
                return -ENOENT;
        }

        if (np) {
                /*
                 * These properties are only required for platform device-based
                 * controller with DT firmware.
                 */
                hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
                                        "hisilicon,sas-syscon");
                if (IS_ERR(hisi_hba->ctrl)) {
                        dev_err(dev, "could not get syscon\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-reset-reg",
                                             &hisi_hba->ctrl_reset_reg)) {
                        dev_err(dev,
                                "could not get property ctrl-reset-reg\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
                                             &hisi_hba->ctrl_reset_sts_reg)) {
                        dev_err(dev,
                                "could not get property ctrl-reset-sts-reg\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
                                             &hisi_hba->ctrl_clock_ena_reg)) {
                        dev_err(dev,
                                "could not get property ctrl-clock-ena-reg\n");
                        return -ENOENT;
                }
        }

        /* The reference clock is optional; only its rate is recorded. */
        refclk = devm_clk_get(dev, NULL);
        if (IS_ERR(refclk))
                dev_dbg(dev, "no ref clk property\n");
        else
                hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

        if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
                dev_err(dev, "could not get property phy-count\n");
                return -ENOENT;
        }

        if (device_property_read_u32(dev, "queue-count",
                                     &hisi_hba->queue_count)) {
                dev_err(dev, "could not get property queue-count\n");
                return -ENOENT;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2022
/*
 * hisi_sas_shost_alloc() - allocate a Scsi_Host with an embedded
 * hisi_hba, read the firmware configuration, map the register windows
 * and allocate all HBA memory.
 *
 * Returns the shost on success, NULL on failure.  On failure the shost
 * reference is dropped; when hisi_sas_alloc() fails, its partial
 * allocations are freed here as well.
 */
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
                                              const struct hisi_sas_hw *hw)
{
        struct resource *res;
        struct Scsi_Host *shost;
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;

        shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
        if (!shost) {
                dev_err(dev, "scsi host alloc failed\n");
                return NULL;
        }
        hisi_hba = shost_priv(shost);

        INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
        hisi_hba->hw = hw;
        hisi_hba->dev = dev;
        hisi_hba->platform_dev = pdev;
        hisi_hba->shost = shost;
        SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

        /* Callback is filled in later by the HW layer when needed. */
        timer_setup(&hisi_hba->timer, NULL, 0);

        if (hisi_sas_get_fw_info(hisi_hba) < 0)
                goto err_out;

        /* Prefer 64-bit DMA, fall back to 32-bit. */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                dev_err(dev, "No usable DMA addressing method\n");
                goto err_out;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hisi_hba->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(hisi_hba->regs))
                goto err_out;

        /* The second MEM resource (SGPIO registers) is optional. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
                if (IS_ERR(hisi_hba->sgpio_regs))
                        goto err_out;
        }

        if (hisi_sas_alloc(hisi_hba, shost)) {
                hisi_sas_free(hisi_hba);
                goto err_out;
        }

        return shost;
err_out:
        scsi_host_put(shost);
        dev_err(dev, "shost alloc failed\n");
        return NULL;
}
2079
/*
 * hisi_sas_probe() - common probe path for the v1/v2 platform drivers.
 *
 * Allocates the shost + hisi_hba, wires up the libsas ha structure
 * (phy/port arrays, addresses), registers with the SCSI midlayer and
 * libsas, initialises the hardware and kicks off the bus scan.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the error paths.
 */
int hisi_sas_probe(struct platform_device *pdev,
                         const struct hisi_sas_hw *hw)
{
        struct Scsi_Host *shost;
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;
        struct asd_sas_phy **arr_phy;
        struct asd_sas_port **arr_port;
        struct sas_ha_struct *sha;
        int rc, phy_nr, port_nr, i;

        shost = hisi_sas_shost_alloc(pdev, hw);
        if (!shost)
                return -ENOMEM;

        sha = SHOST_TO_SAS_HA(shost);
        hisi_hba = shost_priv(shost);
        platform_set_drvdata(pdev, sha);

        /* One libsas port per phy. */
        phy_nr = port_nr = hisi_hba->n_phy;

        arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
        arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
        if (!arr_phy || !arr_port) {
                rc = -ENOMEM;
                goto err_out_ha;
        }

        sha->sas_phy = arr_phy;
        sha->sas_port = arr_port;
        sha->lldd_ha = hisi_hba;

        shost->transportt = hisi_sas_stt;
        shost->max_id = HISI_SAS_MAX_DEVICES;
        shost->max_lun = ~0;
        shost->max_channel = 1;
        shost->max_cmd_len = 16;
        shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
        shost->can_queue = hisi_hba->hw->max_command_entries;
        shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

        sha->sas_ha_name = DRV_NAME;
        sha->dev = hisi_hba->dev;
        sha->lldd_module = THIS_MODULE;
        sha->sas_addr = &hisi_hba->sas_addr[0];
        sha->num_phys = hisi_hba->n_phy;
        sha->core.shost = hisi_hba->shost;

        for (i = 0; i < hisi_hba->n_phy; i++) {
                sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
                sha->sas_port[i] = &hisi_hba->port[i].sas_port;
        }

        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha;

        rc = sas_register_ha(sha);
        if (rc)
                goto err_out_register_ha;

        rc = hisi_hba->hw->hw_init(hisi_hba);
        if (rc)
                goto err_out_register_ha;

        scsi_scan_host(shost);

        return 0;

err_out_register_ha:
        scsi_remove_host(shost);
err_out_ha:
        hisi_sas_free(hisi_hba);
        scsi_host_put(shost);
        return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
2157
2158 int hisi_sas_remove(struct platform_device *pdev)
2159 {
2160         struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2161         struct hisi_hba *hisi_hba = sha->lldd_ha;
2162         struct Scsi_Host *shost = sha->core.shost;
2163
2164         if (timer_pending(&hisi_hba->timer))
2165                 del_timer(&hisi_hba->timer);
2166
2167         sas_unregister_ha(sha);
2168         sas_remove_host(sha->core.shost);
2169
2170         hisi_sas_free(hisi_hba);
2171         scsi_host_put(shost);
2172         return 0;
2173 }
2174 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2175
2176 static __init int hisi_sas_init(void)
2177 {
2178         hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2179         if (!hisi_sas_stt)
2180                 return -ENOMEM;
2181
2182         return 0;
2183 }
2184
static __exit void hisi_sas_exit(void)
{
        /* Release the transport template acquired in hisi_sas_init(). */
        sas_release_transport(hisi_sas_stt);
}
2189
2190 module_init(hisi_sas_init);
2191 module_exit(hisi_sas_exit);
2192
2193 MODULE_LICENSE("GPL");
2194 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2195 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2196 MODULE_ALIAS("platform:" DRV_NAME);