/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"
#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
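/*
 * Map the ATA command in a host-to-device FIS to the SATA protocol class
 * (FPDMA/PIO/DMA/NONDATA) used when building the command header; commands
 * not listed below fall back to a choice based on the DMA direction.
 */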
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
void hisi_sas_sata_done(struct sas_task *task,
			    struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		   qc->tf.command == ATA_CMD_FPDMA_READ)) {
		*tag = qc->tag;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
	if (index >= hisi_hba->slot_index_count)
		return -SAS_QUEUE_FULL;
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}
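/*
 * Release everything tied to a finished or aborted slot: unmap the
 * scatterlist for non-ATA tasks, return the slot buffer to the DMA pool,
 * unlink the slot from the device list and free its tag in the bitmap.
 */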
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct device *dev = hisi_hba->dev;

	if (!task->lldd_task)
		return;

	task->lldd_task = NULL;

	if (!sas_protocol_ata(task->task_proto))
		dma_unmap_sg(dev, task->scatter, slot->n_elem,
			     task->data_dir);

	dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	list_del_init(&slot->entry);
	hisi_sas_slot_index_free(hisi_hba, slot->idx);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				    struct hisi_sas_slot *slot,
				    int device_id, int abort_flag, int tag_to_abort)
{
	return hisi_hba->hw->prep_abort(hisi_hba, slot,
					device_id, abort_flag, tag_to_abort);
}
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;
	unsigned long flags;

	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (task->task_done)
		task->task_done(task);
}
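/*
 * Build a command slot for @task on delivery queue @dq: map the scatterlist,
 * allocate a slot tag and slot buffer, fill the command header through the
 * hw-specific prep_* hook and stage the slot for delivery. Called with
 * dq->lock held by hisi_sas_task_exec().
 */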
294 static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
295 *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
298 struct hisi_hba *hisi_hba = dq->hisi_hba;
299 struct domain_device *device = task->dev;
300 struct hisi_sas_device *sas_dev = device->lldd_dev;
301 struct hisi_sas_port *port;
302 struct hisi_sas_slot *slot;
303 struct hisi_sas_cmd_hdr *cmd_hdr_base;
304 struct asd_sas_port *sas_port = device->port;
305 struct device *dev = hisi_hba->dev;
306 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
310 struct task_status_struct *ts = &task->task_status;
312 ts->resp = SAS_TASK_UNDELIVERED;
313 ts->stat = SAS_PHY_DOWN;
315 * libsas will use dev->port, should
316 * not call task_done for sata
318 if (device->dev_type != SAS_SATA_DEV)
319 task->task_done(task);
323 if (DEV_IS_GONE(sas_dev)) {
325 dev_info(dev, "task prep: device %d not ready\n",
328 dev_info(dev, "task prep: device %016llx not ready\n",
329 SAS_ADDR(device->sas_addr));
334 port = to_hisi_sas_port(sas_port);
335 if (port && !port->port_attached) {
336 dev_info(dev, "task prep: %s port%d not attach device\n",
337 (dev_is_sata(device)) ?
344 if (!sas_protocol_ata(task->task_proto)) {
345 if (task->num_scatter) {
346 n_elem = dma_map_sg(dev, task->scatter,
347 task->num_scatter, task->data_dir);
354 n_elem = task->num_scatter;
356 spin_lock_irqsave(&hisi_hba->lock, flags);
357 if (hisi_hba->hw->slot_index_alloc)
358 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
361 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
363 spin_unlock_irqrestore(&hisi_hba->lock, flags);
366 spin_unlock_irqrestore(&hisi_hba->lock, flags);
368 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
372 dlvry_queue = dq->id;
373 dlvry_queue_slot = dq->wr_point;
374 slot = &hisi_hba->slot_info[slot_idx];
375 memset(slot, 0, sizeof(struct hisi_sas_slot));
377 slot->idx = slot_idx;
378 slot->n_elem = n_elem;
379 slot->dlvry_queue = dlvry_queue;
380 slot->dlvry_queue_slot = dlvry_queue_slot;
381 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
382 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
385 task->lldd_task = slot;
386 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
388 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
389 GFP_ATOMIC, &slot->buf_dma);
392 goto err_out_slot_buf;
394 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
395 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
396 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
398 switch (task->task_proto) {
399 case SAS_PROTOCOL_SMP:
400 rc = hisi_sas_task_prep_smp(hisi_hba, slot);
402 case SAS_PROTOCOL_SSP:
403 rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
405 case SAS_PROTOCOL_SATA:
406 case SAS_PROTOCOL_STP:
407 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
408 rc = hisi_sas_task_prep_ata(hisi_hba, slot);
411 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
418 dev_err(dev, "task prep: rc = 0x%x\n", rc);
422 spin_lock_irqsave(&hisi_hba->lock, flags);
423 list_add_tail(&slot->entry, &sas_dev->list);
424 spin_unlock_irqrestore(&hisi_hba->lock, flags);
425 spin_lock_irqsave(&task->task_state_lock, flags);
426 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
427 spin_unlock_irqrestore(&task->task_state_lock, flags);
429 dq->slot_prep = slot;
435 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
438 /* Nothing to be done */
440 spin_lock_irqsave(&hisi_hba->lock, flags);
441 hisi_sas_slot_index_free(hisi_hba, slot_idx);
442 spin_unlock_irqrestore(&hisi_hba->lock, flags);
444 dev_err(dev, "task prep: failed[%d]!\n", rc);
445 if (!sas_protocol_ata(task->task_proto))
447 dma_unmap_sg(dev, task->scatter,
454 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
455 int is_tmf, struct hisi_sas_tmf_task *tmf)
460 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
461 struct device *dev = hisi_hba->dev;
462 struct domain_device *device = task->dev;
463 struct hisi_sas_device *sas_dev = device->lldd_dev;
464 struct hisi_sas_dq *dq = sas_dev->dq;
466 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
469 /* protect task_prep and start_delivery sequence */
470 spin_lock_irqsave(&dq->lock, flags);
471 rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
473 dev_err(dev, "task exec: failed[%d]!\n", rc);
476 hisi_hba->hw->start_delivery(dq);
477 spin_unlock_irqrestore(&dq->lock, flags);
482 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
484 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
485 struct asd_sas_phy *sas_phy = &phy->sas_phy;
486 struct sas_ha_struct *sas_ha;
488 if (!phy->phy_attached)
491 sas_ha = &hisi_hba->sha;
492 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
495 struct sas_phy *sphy = sas_phy->phy;
497 sphy->negotiated_linkrate = sas_phy->linkrate;
498 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
499 sphy->maximum_linkrate_hw =
500 hisi_hba->hw->phy_get_max_linkrate();
501 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
502 sphy->minimum_linkrate = phy->minimum_linkrate;
504 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
505 sphy->maximum_linkrate = phy->maximum_linkrate;
508 if (phy->phy_type & PORT_TYPE_SAS) {
509 struct sas_identify_frame *id;
511 id = (struct sas_identify_frame *)phy->frame_rcvd;
512 id->dev_type = phy->identify.device_type;
513 id->initiator_bits = SAS_PROTOCOL_ALL;
514 id->target_bits = phy->identify.target_port_protocols;
515 } else if (phy->phy_type & PORT_TYPE_SATA) {
519 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
520 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
523 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
525 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
526 struct hisi_sas_device *sas_dev = NULL;
530 spin_lock_irqsave(&hisi_hba->lock, flags);
531 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
532 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
533 int queue = i % hisi_hba->queue_count;
534 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
536 hisi_hba->devices[i].device_id = i;
537 sas_dev = &hisi_hba->devices[i];
538 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
539 sas_dev->dev_type = device->dev_type;
540 sas_dev->hisi_hba = hisi_hba;
541 sas_dev->sas_device = device;
543 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
547 spin_unlock_irqrestore(&hisi_hba->lock, flags);
552 static int hisi_sas_dev_found(struct domain_device *device)
554 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
555 struct domain_device *parent_dev = device->parent;
556 struct hisi_sas_device *sas_dev;
557 struct device *dev = hisi_hba->dev;
559 if (hisi_hba->hw->alloc_dev)
560 sas_dev = hisi_hba->hw->alloc_dev(device);
562 sas_dev = hisi_sas_alloc_dev(device);
564 dev_err(dev, "fail alloc dev: max support %d devices\n",
565 HISI_SAS_MAX_DEVICES);
569 device->lldd_dev = sas_dev;
570 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
572 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
574 u8 phy_num = parent_dev->ex_dev.num_phys;
577 for (phy_no = 0; phy_no < phy_num; phy_no++) {
578 phy = &parent_dev->ex_dev.ex_phy[phy_no];
579 if (SAS_ADDR(phy->attached_sas_addr) ==
580 SAS_ADDR(device->sas_addr))
584 if (phy_no == phy_num) {
585 dev_info(dev, "dev found: no attached "
586 "dev:%016llx at ex:%016llx\n",
587 SAS_ADDR(device->sas_addr),
588 SAS_ADDR(parent_dev->sas_addr));
593 dev_info(dev, "dev[%d:%x] found\n",
594 sas_dev->device_id, sas_dev->dev_type);
599 static int hisi_sas_slave_configure(struct scsi_device *sdev)
601 struct domain_device *dev = sdev_to_domain_dev(sdev);
602 int ret = sas_slave_configure(sdev);
606 if (!dev_is_sata(dev))
607 sas_change_queue_depth(sdev, 64);
612 static void hisi_sas_scan_start(struct Scsi_Host *shost)
614 struct hisi_hba *hisi_hba = shost_priv(shost);
616 hisi_hba->hw->phys_init(hisi_hba);
619 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
621 struct hisi_hba *hisi_hba = shost_priv(shost);
622 struct sas_ha_struct *sha = &hisi_hba->sha;
624 /* Wait for PHY up interrupt to occur */
632 static void hisi_sas_phyup_work(struct work_struct *work)
634 struct hisi_sas_phy *phy =
635 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
636 struct hisi_hba *hisi_hba = phy->hisi_hba;
637 struct asd_sas_phy *sas_phy = &phy->sas_phy;
638 int phy_no = sas_phy->id;
640 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
641 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
644 static void hisi_sas_linkreset_work(struct work_struct *work)
646 struct hisi_sas_phy *phy =
647 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
648 struct asd_sas_phy *sas_phy = &phy->sas_phy;
650 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
653 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
654 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
655 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
658 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
659 enum hisi_sas_phy_event event)
661 struct hisi_hba *hisi_hba = phy->hisi_hba;
663 if (WARN_ON(event >= HISI_PHYES_NUM))
666 return queue_work(hisi_hba->wq, &phy->works[event]);
668 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
670 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
672 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
673 struct asd_sas_phy *sas_phy = &phy->sas_phy;
676 phy->hisi_hba = hisi_hba;
678 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
679 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
680 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
681 sas_phy->class = SAS;
682 sas_phy->iproto = SAS_PROTOCOL_ALL;
684 sas_phy->type = PHY_TYPE_PHYSICAL;
685 sas_phy->role = PHY_ROLE_INITIATOR;
686 sas_phy->oob_mode = OOB_NOT_CONNECTED;
687 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
688 sas_phy->id = phy_no;
689 sas_phy->sas_addr = &hisi_hba->sas_addr[0];
690 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
691 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
692 sas_phy->lldd_phy = phy;
694 for (i = 0; i < HISI_PHYES_NUM; i++)
695 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
698 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
700 struct sas_ha_struct *sas_ha = sas_phy->ha;
701 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
702 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
703 struct asd_sas_port *sas_port = sas_phy->port;
704 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
710 spin_lock_irqsave(&hisi_hba->lock, flags);
711 port->port_attached = 1;
712 port->id = phy->port_id;
714 sas_port->lldd_port = port;
715 spin_unlock_irqrestore(&hisi_hba->lock, flags);
718 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
719 struct hisi_sas_slot *slot)
723 struct task_status_struct *ts;
725 ts = &task->task_status;
727 ts->resp = SAS_TASK_COMPLETE;
728 ts->stat = SAS_ABORTED_TASK;
729 spin_lock_irqsave(&task->task_state_lock, flags);
730 task->task_state_flags &=
731 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
732 task->task_state_flags |= SAS_TASK_STATE_DONE;
733 spin_unlock_irqrestore(&task->task_state_lock, flags);
736 hisi_sas_slot_task_free(hisi_hba, task, slot);
739 /* hisi_hba.lock should be locked */
740 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
741 struct domain_device *device)
743 struct hisi_sas_slot *slot, *slot2;
744 struct hisi_sas_device *sas_dev = device->lldd_dev;
746 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
747 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
750 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
752 struct hisi_sas_device *sas_dev;
753 struct domain_device *device;
756 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
757 sas_dev = &hisi_hba->devices[i];
758 device = sas_dev->sas_device;
760 if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
764 hisi_sas_release_task(hisi_hba, device);
767 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
769 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
770 struct domain_device *device)
772 if (hisi_hba->hw->dereg_device)
773 hisi_hba->hw->dereg_device(hisi_hba, device);
776 static void hisi_sas_dev_gone(struct domain_device *device)
778 struct hisi_sas_device *sas_dev = device->lldd_dev;
779 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
780 struct device *dev = hisi_hba->dev;
782 dev_info(dev, "dev[%d:%x] is gone\n",
783 sas_dev->device_id, sas_dev->dev_type);
785 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
786 hisi_sas_internal_task_abort(hisi_hba, device,
787 HISI_SAS_INT_ABT_DEV, 0);
789 hisi_sas_dereg_device(hisi_hba, device);
791 hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
792 device->lldd_dev = NULL;
795 if (hisi_hba->hw->free_device)
796 hisi_hba->hw->free_device(sas_dev);
797 sas_dev->dev_type = SAS_PHY_UNUSED;
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
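/*
 * libsas lldd_control_phy hook: dispatch hard reset, link reset, disable,
 * link-rate changes and PHY event collection to the hw-specific layer.
 */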
805 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
808 struct sas_ha_struct *sas_ha = sas_phy->ha;
809 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
810 int phy_no = sas_phy->id;
813 case PHY_FUNC_HARD_RESET:
814 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
817 case PHY_FUNC_LINK_RESET:
818 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
820 hisi_hba->hw->phy_start(hisi_hba, phy_no);
823 case PHY_FUNC_DISABLE:
824 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
827 case PHY_FUNC_SET_LINK_RATE:
828 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
830 case PHY_FUNC_GET_EVENTS:
831 if (hisi_hba->hw->get_events) {
832 hisi_hba->hw->get_events(hisi_hba, phy_no);
836 case PHY_FUNC_RELEASE_SPINUP_HOLD:
static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}
#define TASK_TIMEOUT 20
#define INTERNAL_ABORT_TIMEOUT 6
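/*
 * Issue a TMF (or, for SATA, a software-reset FIS) as an internal slow task
 * and wait for it to complete, retrying up to TASK_RETRY times; the slow
 * task timer runs hisi_sas_tmf_timedout() after TASK_TIMEOUT seconds.
 */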
867 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
868 void *parameter, u32 para_len,
869 struct hisi_sas_tmf_task *tmf)
871 struct hisi_sas_device *sas_dev = device->lldd_dev;
872 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
873 struct device *dev = hisi_hba->dev;
874 struct sas_task *task;
877 for (retry = 0; retry < TASK_RETRY; retry++) {
878 task = sas_alloc_slow_task(GFP_KERNEL);
883 task->task_proto = device->tproto;
885 if (dev_is_sata(device)) {
886 task->ata_task.device_control_reg_update = 1;
887 memcpy(&task->ata_task.fis, parameter, para_len);
889 memcpy(&task->ssp_task, parameter, para_len);
891 task->task_done = hisi_sas_task_done;
893 task->slow_task->timer.function = hisi_sas_tmf_timedout;
894 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
895 add_timer(&task->slow_task->timer);
897 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
900 del_timer(&task->slow_task->timer);
901 dev_err(dev, "abort tmf: executing internal task failed: %d\n",
906 wait_for_completion(&task->slow_task->completion);
907 res = TMF_RESP_FUNC_FAILED;
908 /* Even TMF timed out, return direct. */
909 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
910 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
911 struct hisi_sas_slot *slot = task->lldd_task;
913 dev_err(dev, "abort tmf: TMF task timeout and not done\n");
919 dev_err(dev, "abort tmf: TMF task timeout\n");
922 if (task->task_status.resp == SAS_TASK_COMPLETE &&
923 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
924 res = TMF_RESP_FUNC_COMPLETE;
928 if (task->task_status.resp == SAS_TASK_COMPLETE &&
929 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
930 res = TMF_RESP_FUNC_SUCC;
934 if (task->task_status.resp == SAS_TASK_COMPLETE &&
935 task->task_status.stat == SAS_DATA_UNDERRUN) {
936 /* no error, but return the number of bytes of
939 dev_warn(dev, "abort tmf: task to dev %016llx "
940 "resp: 0x%x sts 0x%x underrun\n",
941 SAS_ADDR(device->sas_addr),
942 task->task_status.resp,
943 task->task_status.stat);
944 res = task->task_status.residual;
948 if (task->task_status.resp == SAS_TASK_COMPLETE &&
949 task->task_status.stat == SAS_DATA_OVERRUN) {
950 dev_warn(dev, "abort tmf: blocked task error\n");
955 dev_warn(dev, "abort tmf: task to dev "
956 "%016llx resp: 0x%x status 0x%x\n",
957 SAS_ADDR(device->sas_addr), task->task_status.resp,
958 task->task_status.stat);
963 if (retry == TASK_RETRY)
964 dev_warn(dev, "abort tmf: executing internal task failed!\n");
969 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
970 bool reset, int pmp, u8 *fis)
972 struct ata_taskfile tf;
974 ata_tf_init(dev, &tf);
979 tf.command = ATA_CMD_DEV_RESET;
980 ata_tf_to_fis(&tf, pmp, 0, fis);
983 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
986 struct ata_port *ap = device->sata_dev.ap;
987 struct ata_link *link;
988 int rc = TMF_RESP_FUNC_FAILED;
989 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
990 struct device *dev = hisi_hba->dev;
991 int s = sizeof(struct host_to_dev_fis);
994 ata_for_each_link(link, ap, EDGE) {
995 int pmp = sata_srst_pmp(link);
997 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
998 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
999 if (rc != TMF_RESP_FUNC_COMPLETE)
1003 if (rc == TMF_RESP_FUNC_COMPLETE) {
1004 ata_for_each_link(link, ap, EDGE) {
1005 int pmp = sata_srst_pmp(link);
1007 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1008 rc = hisi_sas_exec_internal_tmf_task(device, fis,
1010 if (rc != TMF_RESP_FUNC_COMPLETE)
1011 dev_err(dev, "ata disk de-reset failed\n");
1014 dev_err(dev, "ata disk reset failed\n");
1017 if (rc == TMF_RESP_FUNC_COMPLETE) {
1018 spin_lock_irqsave(&hisi_hba->lock, flags);
1019 hisi_sas_release_task(hisi_hba, device);
1020 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1026 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1027 u8 *lun, struct hisi_sas_tmf_task *tmf)
1029 struct sas_ssp_task ssp_task;
1031 if (!(device->tproto & SAS_PROTOCOL_SSP))
1032 return TMF_RESP_FUNC_ESUPP;
1034 memcpy(ssp_task.LUN, lun, 8);
1036 return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1037 sizeof(ssp_task), tmf);
1040 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
1042 u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
1045 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1046 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1047 struct domain_device *device = sas_dev->sas_device;
1048 struct asd_sas_port *sas_port;
1049 struct hisi_sas_port *port;
1050 struct hisi_sas_phy *phy = NULL;
1051 struct asd_sas_phy *sas_phy;
1053 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
1054 || !device || !device->port)
1057 sas_port = device->port;
1058 port = to_hisi_sas_port(sas_port);
1060 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1061 if (state & BIT(sas_phy->id)) {
1062 phy = sas_phy->lldd_phy;
1067 port->id = phy->port_id;
1069 /* Update linkrate of directly attached device. */
1070 if (!device->parent)
1071 device->linkrate = phy->sas_phy.linkrate;
1073 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1079 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1082 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1083 struct asd_sas_port *_sas_port = NULL;
1086 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1087 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1088 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1089 struct asd_sas_port *sas_port = sas_phy->port;
1090 bool do_port_check = !!(_sas_port != sas_port);
1092 if (!sas_phy->phy->enabled)
1095 /* Report PHY state change to libsas */
1096 if (state & BIT(phy_no)) {
1097 if (do_port_check && sas_port && sas_port->port_dev) {
1098 struct domain_device *dev = sas_port->port_dev;
1100 _sas_port = sas_port;
1102 if (DEV_IS_EXPANDER(dev->dev_type))
1103 sas_ha->notify_port_event(sas_phy,
1104 PORTE_BROADCAST_RCVD);
1106 } else if (old_state & (1 << phy_no))
1107 /* PHY down but was up before */
1108 hisi_sas_phy_down(hisi_hba, phy_no, 0);
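/*
 * Full controller recovery: block the host, soft-reset the hardware, complete
 * all outstanding slots as aborted, re-init the PHYs, refresh port IDs and
 * rescan the topology. HISI_SAS_RESET_BIT ensures only one reset runs at a
 * time.
 */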
1113 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1115 struct device *dev = hisi_hba->dev;
1116 struct Scsi_Host *shost = hisi_hba->shost;
1117 u32 old_state, state;
1118 unsigned long flags;
1121 if (!hisi_hba->hw->soft_reset)
1124 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1127 dev_info(dev, "controller resetting...\n");
1128 old_state = hisi_hba->hw->get_phys_state(hisi_hba);
1130 scsi_block_requests(shost);
1131 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1132 rc = hisi_hba->hw->soft_reset(hisi_hba);
1134 dev_warn(dev, "controller reset failed (%d)\n", rc);
1135 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1136 scsi_unblock_requests(shost);
1139 spin_lock_irqsave(&hisi_hba->lock, flags);
1140 hisi_sas_release_tasks(hisi_hba);
1141 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1143 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1145 /* Init and wait for PHYs to come up and all libsas event finished. */
1146 hisi_hba->hw->phys_init(hisi_hba);
1148 hisi_sas_refresh_port_id(hisi_hba);
1149 scsi_unblock_requests(shost);
1151 state = hisi_hba->hw->get_phys_state(hisi_hba);
1152 hisi_sas_rescan_topology(hisi_hba, old_state, state);
1153 dev_info(dev, "controller reset complete\n");
1156 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1161 static int hisi_sas_abort_task(struct sas_task *task)
1163 struct scsi_lun lun;
1164 struct hisi_sas_tmf_task tmf_task;
1165 struct domain_device *device = task->dev;
1166 struct hisi_sas_device *sas_dev = device->lldd_dev;
1167 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
1168 struct device *dev = hisi_hba->dev;
1169 int rc = TMF_RESP_FUNC_FAILED;
1170 unsigned long flags;
1173 dev_warn(dev, "Device has been removed\n");
1174 return TMF_RESP_FUNC_FAILED;
1177 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1178 rc = TMF_RESP_FUNC_COMPLETE;
1182 sas_dev->dev_status = HISI_SAS_DEV_EH;
1183 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1184 struct scsi_cmnd *cmnd = task->uldd_task;
1185 struct hisi_sas_slot *slot = task->lldd_task;
1186 u32 tag = slot->idx;
1189 int_to_scsilun(cmnd->device->lun, &lun);
1190 tmf_task.tmf = TMF_ABORT_TASK;
1191 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1193 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1196 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1197 HISI_SAS_INT_ABT_CMD, tag);
1199 dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1200 return TMF_RESP_FUNC_FAILED;
1204 * If the TMF finds that the IO is not in the device and also
1205 * the internal abort does not succeed, then it is safe to
1207 * Note: if the internal abort succeeds then the slot
1208 * will have already been completed
1210 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1211 if (task->lldd_task) {
1212 spin_lock_irqsave(&hisi_hba->lock, flags);
1213 hisi_sas_do_release_task(hisi_hba, task, slot);
1214 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1217 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1218 task->task_proto & SAS_PROTOCOL_STP) {
1219 if (task->dev->dev_type == SAS_SATA_DEV) {
1220 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1221 HISI_SAS_INT_ABT_DEV, 0);
1223 dev_err(dev, "abort task: internal abort failed\n");
1226 hisi_sas_dereg_device(hisi_hba, device);
1227 rc = hisi_sas_softreset_ata_disk(device);
1229 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1231 struct hisi_sas_slot *slot = task->lldd_task;
1232 u32 tag = slot->idx;
1234 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1235 HISI_SAS_INT_ABT_CMD, tag);
1236 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1238 spin_lock_irqsave(&hisi_hba->lock, flags);
1239 hisi_sas_do_release_task(hisi_hba, task, slot);
1240 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1245 if (rc != TMF_RESP_FUNC_COMPLETE)
1246 dev_notice(dev, "abort task: rc=%d\n", rc);
1250 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1252 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1253 struct device *dev = hisi_hba->dev;
1254 struct hisi_sas_tmf_task tmf_task;
1255 int rc = TMF_RESP_FUNC_FAILED;
1256 unsigned long flags;
1258 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1259 HISI_SAS_INT_ABT_DEV, 0);
1261 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1262 return TMF_RESP_FUNC_FAILED;
1264 hisi_sas_dereg_device(hisi_hba, device);
1266 tmf_task.tmf = TMF_ABORT_TASK_SET;
1267 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1269 if (rc == TMF_RESP_FUNC_COMPLETE) {
1270 spin_lock_irqsave(&hisi_hba->lock, flags);
1271 hisi_sas_release_task(hisi_hba, device);
1272 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1278 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1280 int rc = TMF_RESP_FUNC_FAILED;
1281 struct hisi_sas_tmf_task tmf_task;
1283 tmf_task.tmf = TMF_CLEAR_ACA;
1284 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1289 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1291 struct sas_phy *phy = sas_get_local_phy(device);
1292 int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1293 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1294 rc = sas_phy_reset(phy, reset_type);
1295 sas_put_local_phy(phy);
1300 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1302 struct hisi_sas_device *sas_dev = device->lldd_dev;
1303 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1304 struct device *dev = hisi_hba->dev;
1305 int rc = TMF_RESP_FUNC_FAILED;
1306 unsigned long flags;
1308 if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1309 return TMF_RESP_FUNC_FAILED;
1310 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1312 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1313 HISI_SAS_INT_ABT_DEV, 0);
1315 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1316 return TMF_RESP_FUNC_FAILED;
1318 hisi_sas_dereg_device(hisi_hba, device);
1320 rc = hisi_sas_debug_I_T_nexus_reset(device);
1322 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
1323 spin_lock_irqsave(&hisi_hba->lock, flags);
1324 hisi_sas_release_task(hisi_hba, device);
1325 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1330 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1332 struct hisi_sas_device *sas_dev = device->lldd_dev;
1333 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1334 struct device *dev = hisi_hba->dev;
1335 unsigned long flags;
1336 int rc = TMF_RESP_FUNC_FAILED;
1338 sas_dev->dev_status = HISI_SAS_DEV_EH;
1339 if (dev_is_sata(device)) {
1340 struct sas_phy *phy;
1342 /* Clear internal IO and then hardreset */
1343 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1344 HISI_SAS_INT_ABT_DEV, 0);
1346 dev_err(dev, "lu_reset: internal abort failed\n");
1349 hisi_sas_dereg_device(hisi_hba, device);
1351 phy = sas_get_local_phy(device);
1353 rc = sas_phy_reset(phy, 1);
1356 spin_lock_irqsave(&hisi_hba->lock, flags);
1357 hisi_sas_release_task(hisi_hba, device);
1358 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1360 sas_put_local_phy(phy);
1362 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1364 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1365 HISI_SAS_INT_ABT_DEV, 0);
1367 dev_err(dev, "lu_reset: internal abort failed\n");
1370 hisi_sas_dereg_device(hisi_hba, device);
1372 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1373 if (rc == TMF_RESP_FUNC_COMPLETE) {
1374 spin_lock_irqsave(&hisi_hba->lock, flags);
1375 hisi_sas_release_task(hisi_hba, device);
1376 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1380 if (rc != TMF_RESP_FUNC_COMPLETE)
1381 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
1382 sas_dev->device_id, rc);
1386 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1388 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1389 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1391 queue_work(hisi_hba->wq, &r.work);
1392 wait_for_completion(r.completion);
1394 return TMF_RESP_FUNC_COMPLETE;
1396 return TMF_RESP_FUNC_FAILED;
1399 static int hisi_sas_query_task(struct sas_task *task)
1401 struct scsi_lun lun;
1402 struct hisi_sas_tmf_task tmf_task;
1403 int rc = TMF_RESP_FUNC_FAILED;
1405 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1406 struct scsi_cmnd *cmnd = task->uldd_task;
1407 struct domain_device *device = task->dev;
1408 struct hisi_sas_slot *slot = task->lldd_task;
1409 u32 tag = slot->idx;
1411 int_to_scsilun(cmnd->device->lun, &lun);
1412 tmf_task.tmf = TMF_QUERY_TASK;
1413 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1415 rc = hisi_sas_debug_issue_ssp_tmf(device,
1419 /* The task is still in Lun, release it then */
1420 case TMF_RESP_FUNC_SUCC:
1421 /* The task is not in Lun or failed, reset the phy */
1422 case TMF_RESP_FUNC_FAILED:
1423 case TMF_RESP_FUNC_COMPLETE:
1426 rc = TMF_RESP_FUNC_FAILED;
1434 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1435 struct sas_task *task, int abort_flag,
1438 struct domain_device *device = task->dev;
1439 struct hisi_sas_device *sas_dev = device->lldd_dev;
1440 struct device *dev = hisi_hba->dev;
1441 struct hisi_sas_port *port;
1442 struct hisi_sas_slot *slot;
1443 struct asd_sas_port *sas_port = device->port;
1444 struct hisi_sas_cmd_hdr *cmd_hdr_base;
1445 struct hisi_sas_dq *dq = sas_dev->dq;
1446 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1447 unsigned long flags, flags_dq;
1449 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
1455 port = to_hisi_sas_port(sas_port);
1457 /* simply get a slot and send abort command */
1458 spin_lock_irqsave(&hisi_hba->lock, flags);
1459 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
1461 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1464 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1466 spin_lock_irqsave(&dq->lock, flags_dq);
1467 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1471 dlvry_queue = dq->id;
1472 dlvry_queue_slot = dq->wr_point;
1474 slot = &hisi_hba->slot_info[slot_idx];
1475 memset(slot, 0, sizeof(struct hisi_sas_slot));
1477 slot->idx = slot_idx;
1478 slot->n_elem = n_elem;
1479 slot->dlvry_queue = dlvry_queue;
1480 slot->dlvry_queue_slot = dlvry_queue_slot;
1481 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1482 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1485 task->lldd_task = slot;
1487 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
1488 GFP_ATOMIC, &slot->buf_dma);
1494 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1495 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
1496 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
1498 rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1499 abort_flag, task_tag);
1503 spin_lock_irqsave(&hisi_hba->lock, flags);
1504 list_add_tail(&slot->entry, &sas_dev->list);
1505 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1506 spin_lock_irqsave(&task->task_state_lock, flags);
1507 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1508 spin_unlock_irqrestore(&task->task_state_lock, flags);
1510 dq->slot_prep = slot;
1512 /* send abort command to the chip */
1513 hisi_hba->hw->start_delivery(dq);
1514 spin_unlock_irqrestore(&dq->lock, flags_dq);
1519 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
1522 spin_lock_irqsave(&hisi_hba->lock, flags);
1523 hisi_sas_slot_index_free(hisi_hba, slot_idx);
1524 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1525 spin_unlock_irqrestore(&dq->lock, flags_dq);
1527 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
1546 struct sas_task *task;
1547 struct hisi_sas_device *sas_dev = device->lldd_dev;
1548 struct device *dev = hisi_hba->dev;
	/*
	 * If the hw layer does not implement prep_abort(), this hardware
	 * either does not support internal abort or does not need it. In
	 * that case return TMF_RESP_FUNC_FAILED and let the remaining
	 * steps proceed as if the internal abort had been executed and
	 * its completion had been returned on the CQ.
	 */
1557 if (!hisi_hba->hw->prep_abort)
1558 return TMF_RESP_FUNC_FAILED;
1560 task = sas_alloc_slow_task(GFP_KERNEL);
1565 task->task_proto = device->tproto;
1566 task->task_done = hisi_sas_task_done;
1567 task->slow_task->timer.function = hisi_sas_tmf_timedout;
1568 task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
1569 add_timer(&task->slow_task->timer);
1571 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1572 task, abort_flag, tag);
1574 del_timer(&task->slow_task->timer);
1575 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1579 wait_for_completion(&task->slow_task->completion);
1580 res = TMF_RESP_FUNC_FAILED;
1582 /* Internal abort timed out */
1583 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1584 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1585 struct hisi_sas_slot *slot = task->lldd_task;
1589 dev_err(dev, "internal task abort: timeout and not done.\n");
1593 dev_err(dev, "internal task abort: timeout.\n");
1596 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1597 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1598 res = TMF_RESP_FUNC_COMPLETE;
1602 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1603 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1604 res = TMF_RESP_FUNC_SUCC;
1609 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1610 "resp: 0x%x sts 0x%x\n",
1611 SAS_ADDR(device->sas_addr),
1613 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1614 task->task_status.stat);
1615 sas_free_task(task);
1620 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1622 hisi_sas_port_notify_formed(sas_phy);
1625 static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
1629 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1630 u8 reg_index, u8 reg_count, u8 *write_data)
1632 struct hisi_hba *hisi_hba = sha->lldd_ha;
1634 if (!hisi_hba->hw->write_gpio)
1637 return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1638 reg_index, reg_count, write_data);
1641 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1643 phy->phy_attached = 0;
1648 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1650 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1651 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1652 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1655 /* Phy down but ready */
1656 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1657 hisi_sas_port_notify_formed(sas_phy);
1659 struct hisi_sas_port *port = phy->port;
1661 /* Phy down and not ready */
1662 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1663 sas_phy_disconnected(sas_phy);
1666 if (phy->phy_type & PORT_TYPE_SAS) {
1667 int port_id = port->id;
1669 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1671 port->port_attached = 0;
1672 } else if (phy->phy_type & PORT_TYPE_SATA)
1673 port->port_attached = 0;
1675 hisi_sas_phy_disconnected(phy);
1678 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
static struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL
};

static struct scsi_host_template _hisi_sas_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= hisi_sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_target_reset_handler = sas_eh_target_reset_handler,
	.target_destroy		= sas_target_destroy,
	.shost_attrs		= host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_port_deformed	= hisi_sas_port_deformed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};
1741 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1743 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1745 for (i = 0; i < hisi_hba->queue_count; i++) {
1746 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1747 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1749 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1750 memset(hisi_hba->cmd_hdr[i], 0, s);
1753 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1754 memset(hisi_hba->complete_hdr[i], 0, s);
1758 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1759 memset(hisi_hba->initial_fis, 0, s);
1761 s = max_command_entries * sizeof(struct hisi_sas_iost);
1762 memset(hisi_hba->iost, 0, s);
1764 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1765 memset(hisi_hba->breakpoint, 0, s);
1767 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1768 memset(hisi_hba->sata_breakpoint, 0, s);
1770 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
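/*
 * Allocate per-HBA resources: delivery/completion queue descriptors, the slot
 * buffer DMA pool, ITCT, IOST and breakpoint tables, initial FIS memory, the
 * slot tag bitmap and the driver workqueue.
 */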
1772 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1774 struct device *dev = hisi_hba->dev;
1775 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1777 spin_lock_init(&hisi_hba->lock);
1778 for (i = 0; i < hisi_hba->n_phy; i++) {
1779 hisi_sas_phy_init(hisi_hba, i);
1780 hisi_hba->port[i].port_attached = 0;
1781 hisi_hba->port[i].id = -1;
1784 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1785 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1786 hisi_hba->devices[i].device_id = i;
1787 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1790 for (i = 0; i < hisi_hba->queue_count; i++) {
1791 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1792 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1794 /* Completion queue structure */
1796 cq->hisi_hba = hisi_hba;
1798 /* Delivery queue structure */
1799 spin_lock_init(&dq->lock);
1801 dq->hisi_hba = hisi_hba;
1803 /* Delivery queue */
1804 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1805 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1806 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1807 if (!hisi_hba->cmd_hdr[i])
1810 /* Completion queue */
1811 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1812 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1813 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1814 if (!hisi_hba->complete_hdr[i])
1818 s = sizeof(struct hisi_sas_slot_buf_table);
1819 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1820 if (!hisi_hba->buffer_pool)
1823 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1824 hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
1826 if (!hisi_hba->itct)
1829 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1830 sizeof(struct hisi_sas_slot),
1832 if (!hisi_hba->slot_info)
1835 s = max_command_entries * sizeof(struct hisi_sas_iost);
1836 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1838 if (!hisi_hba->iost)
1841 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1842 hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1843 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1844 if (!hisi_hba->breakpoint)
1847 hisi_hba->slot_index_count = max_command_entries;
1848 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1849 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1850 if (!hisi_hba->slot_index_tags)
1853 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1854 hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1855 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1856 if (!hisi_hba->initial_fis)
1859 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1860 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1861 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1862 if (!hisi_hba->sata_breakpoint)
1864 hisi_sas_init_mem(hisi_hba);
1866 hisi_sas_slot_index_init(hisi_hba);
1868 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1869 if (!hisi_hba->wq) {
1870 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1878 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1880 void hisi_sas_free(struct hisi_hba *hisi_hba)
1882 struct device *dev = hisi_hba->dev;
1883 int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1885 for (i = 0; i < hisi_hba->queue_count; i++) {
1886 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1887 if (hisi_hba->cmd_hdr[i])
1888 dma_free_coherent(dev, s,
1889 hisi_hba->cmd_hdr[i],
1890 hisi_hba->cmd_hdr_dma[i]);
1892 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1893 if (hisi_hba->complete_hdr[i])
1894 dma_free_coherent(dev, s,
1895 hisi_hba->complete_hdr[i],
1896 hisi_hba->complete_hdr_dma[i]);
1899 dma_pool_destroy(hisi_hba->buffer_pool);
1901 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1903 dma_free_coherent(dev, s,
1904 hisi_hba->itct, hisi_hba->itct_dma);
1906 s = max_command_entries * sizeof(struct hisi_sas_iost);
1908 dma_free_coherent(dev, s,
1909 hisi_hba->iost, hisi_hba->iost_dma);
1911 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1912 if (hisi_hba->breakpoint)
1913 dma_free_coherent(dev, s,
1914 hisi_hba->breakpoint,
1915 hisi_hba->breakpoint_dma);
1918 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1919 if (hisi_hba->initial_fis)
1920 dma_free_coherent(dev, s,
1921 hisi_hba->initial_fis,
1922 hisi_hba->initial_fis_dma);
1924 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1925 if (hisi_hba->sata_breakpoint)
1926 dma_free_coherent(dev, s,
1927 hisi_hba->sata_breakpoint,
1928 hisi_hba->sata_breakpoint_dma);
1931 destroy_workqueue(hisi_hba->wq);
1933 EXPORT_SYMBOL_GPL(hisi_sas_free);
1935 void hisi_sas_rst_work_handler(struct work_struct *work)
1937 struct hisi_hba *hisi_hba =
1938 container_of(work, struct hisi_hba, rst_work);
1940 hisi_sas_controller_reset(hisi_hba);
1942 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
1944 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
1946 struct hisi_sas_rst *rst =
1947 container_of(work, struct hisi_sas_rst, work);
1949 if (!hisi_sas_controller_reset(rst->hisi_hba))
1951 complete(rst->completion);
1953 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
1955 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
1957 struct device *dev = hisi_hba->dev;
1958 struct platform_device *pdev = hisi_hba->platform_dev;
1959 struct device_node *np = pdev ? pdev->dev.of_node : NULL;
1962 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
1964 dev_err(dev, "could not get property sas-addr\n");
1970 * These properties are only required for platform device-based
1971 * controller with DT firmware.
1973 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
1974 "hisilicon,sas-syscon");
1975 if (IS_ERR(hisi_hba->ctrl)) {
1976 dev_err(dev, "could not get syscon\n");
1980 if (device_property_read_u32(dev, "ctrl-reset-reg",
1981 &hisi_hba->ctrl_reset_reg)) {
1983 "could not get property ctrl-reset-reg\n");
1987 if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
1988 &hisi_hba->ctrl_reset_sts_reg)) {
1990 "could not get property ctrl-reset-sts-reg\n");
1994 if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
1995 &hisi_hba->ctrl_clock_ena_reg)) {
1997 "could not get property ctrl-clock-ena-reg\n");
2002 refclk = devm_clk_get(dev, NULL);
2004 dev_dbg(dev, "no ref clk property\n");
2006 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2008 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2009 dev_err(dev, "could not get property phy-count\n");
2013 if (device_property_read_u32(dev, "queue-count",
2014 &hisi_hba->queue_count)) {
2015 dev_err(dev, "could not get property queue-count\n");
2021 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2023 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2024 const struct hisi_sas_hw *hw)
2026 struct resource *res;
2027 struct Scsi_Host *shost;
2028 struct hisi_hba *hisi_hba;
2029 struct device *dev = &pdev->dev;
2031 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
2033 dev_err(dev, "scsi host alloc failed\n");
2036 hisi_hba = shost_priv(shost);
2038 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2040 hisi_hba->dev = dev;
2041 hisi_hba->platform_dev = pdev;
2042 hisi_hba->shost = shost;
2043 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2045 timer_setup(&hisi_hba->timer, NULL, 0);
2047 if (hisi_sas_get_fw_info(hisi_hba) < 0)
2050 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2051 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2052 dev_err(dev, "No usable DMA addressing method\n");
2056 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2057 hisi_hba->regs = devm_ioremap_resource(dev, res);
2058 if (IS_ERR(hisi_hba->regs))
2061 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2063 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2064 if (IS_ERR(hisi_hba->sgpio_regs))
2068 if (hisi_sas_alloc(hisi_hba, shost)) {
2069 hisi_sas_free(hisi_hba);
2075 scsi_host_put(shost);
2076 dev_err(dev, "shost alloc failed\n");
2080 int hisi_sas_probe(struct platform_device *pdev,
2081 const struct hisi_sas_hw *hw)
2083 struct Scsi_Host *shost;
2084 struct hisi_hba *hisi_hba;
2085 struct device *dev = &pdev->dev;
2086 struct asd_sas_phy **arr_phy;
2087 struct asd_sas_port **arr_port;
2088 struct sas_ha_struct *sha;
2089 int rc, phy_nr, port_nr, i;
2091 shost = hisi_sas_shost_alloc(pdev, hw);
2095 sha = SHOST_TO_SAS_HA(shost);
2096 hisi_hba = shost_priv(shost);
2097 platform_set_drvdata(pdev, sha);
2099 phy_nr = port_nr = hisi_hba->n_phy;
2101 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2102 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2103 if (!arr_phy || !arr_port) {
2108 sha->sas_phy = arr_phy;
2109 sha->sas_port = arr_port;
2110 sha->lldd_ha = hisi_hba;
2112 shost->transportt = hisi_sas_stt;
2113 shost->max_id = HISI_SAS_MAX_DEVICES;
2114 shost->max_lun = ~0;
2115 shost->max_channel = 1;
2116 shost->max_cmd_len = 16;
2117 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2118 shost->can_queue = hisi_hba->hw->max_command_entries;
2119 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2121 sha->sas_ha_name = DRV_NAME;
2122 sha->dev = hisi_hba->dev;
2123 sha->lldd_module = THIS_MODULE;
2124 sha->sas_addr = &hisi_hba->sas_addr[0];
2125 sha->num_phys = hisi_hba->n_phy;
2126 sha->core.shost = hisi_hba->shost;
2128 for (i = 0; i < hisi_hba->n_phy; i++) {
2129 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2130 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2133 rc = scsi_add_host(shost, &pdev->dev);
2137 rc = sas_register_ha(sha);
2139 goto err_out_register_ha;
2141 rc = hisi_hba->hw->hw_init(hisi_hba);
2143 goto err_out_register_ha;
2145 scsi_scan_host(shost);
2149 err_out_register_ha:
2150 scsi_remove_host(shost);
2152 hisi_sas_free(hisi_hba);
2153 scsi_host_put(shost);
2156 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2158 int hisi_sas_remove(struct platform_device *pdev)
2160 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2161 struct hisi_hba *hisi_hba = sha->lldd_ha;
2162 struct Scsi_Host *shost = sha->core.shost;
2164 if (timer_pending(&hisi_hba->timer))
2165 del_timer(&hisi_hba->timer);
2167 sas_unregister_ha(sha);
2168 sas_remove_host(sha->core.shost);
2170 hisi_sas_free(hisi_hba);
2171 scsi_host_put(shost);
2174 EXPORT_SYMBOL_GPL(hisi_sas_remove);
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);