linux.git: drivers/scsi/ufs/ufshcd.c ("scsi: ufs: Unlock on a couple error paths")
/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *      Santosh Yaraganavi <santosh.sy@samsung.com>
 *      Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
                                 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT  100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

#define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
        ({                                                              \
                int _ret;                                               \
                if (_on)                                                \
                        _ret = ufshcd_enable_vreg(_dev, _vreg);         \
                else                                                    \
                        _ret = ufshcd_disable_vreg(_dev, _vreg);        \
                _ret;                                                   \
        })
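
/*
 * The statement expression above lets ufshcd_toggle_vreg() be used like a
 * function returning the enable/disable result. Illustrative (hypothetical)
 * caller, where vreg stands for any struct ufs_vreg pointer:
 *
 *        ret = ufshcd_toggle_vreg(dev, vreg, true);
 *        if (ret)
 *                return ret;
 */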

#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
        size_t __len = (len);                                            \
        print_hex_dump(KERN_ERR, prefix_str,                             \
                       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
                       16, 4, buf, __len, false);                        \
} while (0)

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
                     const char *prefix)
{
        u32 *regs;
        size_t pos;

        if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
                return -EINVAL;

        regs = kzalloc(len, GFP_ATOMIC);
        if (!regs)
                return -ENOMEM;

        for (pos = 0; pos < len; pos += 4)
                regs[pos / 4] = ufshcd_readl(hba, offset + pos);

        ufshcd_hex_dump(prefix, regs, len);
        kfree(regs);

        return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);

enum {
        UFSHCD_MAX_CHANNEL      = 0,
        UFSHCD_MAX_ID           = 1,
        UFSHCD_CMD_PER_LUN      = 32,
        UFSHCD_CAN_QUEUE        = 32,
};

/* UFSHCD states */
enum {
        UFSHCD_STATE_RESET,
        UFSHCD_STATE_ERROR,
        UFSHCD_STATE_OPERATIONAL,
        UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
        UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
        UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
        UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
        UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
        UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
        UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
        UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

#define ufshcd_set_eh_in_progress(h) \
        ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
        ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
        ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
        ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
        ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
        ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
        ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
        ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
        ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
        {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
        {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
        {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
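
/*
 * Each ufs_pm_level indexes into this table: for example, UFS_PM_LVL_1
 * keeps the device in UFS_ACTIVE_PWR_MODE while putting the link into
 * UIC_LINK_HIBERN8_STATE, and UFS_PM_LVL_5 powers the device down and
 * turns the link off entirely.
 */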

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
        return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
        return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
                                        enum uic_link_state link_state)
{
        enum ufs_pm_level lvl;

        for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
                if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
                        (ufs_pm_lvl_states[lvl].link_state == link_state))
                        return lvl;
        }

        /* if no match is found, return level 0 */
        return UFS_PM_LVL_0;
}

static struct ufs_dev_fix ufs_fixups[] = {
        /* UFS cards deviations table */
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
                UFS_DEVICE_QUIRK_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
                UFS_DEVICE_QUIRK_PA_TACTIVATE),
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
        UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
                UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),

        END_FIX
};

static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
                                 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
        return tag >= 0 && tag < hba->nutrs;
}

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
        if (!hba->is_irq_enabled) {
                enable_irq(hba->irq);
                hba->is_irq_enabled = true;
        }
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
        if (hba->is_irq_enabled) {
                disable_irq(hba->irq);
                hba->is_irq_enabled = false;
        }
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
        if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
                scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
        if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
                scsi_block_requests(hba->host);
}
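
/*
 * Note that scsi_block_reqs_cnt makes the two helpers above safe to nest:
 * only the first ufshcd_scsi_block_requests() call actually blocks the
 * SCSI host, and only the final matching ufshcd_scsi_unblock_requests()
 * unblocks it again.
 */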

static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                const char *str)
{
        struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

        trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                const char *str)
{
        struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

        trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                const char *str)
{
        int off = (int)tag - hba->nutrs;
        struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];

        trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
                        &descp->input_param1);
}

static void ufshcd_add_command_trace(struct ufs_hba *hba,
                unsigned int tag, const char *str)
{
        sector_t lba = -1;
        u8 opcode = 0;
        u32 intr, doorbell;
        struct ufshcd_lrb *lrbp = &hba->lrb[tag];
        int transfer_len = -1;

        if (!trace_ufshcd_command_enabled()) {
                /* trace UPIU without tracing the command */
                if (lrbp->cmd)
                        ufshcd_add_cmd_upiu_trace(hba, tag, str);
                return;
        }

        if (lrbp->cmd) { /* data phase exists */
                /* trace UPIU also */
                ufshcd_add_cmd_upiu_trace(hba, tag, str);
                opcode = (u8)(*lrbp->cmd->cmnd);
                if ((opcode == READ_10) || (opcode == WRITE_10)) {
                        /*
                         * Currently we only fully trace read(10) and write(10)
                         * commands
                         */
                        if (lrbp->cmd->request && lrbp->cmd->request->bio)
                                lba =
                                  lrbp->cmd->request->bio->bi_iter.bi_sector;
                        transfer_len = be32_to_cpu(
                                lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
                }
        }

        intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
        doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        trace_ufshcd_command(dev_name(hba->dev), str, tag,
                                doorbell, transfer_len, intr, lba, opcode);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;

        if (list_empty(head))
                return;

        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
                                clki->max_freq)
                        dev_err(hba->dev, "clk: %s, rate: %u\n",
                                        clki->name, clki->curr_freq);
        }
}

static void ufshcd_print_err_hist(struct ufs_hba *hba,
                                  struct ufs_err_reg_hist *err_hist,
                                  char *err_name)
{
        int i;
        bool found = false;

        for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
                int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;

                if (err_hist->reg[p] == 0)
                        continue;
                dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
                        err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
                found = true;
        }

        if (!found)
                dev_err(hba->dev, "No record of %s errors\n", err_name);
}
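
/*
 * The error history above is a small ring buffer: err_hist->pos is the
 * slot that will be overwritten next, so starting the walk at pos prints
 * the recorded register values from oldest to newest.
 */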

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
        dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
                hba->ufs_version, hba->capabilities);
        dev_err(hba->dev,
                "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
                (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
        dev_err(hba->dev,
                "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
                ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
                hba->ufs_stats.hibern8_exit_cnt);

        ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
                              "auto_hibern8_err");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
                              "link_startup_fail");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
                              "suspend_fail");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
        ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");

        ufshcd_print_clk_freqs(hba);

        if (hba->vops && hba->vops->dbg_register_dump)
                hba->vops->dbg_register_dump(hba);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
        struct ufshcd_lrb *lrbp;
        int prdt_length;
        int tag;

        for_each_set_bit(tag, &bitmap, hba->nutrs) {
                lrbp = &hba->lrb[tag];

                dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
                                tag, ktime_to_us(lrbp->issue_time_stamp));
                dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
                                tag, ktime_to_us(lrbp->compl_time_stamp));
                dev_err(hba->dev,
                        "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
                        tag, (u64)lrbp->utrd_dma_addr);

                ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
                                sizeof(struct utp_transfer_req_desc));
                dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
                        (u64)lrbp->ucd_req_dma_addr);
                ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
                                sizeof(struct utp_upiu_req));
                dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
                        (u64)lrbp->ucd_rsp_dma_addr);
                ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
                                sizeof(struct utp_upiu_rsp));

                prdt_length = le16_to_cpu(
                        lrbp->utr_descriptor_ptr->prd_table_length);
                dev_err(hba->dev,
                        "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
                        tag, prdt_length,
                        (u64)lrbp->ucd_prdt_dma_addr);

                if (pr_prdt)
                        ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
                                sizeof(struct ufshcd_sg_entry) * prdt_length);
        }
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
        int tag;

        for_each_set_bit(tag, &bitmap, hba->nutmrs) {
                struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

                dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
                ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
        }
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
        dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
        dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
                hba->outstanding_reqs, hba->outstanding_tasks);
        dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
                hba->saved_err, hba->saved_uic_err);
        dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
                hba->curr_dev_pwr_mode, hba->uic_link_state);
        dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
                hba->pm_op_in_progress, hba->is_sys_suspended);
        dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
                hba->auto_bkops_enabled, hba->host->host_self_blocked);
        dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
        dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
                hba->eh_flags, hba->req_abort_count);
        dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
                hba->capabilities, hba->caps);
        dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
                hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print the power parameters saved in hba->pwr_info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
        static const char * const names[] = {
                "INVALID MODE",
                "FAST MODE",
                "SLOW_MODE",
                "INVALID MODE",
                "FASTAUTO_MODE",
                "SLOWAUTO_MODE",
                "INVALID MODE",
        };

        dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
                 __func__,
                 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
                 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
                 names[hba->pwr_info.pwr_rx],
                 names[hba->pwr_info.pwr_tx],
                 hba->pwr_info.hs_rate);
}

/**
 * ufshcd_wait_for_register - wait for a register to reach the given value
 * @hba: per-adapter instance
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microsecs
 * @timeout_ms: timeout in millisecs
 * @can_sleep: perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
                                u32 val, unsigned long interval_us,
                                unsigned long timeout_ms, bool can_sleep)
{
        int err = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

        /* ignore bits that we don't intend to wait on */
        val = val & mask;

        while ((ufshcd_readl(hba, reg) & mask) != val) {
                if (can_sleep)
                        usleep_range(interval_us, interval_us + 50);
                else
                        udelay(interval_us);
                if (time_after(jiffies, timeout)) {
                        if ((ufshcd_readl(hba, reg) & mask) != val)
                                err = -ETIMEDOUT;
                        break;
                }
        }

        return err;
}
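
/*
 * Illustrative (hypothetical) use of the helper above: poll a transfer
 * request doorbell bit until the controller clears it, sleeping roughly
 * 1 ms between reads and giving up after 1000 ms:
 *
 *        err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *                                       1 << tag, 0, 1000, 1000, true);
 */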

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
        u32 intr_mask = 0;

        switch (hba->ufs_version) {
        case UFSHCI_VERSION_10:
                intr_mask = INTERRUPT_MASK_ALL_VER_10;
                break;
        case UFSHCI_VERSION_11:
        case UFSHCI_VERSION_20:
                intr_mask = INTERRUPT_MASK_ALL_VER_11;
                break;
        case UFSHCI_VERSION_21:
        default:
                intr_mask = INTERRUPT_MASK_ALL_VER_21;
                break;
        }

        return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
                return ufshcd_vops_get_ufs_hci_version(hba);

        return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *                            the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
                                                DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
        return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
                ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
        else
                ufshcd_writel(hba, ~(1 << pos),
                                REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
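
/*
 * The inverted mask above follows the UFSHCI convention that a list-clear
 * register bit is cleared by writing '0' to it; controllers with the
 * UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR quirk instead expect a '1' in the
 * position to be cleared.
 */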

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
        if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
                ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
        else
                ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
        __clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns 0 on success and a positive value on failure
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
        return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
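
/*
 * UFSHCD_STATUS_READY bundles the UCRDY, UTRLRDY and UTMRLRDY bits, so
 * the check above reports success only once the controller and both
 * request lists are ready to accept work.
 */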

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
               MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument 3.
 * Returns the attribute value read from the register.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
        return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/**
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *                              from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
                        MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE |
                      INT_AGGR_COUNTER_AND_TIMER_RESET,
                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
        ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
                      INT_AGGR_COUNTER_THLD_VAL(cnt) |
                      INT_AGGR_TIMEOUT_VAL(tmout),
                      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
        ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers
 *                      Setting the run-stop registers to 1 tells the host
 *                      controller that it can start processing requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
        ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TASK_REQ_LIST_RUN_STOP);
        ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
                      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
        ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns true if the controller is still inactive (not yet enabled),
 * false if it is active
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
        return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
                ? false : true;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
        /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
        if ((hba->ufs_version == UFSHCI_VERSION_10) ||
            (hba->ufs_version == UFSHCI_VERSION_11))
                return UFS_UNIPRO_VER_1_41;
        else
                return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
        /*
         * If both host and device support UniPro ver1.6 or later, PA layer
         * parameters tuning happens during link startup itself.
         *
         * We can manually tune PA layer parameters if either host or device
         * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
         * logic simple, we will only do manual tuning if local unipro version
         * doesn't support ver1.6 or later.
         */
        if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
                return true;
        else
                return false;
}

static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
        int ret = 0;
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
        ktime_t start = ktime_get();
        bool clk_state_changed = false;

        if (list_empty(head))
                goto out;

        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
        if (ret)
                return ret;

        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;

                                clk_state_changed = true;
                                ret = clk_set_rate(clki->clk, clki->max_freq);
                                if (ret) {
                                        dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
                                                __func__, clki->name,
                                                clki->max_freq, ret);
                                        break;
                                }
                                trace_ufshcd_clk_scaling(dev_name(hba->dev),
                                                "scaled up", clki->name,
                                                clki->curr_freq,
                                                clki->max_freq);

                                clki->curr_freq = clki->max_freq;

                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;

                                clk_state_changed = true;
                                ret = clk_set_rate(clki->clk, clki->min_freq);
                                if (ret) {
                                        dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
                                                __func__, clki->name,
                                                clki->min_freq, ret);
                                        break;
                                }
                                trace_ufshcd_clk_scaling(dev_name(hba->dev),
                                                "scaled down", clki->name,
                                                clki->curr_freq,
                                                clki->min_freq);
                                clki->curr_freq = clki->min_freq;
                        }
                }
                dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
                                clki->name, clk_get_rate(clki->clk));
        }

        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
        if (clk_state_changed)
                trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
                        (scale_up ? "up" : "down"),
                        ktime_to_us(ktime_sub(ktime_get(), start)), ret);
        return ret;
}
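
/*
 * Note the pattern above: vendor hooks bracket the actual clk_set_rate()
 * calls, with the PRE_CHANGE notification giving platform glue a chance
 * to veto or prepare for the new frequency and POST_CHANGE letting it
 * react after the core clocks have been switched.
 */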

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
                                               bool scale_up)
{
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;

        if (list_empty(head))
                return false;

        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (scale_up && clki->max_freq) {
                                if (clki->curr_freq == clki->max_freq)
                                        continue;
                                return true;
                        } else if (!scale_up && clki->min_freq) {
                                if (clki->curr_freq == clki->min_freq)
                                        continue;
                                return true;
                        }
                }
        }

        return false;
}

static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
                                        u64 wait_timeout_us)
{
        unsigned long flags;
        int ret = 0;
        u32 tm_doorbell;
        u32 tr_doorbell;
        bool timeout = false, do_last_check = false;
        ktime_t start;

        ufshcd_hold(hba, false);
        spin_lock_irqsave(hba->host->host_lock, flags);
        /*
         * Wait for all the outstanding tasks/transfer requests.
         * Verify by checking the doorbell registers are clear.
         */
        start = ktime_get();
        do {
                if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
                        ret = -EBUSY;
                        goto out;
                }

                tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
                tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                if (!tm_doorbell && !tr_doorbell) {
                        timeout = false;
                        break;
                } else if (do_last_check) {
                        break;
                }

                spin_unlock_irqrestore(hba->host->host_lock, flags);
                schedule();
                if (ktime_to_us(ktime_sub(ktime_get(), start)) >
                    wait_timeout_us) {
                        timeout = true;
                        /*
                         * We might have scheduled out for long time so make
                         * sure to check if doorbells are cleared by this time
                         * or not.
                         */
                        do_last_check = true;
                }
                spin_lock_irqsave(hba->host->host_lock, flags);
        } while (tm_doorbell || tr_doorbell);

        if (timeout) {
                dev_err(hba->dev,
                        "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
                        __func__, tm_doorbell, tr_doorbell);
                ret = -EBUSY;
        }
out:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        ufshcd_release(hba);
        return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
        #define UFS_MIN_GEAR_TO_SCALE_DOWN      UFS_HS_G1
        int ret = 0;
        struct ufs_pa_layer_attr new_pwr_info;

        if (scale_up) {
                memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
                       sizeof(struct ufs_pa_layer_attr));
        } else {
                memcpy(&new_pwr_info, &hba->pwr_info,
                       sizeof(struct ufs_pa_layer_attr));

                if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
                    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
                        /* save the current power mode */
                        memcpy(&hba->clk_scaling.saved_pwr_info.info,
                                &hba->pwr_info,
                                sizeof(struct ufs_pa_layer_attr));

                        /* scale down gear */
                        new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
                        new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
                }
        }

        /* check if the power mode needs to be changed or not? */
        ret = ufshcd_change_power_mode(hba, &new_pwr_info);

        if (ret)
                dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
                        __func__, ret,
                        hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
                        new_pwr_info.gear_tx, new_pwr_info.gear_rx);

        return ret;
}

static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
        #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
        int ret = 0;
        /*
         * make sure that there are no outstanding requests when
         * clock scaling is in progress
         */
        ufshcd_scsi_block_requests(hba);
        down_write(&hba->clk_scaling_lock);
        if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
                ret = -EBUSY;
                up_write(&hba->clk_scaling_lock);
                ufshcd_scsi_unblock_requests(hba);
        }

        return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
        up_write(&hba->clk_scaling_lock);
        ufshcd_scsi_unblock_requests(hba);
}
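
/*
 * These two helpers must stay paired: on failure
 * ufshcd_clock_scaling_prepare() drops clk_scaling_lock and unblocks
 * requests itself, so callers invoke ufshcd_clock_scaling_unprepare()
 * only after a successful prepare.
 */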

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
        int ret = 0;

        /* let's not get into low power until clock scaling is completed */
        ufshcd_hold(hba, false);

        ret = ufshcd_clock_scaling_prepare(hba);
        if (ret) {
                /* prepare failed and unwound itself; drop our clock hold */
                ufshcd_release(hba);
                return ret;
        }

        /* scale down the gear before scaling down clocks */
        if (!scale_up) {
                ret = ufshcd_scale_gear(hba, false);
                if (ret)
                        goto out;
        }

        ret = ufshcd_scale_clks(hba, scale_up);
        if (ret) {
                if (!scale_up)
                        ufshcd_scale_gear(hba, true);
                goto out;
        }

        /* scale up the gear after scaling up clocks */
        if (scale_up) {
                ret = ufshcd_scale_gear(hba, true);
                if (ret) {
                        ufshcd_scale_clks(hba, false);
                        goto out;
                }
        }

        ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
        ufshcd_clock_scaling_unprepare(hba);
        ufshcd_release(hba);
        return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.suspend_work);
        unsigned long irq_flags;

        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return;
        }
        hba->clk_scaling.is_suspended = true;
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

        __ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.resume_work);
        unsigned long irq_flags;

        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (!hba->clk_scaling.is_suspended) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return;
        }
        hba->clk_scaling.is_suspended = false;
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

        devfreq_resume_device(hba->devfreq);
}

static int ufshcd_devfreq_target(struct device *dev,
                                unsigned long *freq, u32 flags)
{
        int ret = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
        ktime_t start;
        bool scale_up, sched_clk_scaling_suspend_work = false;
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
        unsigned long irq_flags;

        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;

        spin_lock_irqsave(hba->host->host_lock, irq_flags);
        if (ufshcd_eh_in_progress(hba)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                return 0;
        }

        if (!hba->clk_scaling.active_reqs)
                sched_clk_scaling_suspend_work = true;

        if (list_empty(clk_list)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                goto out;
        }

        clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
        scale_up = (*freq == clki->max_freq) ? true : false;
        if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
                spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
                ret = 0;
                goto out; /* no state change required */
        }
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

        start = ktime_get();
        ret = ufshcd_devfreq_scale(hba, scale_up);

        trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
                (scale_up ? "up" : "down"),
                ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
        if (sched_clk_scaling_suspend_work)
                queue_work(hba->clk_scaling.workq,
                           &hba->clk_scaling.suspend_work);

        return ret;
}

static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
{
        int *busy = priv;

        WARN_ON_ONCE(reserved);
        (*busy)++;
        return false;
}

/* Whether or not any tag is in use by a request that is in progress. */
static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
{
        struct request_queue *q = hba->cmd_queue;
        int busy = 0;

        blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
        return busy;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
                struct devfreq_dev_status *stat)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
        unsigned long flags;

        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;

        memset(stat, 0, sizeof(*stat));

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (!scaling->window_start_t)
                goto start_window;

        if (scaling->is_busy_started)
                scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
                                        scaling->busy_start_t));

        stat->total_time = jiffies_to_usecs((long)jiffies -
                                (long)scaling->window_start_t);
        stat->busy_time = scaling->tot_busy_t;
start_window:
        scaling->window_start_t = jiffies;
        scaling->tot_busy_t = 0;

        if (hba->outstanding_reqs) {
                scaling->busy_start_t = ktime_get();
                scaling->is_busy_started = true;
        } else {
                scaling->busy_start_t = 0;
                scaling->is_busy_started = false;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
        .polling_ms     = 100,
        .target         = ufshcd_devfreq_target,
        .get_dev_status = ufshcd_devfreq_get_dev_status,
};

static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
        struct devfreq *devfreq;
        int ret;

        /* Skip devfreq if we don't have any clocks in the list */
        if (list_empty(clk_list))
                return 0;

        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
        dev_pm_opp_add(hba->dev, clki->min_freq, 0);
        dev_pm_opp_add(hba->dev, clki->max_freq, 0);

        devfreq = devfreq_add_device(hba->dev,
                        &ufs_devfreq_profile,
                        DEVFREQ_GOV_SIMPLE_ONDEMAND,
                        NULL);
        if (IS_ERR(devfreq)) {
                ret = PTR_ERR(devfreq);
                dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

                dev_pm_opp_remove(hba->dev, clki->min_freq);
                dev_pm_opp_remove(hba->dev, clki->max_freq);
                return ret;
        }

        hba->devfreq = devfreq;

        return 0;
}

static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;

        if (!hba->devfreq)
                return;

        devfreq_remove_device(hba->devfreq);
        hba->devfreq = NULL;

        clki = list_first_entry(clk_list, struct ufs_clk_info, list);
        dev_pm_opp_remove(hba->dev, clki->min_freq);
        dev_pm_opp_remove(hba->dev, clki->max_freq);
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
        unsigned long flags;

        devfreq_suspend_device(hba->devfreq);
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->clk_scaling.window_start_t = 0;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
        unsigned long flags;
        bool suspend = false;

        if (!ufshcd_is_clkscaling_supported(hba))
                return;

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (!hba->clk_scaling.is_suspended) {
                suspend = true;
                hba->clk_scaling.is_suspended = true;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        if (suspend)
                __ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
        unsigned long flags;
        bool resume = false;

        if (!ufshcd_is_clkscaling_supported(hba))
                return;

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->clk_scaling.is_suspended) {
                resume = true;
                hba->clk_scaling.is_suspended = false;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        if (resume)
                devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        u32 value;
        int err;

        if (kstrtou32(buf, 0, &value))
                return -EINVAL;

        value = !!value;
        if (value == hba->clk_scaling.is_allowed)
                goto out;

        pm_runtime_get_sync(hba->dev);
        ufshcd_hold(hba, false);

        cancel_work_sync(&hba->clk_scaling.suspend_work);
        cancel_work_sync(&hba->clk_scaling.resume_work);

        hba->clk_scaling.is_allowed = value;

        if (value) {
                ufshcd_resume_clkscaling(hba);
        } else {
                ufshcd_suspend_clkscaling(hba);
                err = ufshcd_devfreq_scale(hba, true);
                if (err)
                        dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
                                        __func__, err);
        }

        ufshcd_release(hba);
        pm_runtime_put_sync(hba->dev);
out:
        return count;
}

static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
        hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
        hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
        sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
        hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
        hba->clk_scaling.enable_attr.attr.mode = 0644;
        if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
                dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}

static void ufshcd_ungate_work(struct work_struct *work)
{
        int ret;
        unsigned long flags;
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                        clk_gating.ungate_work);

        cancel_delayed_work_sync(&hba->clk_gating.gate_work);

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->clk_gating.state == CLKS_ON) {
                spin_unlock_irqrestore(hba->host->host_lock, flags);
                goto unblock_reqs;
        }

        spin_unlock_irqrestore(hba->host->host_lock, flags);
        ufshcd_setup_clocks(hba, true);

        /* Exit from hibern8 */
        if (ufshcd_can_hibern8_during_gating(hba)) {
                /* Prevent gating in this path */
                hba->clk_gating.is_suspended = true;
                if (ufshcd_is_link_hibern8(hba)) {
                        ret = ufshcd_uic_hibern8_exit(hba);
                        if (ret)
                                dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
                                        __func__, ret);
                        else
                                ufshcd_set_link_active(hba);
                }
                hba->clk_gating.is_suspended = false;
        }
unblock_reqs:
        ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
        int rc = 0;
        unsigned long flags;

        if (!ufshcd_is_clkgating_allowed(hba))
                goto out;
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->clk_gating.active_reqs++;

        if (ufshcd_eh_in_progress(hba)) {
                spin_unlock_irqrestore(hba->host->host_lock, flags);
                return 0;
        }

start:
        switch (hba->clk_gating.state) {
        case CLKS_ON:
1511                 /*
1512                  * Wait for the ungate work to complete if in progress.
1513                  * Though the clocks may be in the ON state, the link
1514                  * could still be in hibern8 if hibern8 is allowed
1515                  * during clock gating.
1516                  * Make sure hibern8 is exited in addition to the
1517                  * clocks being ON.
1518                  */
1519                 if (ufshcd_can_hibern8_during_gating(hba) &&
1520                     ufshcd_is_link_hibern8(hba)) {
1521                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1522                         flush_work(&hba->clk_gating.ungate_work);
1523                         spin_lock_irqsave(hba->host->host_lock, flags);
1524                         goto start;
1525                 }
1526                 break;
1527         case REQ_CLKS_OFF:
1528                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1529                         hba->clk_gating.state = CLKS_ON;
1530                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1531                                                 hba->clk_gating.state);
1532                         break;
1533                 }
1534                 /*
1535                  * If we are here, it means gating work is either done or
1536                  * currently running. Hence, fall through to cancel gating
1537                  * work and to enable clocks.
1538                  */
1539                 /* fallthrough */
1540         case CLKS_OFF:
1541                 ufshcd_scsi_block_requests(hba);
1542                 hba->clk_gating.state = REQ_CLKS_ON;
1543                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1544                                         hba->clk_gating.state);
1545                 queue_work(hba->clk_gating.clk_gating_workq,
1546                            &hba->clk_gating.ungate_work);
1547                 /*
1548                  * fall through to check if we should wait for this
1549                  * work to be done or not.
1550                  */
1551                 /* fallthrough */
1552         case REQ_CLKS_ON:
1553                 if (async) {
1554                         rc = -EAGAIN;
1555                         hba->clk_gating.active_reqs--;
1556                         break;
1557                 }
1558
1559                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1560                 flush_work(&hba->clk_gating.ungate_work);
1561                 /* Make sure state is CLKS_ON before returning */
1562                 spin_lock_irqsave(hba->host->host_lock, flags);
1563                 goto start;
1564         default:
1565                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1566                                 __func__, hba->clk_gating.state);
1567                 break;
1568         }
1569         spin_unlock_irqrestore(hba->host->host_lock, flags);
1570 out:
1571         return rc;
1572 }
1573 EXPORT_SYMBOL_GPL(ufshcd_hold);
1574
1575 static void ufshcd_gate_work(struct work_struct *work)
1576 {
1577         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1578                         clk_gating.gate_work.work);
1579         unsigned long flags;
1580
1581         spin_lock_irqsave(hba->host->host_lock, flags);
1582         /*
1583          * In case you are here to cancel this work the gating state
1584          * would be marked as REQ_CLKS_ON. In that case save time by
1585          * skipping the gating work and exiting after changing the
1586          * clock state to CLKS_ON.
1587          */
1588         if (hba->clk_gating.is_suspended ||
1589                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1590                 hba->clk_gating.state = CLKS_ON;
1591                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1592                                         hba->clk_gating.state);
1593                 goto rel_lock;
1594         }
1595
1596         if (hba->clk_gating.active_reqs
1597                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1598                 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1599                 || hba->active_uic_cmd || hba->uic_async_done)
1600                 goto rel_lock;
1601
1602         spin_unlock_irqrestore(hba->host->host_lock, flags);
1603
1604         /* put the link into hibern8 mode before turning off clocks */
1605         if (ufshcd_can_hibern8_during_gating(hba)) {
1606                 if (ufshcd_uic_hibern8_enter(hba)) {
1607                         hba->clk_gating.state = CLKS_ON;
1608                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1609                                                 hba->clk_gating.state);
1610                         goto out;
1611                 }
1612                 ufshcd_set_link_hibern8(hba);
1613         }
1614
1615         if (!ufshcd_is_link_active(hba))
1616                 ufshcd_setup_clocks(hba, false);
1617         else
1618                 /* If link is active, device ref_clk can't be switched off */
1619                 __ufshcd_setup_clocks(hba, false, true);
1620
1621         /*
1622          * In case you are here to cancel this work the gating state
1623          * would be marked as REQ_CLKS_ON. In that case keep the state
1624          * as REQ_CLKS_ON, which anyway implies that clocks are off
1625          * and a request to turn them on is pending. This keeps the
1626          * state machine intact and ultimately prevents the cancel
1627          * work from running multiple times when new requests arrive
1628          * before the current cancel work is done.
1629          */
1630         spin_lock_irqsave(hba->host->host_lock, flags);
1631         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1632                 hba->clk_gating.state = CLKS_OFF;
1633                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1634                                         hba->clk_gating.state);
1635         }
1636 rel_lock:
1637         spin_unlock_irqrestore(hba->host->host_lock, flags);
1638 out:
1639         return;
1640 }
1641
1642 /* host lock must be held before calling this variant */
1643 static void __ufshcd_release(struct ufs_hba *hba)
1644 {
1645         if (!ufshcd_is_clkgating_allowed(hba))
1646                 return;
1647
1648         hba->clk_gating.active_reqs--;
1649
1650         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1651                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1652                 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1653                 || hba->active_uic_cmd || hba->uic_async_done
1654                 || ufshcd_eh_in_progress(hba))
1655                 return;
1656
1657         hba->clk_gating.state = REQ_CLKS_OFF;
1658         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1659         queue_delayed_work(hba->clk_gating.clk_gating_workq,
1660                            &hba->clk_gating.gate_work,
1661                            msecs_to_jiffies(hba->clk_gating.delay_ms));
1662 }
1663
1664 void ufshcd_release(struct ufs_hba *hba)
1665 {
1666         unsigned long flags;
1667
1668         spin_lock_irqsave(hba->host->host_lock, flags);
1669         __ufshcd_release(hba);
1670         spin_unlock_irqrestore(hba->host->host_lock, flags);
1671 }
1672 EXPORT_SYMBOL_GPL(ufshcd_release);
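
/*
 * A minimal usage sketch (this helper is hypothetical; everything it
 * calls is real): callers that touch the host outside of the normal
 * request path are expected to bracket the access with ufshcd_hold()
 * and ufshcd_release() so the clocks stay on and the link is active.
 */
static inline u32 ufshcd_example_peek_hcs(struct ufs_hba *hba)
{
	u32 val;

	ufshcd_hold(hba, false);	/* synchronous: returns with CLKS_ON */
	val = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	ufshcd_release(hba);		/* may arm the delayed gate_work */

	return val;
}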
1673
1674 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1675                 struct device_attribute *attr, char *buf)
1676 {
1677         struct ufs_hba *hba = dev_get_drvdata(dev);
1678
1679         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1680 }
1681
1682 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1683                 struct device_attribute *attr, const char *buf, size_t count)
1684 {
1685         struct ufs_hba *hba = dev_get_drvdata(dev);
1686         unsigned long flags, value;
1687
1688         if (kstrtoul(buf, 0, &value))
1689                 return -EINVAL;
1690
1691         spin_lock_irqsave(hba->host->host_lock, flags);
1692         hba->clk_gating.delay_ms = value;
1693         spin_unlock_irqrestore(hba->host->host_lock, flags);
1694         return count;
1695 }
1696
1697 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1698                 struct device_attribute *attr, char *buf)
1699 {
1700         struct ufs_hba *hba = dev_get_drvdata(dev);
1701
1702         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1703 }
1704
1705 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1706                 struct device_attribute *attr, const char *buf, size_t count)
1707 {
1708         struct ufs_hba *hba = dev_get_drvdata(dev);
1709         unsigned long flags;
1710         u32 value;
1711
1712         if (kstrtou32(buf, 0, &value))
1713                 return -EINVAL;
1714
1715         value = !!value;
1716         if (value == hba->clk_gating.is_enabled)
1717                 goto out;
1718
1719         if (value) {
1720                 ufshcd_release(hba);
1721         } else {
1722                 spin_lock_irqsave(hba->host->host_lock, flags);
1723                 hba->clk_gating.active_reqs++;
1724                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1725         }
1726
1727         hba->clk_gating.is_enabled = value;
1728 out:
1729         return count;
1730 }
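
/*
 * Usage note (sysfs paths assumed): the two attributes registered in
 * ufshcd_init_clk_gating() below map onto the handlers above, e.g.
 *
 *   echo 200 > /sys/.../clkgate_delay_ms   # idle 200 ms before gating
 *   echo 0   > /sys/.../clkgate_enable     # pin the clocks on
 *
 * Disabling takes an extra active_reqs reference, which is what keeps
 * __ufshcd_release() from ever queueing the gate work.
 */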
1731
1732 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1733 {
1734         char wq_name[sizeof("ufs_clkscaling_00")];
1735
1736         if (!ufshcd_is_clkscaling_supported(hba))
1737                 return;
1738
1739         INIT_WORK(&hba->clk_scaling.suspend_work,
1740                   ufshcd_clk_scaling_suspend_work);
1741         INIT_WORK(&hba->clk_scaling.resume_work,
1742                   ufshcd_clk_scaling_resume_work);
1743
1744         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1745                  hba->host->host_no);
1746         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1747
1748         ufshcd_clkscaling_init_sysfs(hba);
1749 }
1750
1751 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1752 {
1753         if (!ufshcd_is_clkscaling_supported(hba))
1754                 return;
1755
1756         destroy_workqueue(hba->clk_scaling.workq);
1757         ufshcd_devfreq_remove(hba);
1758 }
1759
1760 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1761 {
1762         char wq_name[sizeof("ufs_clk_gating_00")];
1763
1764         if (!ufshcd_is_clkgating_allowed(hba))
1765                 return;
1766
1767         hba->clk_gating.delay_ms = 150;
1768         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1769         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1770
1771         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1772                  hba->host->host_no);
1773         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1774                                                            WQ_MEM_RECLAIM);
1775
1776         hba->clk_gating.is_enabled = true;
1777
1778         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1779         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1780         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1781         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1782         hba->clk_gating.delay_attr.attr.mode = 0644;
1783         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1784                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1785
1786         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1787         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1788         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1789         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1790         hba->clk_gating.enable_attr.attr.mode = 0644;
1791         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1792                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1793 }
1794
1795 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1796 {
1797         if (!ufshcd_is_clkgating_allowed(hba))
1798                 return;
1799         device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1800         device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1801         cancel_work_sync(&hba->clk_gating.ungate_work);
1802         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1803         destroy_workqueue(hba->clk_gating.clk_gating_workq);
1804 }
1805
1806 /* Must be called with host lock acquired */
1807 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1808 {
1809         bool queue_resume_work = false;
1810
1811         if (!ufshcd_is_clkscaling_supported(hba))
1812                 return;
1813
1814         if (!hba->clk_scaling.active_reqs++)
1815                 queue_resume_work = true;
1816
1817         if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1818                 return;
1819
1820         if (queue_resume_work)
1821                 queue_work(hba->clk_scaling.workq,
1822                            &hba->clk_scaling.resume_work);
1823
1824         if (!hba->clk_scaling.window_start_t) {
1825                 hba->clk_scaling.window_start_t = jiffies;
1826                 hba->clk_scaling.tot_busy_t = 0;
1827                 hba->clk_scaling.is_busy_started = false;
1828         }
1829
1830         if (!hba->clk_scaling.is_busy_started) {
1831                 hba->clk_scaling.busy_start_t = ktime_get();
1832                 hba->clk_scaling.is_busy_started = true;
1833         }
1834 }
1835
1836 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1837 {
1838         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1839
1840         if (!ufshcd_is_clkscaling_supported(hba))
1841                 return;
1842
1843         if (!hba->outstanding_reqs && scaling->is_busy_started) {
1844                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1845                                         scaling->busy_start_t));
1846                 scaling->busy_start_t = 0;
1847                 scaling->is_busy_started = false;
1848         }
1849 }
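
/*
 * Worked example (timestamps assumed): with a scaling window opened at
 * t = 0, a first burst busy from 0 to 3 ms and a second from 5 to 9 ms
 * leave tot_busy_t at roughly 7000 us once the queue drains, which is
 * the busy figure the devfreq governor later weighs against the total
 * window length when deciding whether to scale.
 */
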
1850 /**
1851  * ufshcd_send_command - Send SCSI or device management commands
1852  * @hba: per adapter instance
1853  * @task_tag: Task tag of the command
1854  */
1855 static inline
1856 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1857 {
1858         hba->lrb[task_tag].issue_time_stamp = ktime_get();
1859         hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
1860         ufshcd_clk_scaling_start_busy(hba);
1861         __set_bit(task_tag, &hba->outstanding_reqs);
1862         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1863         /* Make sure that doorbell is committed immediately */
1864         wmb();
1865         ufshcd_add_command_trace(hba, task_tag, "send");
1866 }
1867
1868 /**
1869  * ufshcd_copy_sense_data - Copy sense data in case of check condition
1870  * @lrbp: pointer to local reference block
1871  */
1872 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1873 {
1874         int len;
1875         if (lrbp->sense_buffer &&
1876             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1877                 int len_to_copy;
1878
1879                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1880                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
1881
1882                 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1883                        len_to_copy);
1884         }
1885 }
1886
1887 /**
1888  * ufshcd_copy_query_response() - Copy the Query Response and the data
1889  * descriptor
1890  * @hba: per adapter instance
1891  * @lrbp: pointer to local reference block
1892  */
1893 static
1894 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1895 {
1896         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1897
1898         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1899
1900         /* Get the descriptor */
1901         if (hba->dev_cmd.query.descriptor &&
1902             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1903                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1904                                 GENERAL_UPIU_REQUEST_SIZE;
1905                 u16 resp_len;
1906                 u16 buf_len;
1907
1908                 /* data segment length */
1909                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1910                                                 MASK_QUERY_DATA_SEG_LEN;
1911                 buf_len = be16_to_cpu(
1912                                 hba->dev_cmd.query.request.upiu_req.length);
1913                 if (likely(buf_len >= resp_len)) {
1914                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1915                 } else {
1916                         dev_warn(hba->dev,
1917                                  "%s: rsp size %d is bigger than buffer size %d\n",
1918                                  __func__, resp_len, buf_len);
1919                         return -EINVAL;
1920                 }
1921         }
1922
1923         return 0;
1924 }
1925
1926 /**
1927  * ufshcd_hba_capabilities - Read controller capabilities
1928  * @hba: per adapter instance
1929  */
1930 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1931 {
1932         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1933
1934         /* nutrs and nutmrs are 0 based values */
1935         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1936         hba->nutmrs =
1937         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1938 }
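
/*
 * Worked example (register value assumed): with capabilities reading
 * 0x0707001f, MASK_TRANSFER_REQUESTS_SLOTS (0x1f) gives nutrs = 32 and
 * bits 18:16 give nutmrs = 8, the "+ 1" accounting for both fields
 * being 0-based in the register.
 */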
1939
1940 /**
1941  * ufshcd_ready_for_uic_cmd - Check if controller is ready
1942  *                            to accept UIC commands
1943  * @hba: per adapter instance
1944  * Return true on success, else false
1945  */
1946 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1947 {
1948         return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
1952 }
1953
1954 /**
1955  * ufshcd_get_upmcrs - Get the power mode change request status
1956  * @hba: Pointer to adapter instance
1957  *
1958  * This function gets the UPMCRS field of HCS register
1959  * Returns value of UPMCRS field
1960  */
1961 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1962 {
1963         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1964 }
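
/*
 * Example (register value assumed): an HCS value of 0x00000107 yields
 * (0x107 >> 8) & 0x7 == 1 == PWR_LOCAL, i.e. the last power mode change
 * request was serviced successfully by the local UniPro layer.
 */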
1965
1966 /**
1967  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1968  * @hba: per adapter instance
1969  * @uic_cmd: UIC command
1970  *
1971  * Mutex must be held.
1972  */
1973 static inline void
1974 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1975 {
1976         WARN_ON(hba->active_uic_cmd);
1977
1978         hba->active_uic_cmd = uic_cmd;
1979
1980         /* Write Args */
1981         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
1982         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
1983         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
1984
1985         /* Write UIC Cmd */
1986         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
1987                       REG_UIC_COMMAND);
1988 }
1989
1990 /**
1991  * ufshcd_wait_for_uic_cmd - Wait for completion of UIC command
1992  * @hba: per adapter instance
1993  * @uic_cmd: UIC command
1994  *
1995  * Must be called with mutex held.
1996  * Returns 0 only if success.
1997  */
1998 static int
1999 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2000 {
2001         int ret;
2002         unsigned long flags;
2003
2004         if (wait_for_completion_timeout(&uic_cmd->done,
2005                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2006                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2007         else
2008                 ret = -ETIMEDOUT;
2009
2010         spin_lock_irqsave(hba->host->host_lock, flags);
2011         hba->active_uic_cmd = NULL;
2012         spin_unlock_irqrestore(hba->host->host_lock, flags);
2013
2014         return ret;
2015 }
2016
2017 /**
2018  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2019  * @hba: per adapter instance
2020  * @uic_cmd: UIC command
2021  * @completion: initialize the completion only if this is set to true
2022  *
2023  * Identical to ufshcd_send_uic_cmd() except that it does not take the
2024  * mutex itself. Must be called with the mutex held and host_lock locked.
2025  * Returns 0 only if success.
2026  */
2027 static int
2028 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2029                       bool completion)
2030 {
2031         if (!ufshcd_ready_for_uic_cmd(hba)) {
2032                 dev_err(hba->dev,
2033                         "Controller not ready to accept UIC commands\n");
2034                 return -EIO;
2035         }
2036
2037         if (completion)
2038                 init_completion(&uic_cmd->done);
2039
2040         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2041
2042         return 0;
2043 }
2044
2045 /**
2046  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2047  * @hba: per adapter instance
2048  * @uic_cmd: UIC command
2049  *
2050  * Returns 0 only if success.
2051  */
2052 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2053 {
2054         int ret;
2055         unsigned long flags;
2056
2057         ufshcd_hold(hba, false);
2058         mutex_lock(&hba->uic_cmd_mutex);
2059         ufshcd_add_delay_before_dme_cmd(hba);
2060
2061         spin_lock_irqsave(hba->host->host_lock, flags);
2062         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2063         spin_unlock_irqrestore(hba->host->host_lock, flags);
2064         if (!ret)
2065                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2066
2067         mutex_unlock(&hba->uic_cmd_mutex);
2068
2069         ufshcd_release(hba);
2070         return ret;
2071 }
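
/*
 * A minimal sketch (this wrapper is illustrative; the command, macro and
 * attribute all come from ufshci.h/unipro.h) of issuing a raw DME_GET
 * through ufshcd_send_uic_cmd(). Real callers normally go through the
 * ufshcd_dme_get()/ufshcd_dme_set() helpers instead.
 */
static int ufshcd_example_dme_get_tx_lanes(struct ufs_hba *hba, u32 *lanes)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_GET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_ACTIVETXDATALANES);

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret)
		*lanes = uic_cmd.argument3;	/* DME_GET returns the value in arg3 */

	return ret;
}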
2072
2073 /**
2074  * ufshcd_map_sg - Map scatter-gather list to prdt
2075  * @hba: per adapter instance
2076  * @lrbp: pointer to local reference block
2077  *
2078  * Returns 0 in case of success, non-zero value in case of failure
2079  */
2080 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2081 {
2082         struct ufshcd_sg_entry *prd_table;
2083         struct scatterlist *sg;
2084         struct scsi_cmnd *cmd;
2085         int sg_segments;
2086         int i;
2087
2088         cmd = lrbp->cmd;
2089         sg_segments = scsi_dma_map(cmd);
2090         if (sg_segments < 0)
2091                 return sg_segments;
2092
2093         if (sg_segments) {
2094                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2095                         lrbp->utr_descriptor_ptr->prd_table_length =
2096                                 cpu_to_le16((u16)(sg_segments *
2097                                         sizeof(struct ufshcd_sg_entry)));
2098                 else
2099                         lrbp->utr_descriptor_ptr->prd_table_length =
2100                                 cpu_to_le16((u16) (sg_segments));
2101
2102                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2103
2104                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2105                         prd_table[i].size  =
2106                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2107                         prd_table[i].base_addr =
2108                                 cpu_to_le32(lower_32_bits(sg->dma_address));
2109                         prd_table[i].upper_addr =
2110                                 cpu_to_le32(upper_32_bits(sg->dma_address));
2111                         prd_table[i].reserved = 0;
2112                 }
2113         } else {
2114                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2115         }
2116
2117         return 0;
2118 }
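
/*
 * Worked example (DMA addresses assumed): a two-segment list of 4 KiB
 * at 0x1_0000_1000 and 8 KiB at 0x2000 produces PRDT entries of
 * {size = 0xfff, base_addr = 0x00001000, upper_addr = 0x1} and
 * {size = 0x1fff, base_addr = 0x00002000, upper_addr = 0x0}; the size
 * field is length - 1 as required by UFSHCI.
 */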
2119
2120 /**
2121  * ufshcd_enable_intr - enable interrupts
2122  * @hba: per adapter instance
2123  * @intrs: interrupt bits
2124  */
2125 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2126 {
2127         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2128
2129         if (hba->ufs_version == UFSHCI_VERSION_10) {
2130                 u32 rw;
2131                 rw = set & INTERRUPT_MASK_RW_VER_10;
2132                 set = rw | ((set ^ intrs) & intrs);
2133         } else {
2134                 set |= intrs;
2135         }
2136
2137         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2138 }
2139
2140 /**
2141  * ufshcd_disable_intr - disable interrupts
2142  * @hba: per adapter instance
2143  * @intrs: interrupt bits
2144  */
2145 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2146 {
2147         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2148
2149         if (hba->ufs_version == UFSHCI_VERSION_10) {
2150                 u32 rw;
2151                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2152                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2153                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2154
2155         } else {
2156                 set &= ~intrs;
2157         }
2158
2159         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2160 }
2161
2162 /**
2163  * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
2164  * according to the request
2165  * @lrbp: pointer to local reference block
2166  * @upiu_flags: flags required in the header
2167  * @cmd_dir: request's data direction
2168  */
2169 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2170                         u32 *upiu_flags, enum dma_data_direction cmd_dir)
2171 {
2172         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2173         u32 data_direction;
2174         u32 dword_0;
2175
2176         if (cmd_dir == DMA_FROM_DEVICE) {
2177                 data_direction = UTP_DEVICE_TO_HOST;
2178                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2179         } else if (cmd_dir == DMA_TO_DEVICE) {
2180                 data_direction = UTP_HOST_TO_DEVICE;
2181                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2182         } else {
2183                 data_direction = UTP_NO_DATA_TRANSFER;
2184                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2185         }
2186
2187         dword_0 = data_direction | (lrbp->command_type
2188                                 << UPIU_COMMAND_TYPE_OFFSET);
2189         if (lrbp->intr_cmd)
2190                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2191
2192         /* Transfer request descriptor header fields */
2193         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2194         /* dword_1 is reserved, hence it is set to 0 */
2195         req_desc->header.dword_1 = 0;
2196         /*
2197          * assigning invalid value for command status. Controller
2198          * updates OCS on command completion, with the command
2199          * status
2200          */
2201         req_desc->header.dword_2 =
2202                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2203         /* dword_3 is reserved, hence it is set to 0 */
2204         req_desc->header.dword_3 = 0;
2205
2206         req_desc->prd_table_length = 0;
2207 }
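
/*
 * Worked example (constants from ufshci.h): for a READ on a UFSHCI >= 2.0
 * controller, data_direction = UTP_DEVICE_TO_HOST (0x04000000) and
 * lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE (0x1) shifted by
 * UPIU_COMMAND_TYPE_OFFSET (28) combine into dword_0 = 0x14000000, with
 * UTP_REQ_DESC_INT_CMD (bit 24) OR-ed in for interrupt commands.
 */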
2208
2209 /**
2210  * ufshcd_prepare_utp_scsi_cmd_upiu() - fill the UTP command UPIU
2211  * for SCSI commands
2212  * @lrbp: local reference block pointer
2213  * @upiu_flags: flags
2214  */
2215 static
2216 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2217 {
2218         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2219         unsigned short cdb_len;
2220
2221         /* command descriptor fields */
2222         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2223                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2224                                 lrbp->lun, lrbp->task_tag);
2225         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2226                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2227
2228         /* Total EHS length and Data segment length will be zero */
2229         ucd_req_ptr->header.dword_2 = 0;
2230
2231         ucd_req_ptr->sc.exp_data_transfer_len =
2232                 cpu_to_be32(lrbp->cmd->sdb.length);
2233
2234         cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
2235         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2236         memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2237
2238         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2239 }
2240
2241 /**
2242  * ufshcd_prepare_utp_query_req_upiu() - fill the UTP command UPIU
2243  * for query requests
2244  * @hba: UFS hba
2245  * @lrbp: local reference block pointer
2246  * @upiu_flags: flags
2247  */
2248 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2249                                 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2250 {
2251         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2252         struct ufs_query *query = &hba->dev_cmd.query;
2253         u16 len = be16_to_cpu(query->request.upiu_req.length);
2254
2255         /* Query request header */
2256         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2257                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2258                         lrbp->lun, lrbp->task_tag);
2259         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2260                         0, query->request.query_func, 0, 0);
2261
2262         /* Data segment length is only needed for WRITE_DESC */
2263         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2264                 ucd_req_ptr->header.dword_2 =
2265                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2266         else
2267                 ucd_req_ptr->header.dword_2 = 0;
2268
2269         /* Copy the Query Request buffer as is */
2270         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2271                         QUERY_OSF_SIZE);
2272
2273         /* Copy the Descriptor */
2274         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2275                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2276
2277         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2278 }
2279
2280 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2281 {
2282         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2283
2284         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2285
2286         /* command descriptor fields */
2287         ucd_req_ptr->header.dword_0 =
2288                 UPIU_HEADER_DWORD(
2289                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2290         /* clear rest of the fields of basic header */
2291         ucd_req_ptr->header.dword_1 = 0;
2292         ucd_req_ptr->header.dword_2 = 0;
2293
2294         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2295 }
2296
2297 /**
2298  * ufshcd_comp_devman_upiu - fill a UFS Protocol Information Unit (UPIU)
2299  *                           for device management purposes
2300  * @hba: per adapter instance
2301  * @lrbp: pointer to local reference block
2302  */
2303 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2304 {
2305         u32 upiu_flags;
2306         int ret = 0;
2307
2308         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2309             (hba->ufs_version == UFSHCI_VERSION_11))
2310                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2311         else
2312                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2313
2314         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2315         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2316                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2317         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2318                 ufshcd_prepare_utp_nop_upiu(lrbp);
2319         else
2320                 ret = -EINVAL;
2321
2322         return ret;
2323 }
2324
2325 /**
2326  * ufshcd_comp_scsi_upiu - fill a UFS Protocol Information Unit (UPIU)
2327  *                         for SCSI purposes
2328  * @hba: per adapter instance
2329  * @lrbp: pointer to local reference block
2330  */
2331 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2332 {
2333         u32 upiu_flags;
2334         int ret = 0;
2335
2336         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2337             (hba->ufs_version == UFSHCI_VERSION_11))
2338                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2339         else
2340                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2341
2342         if (likely(lrbp->cmd)) {
2343                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2344                                                 lrbp->cmd->sc_data_direction);
2345                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2346         } else {
2347                 ret = -EINVAL;
2348         }
2349
2350         return ret;
2351 }
2352
2353 /**
2354  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2355  * @upiu_wlun_id: UPIU W-LUN id
2356  *
2357  * Returns SCSI W-LUN id
2358  */
2359 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2360 {
2361         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2362 }
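
/*
 * Worked example: the REPORT LUNS well known unit, UPIU W-LUN id 0x81,
 * maps to (0x81 & ~0x80) | 0xc100 == 0xc101, given UFS_UPIU_WLUN_ID ==
 * (1 << 7) and SCSI_W_LUN_BASE == 0xc100.
 */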
2363
2364 /**
2365  * ufshcd_queuecommand - main entry point for SCSI requests
2366  * @host: SCSI host pointer
2367  * @cmd: command from SCSI Midlayer
2368  *
2369  * Returns 0 for success, non-zero in case of failure
2370  */
2371 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2372 {
2373         struct ufshcd_lrb *lrbp;
2374         struct ufs_hba *hba;
2375         unsigned long flags;
2376         int tag;
2377         int err = 0;
2378
2379         hba = shost_priv(host);
2380
2381         tag = cmd->request->tag;
2382         if (!ufshcd_valid_tag(hba, tag)) {
2383                 dev_err(hba->dev,
2384                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p\n",
2385                         __func__, tag, cmd, cmd->request);
2386                 BUG();
2387         }
2388
2389         if (!down_read_trylock(&hba->clk_scaling_lock))
2390                 return SCSI_MLQUEUE_HOST_BUSY;
2391
2392         spin_lock_irqsave(hba->host->host_lock, flags);
2393         switch (hba->ufshcd_state) {
2394         case UFSHCD_STATE_OPERATIONAL:
2395                 break;
2396         case UFSHCD_STATE_EH_SCHEDULED:
2397         case UFSHCD_STATE_RESET:
2398                 err = SCSI_MLQUEUE_HOST_BUSY;
2399                 goto out_unlock;
2400         case UFSHCD_STATE_ERROR:
2401                 set_host_byte(cmd, DID_ERROR);
2402                 cmd->scsi_done(cmd);
2403                 goto out_unlock;
2404         default:
2405                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2406                                 __func__, hba->ufshcd_state);
2407                 set_host_byte(cmd, DID_BAD_TARGET);
2408                 cmd->scsi_done(cmd);
2409                 goto out_unlock;
2410         }
2411
2412         /* if error handling is in progress, don't issue commands */
2413         if (ufshcd_eh_in_progress(hba)) {
2414                 set_host_byte(cmd, DID_ERROR);
2415                 cmd->scsi_done(cmd);
2416                 goto out_unlock;
2417         }
2418         spin_unlock_irqrestore(hba->host->host_lock, flags);
2419
2420         hba->req_abort_count = 0;
2421
2422         err = ufshcd_hold(hba, true);
2423         if (err) {
2424                 err = SCSI_MLQUEUE_HOST_BUSY;
2425                 goto out;
2426         }
2427         WARN_ON(hba->clk_gating.state != CLKS_ON);
2428
2429         lrbp = &hba->lrb[tag];
2430
2431         WARN_ON(lrbp->cmd);
2432         lrbp->cmd = cmd;
2433         lrbp->sense_bufflen = UFS_SENSE_SIZE;
2434         lrbp->sense_buffer = cmd->sense_buffer;
2435         lrbp->task_tag = tag;
2436         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2437         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2438         lrbp->req_abort_skip = false;
2439
2440         ufshcd_comp_scsi_upiu(hba, lrbp);
2441
2442         err = ufshcd_map_sg(hba, lrbp);
2443         if (err) {
2444                 lrbp->cmd = NULL;
2445                 ufshcd_release(hba);
2446                 goto out;
2447         }
2448         /* Make sure descriptors are ready before ringing the doorbell */
2449         wmb();
2450
2451         /* issue command to the controller */
2452         spin_lock_irqsave(hba->host->host_lock, flags);
2453         ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2454         ufshcd_send_command(hba, tag);
2455 out_unlock:
2456         spin_unlock_irqrestore(hba->host->host_lock, flags);
2457 out:
2458         up_read(&hba->clk_scaling_lock);
2459         return err;
2460 }
2461
2462 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2463                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2464 {
2465         lrbp->cmd = NULL;
2466         lrbp->sense_bufflen = 0;
2467         lrbp->sense_buffer = NULL;
2468         lrbp->task_tag = tag;
2469         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2470         lrbp->intr_cmd = true; /* No interrupt aggregation */
2471         hba->dev_cmd.type = cmd_type;
2472
2473         return ufshcd_comp_devman_upiu(hba, lrbp);
2474 }
2475
2476 static int
2477 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2478 {
2479         int err = 0;
2480         unsigned long flags;
2481         u32 mask = 1 << tag;
2482
2483         /* clear outstanding transaction before retry */
2484         spin_lock_irqsave(hba->host->host_lock, flags);
2485         ufshcd_utrl_clear(hba, tag);
2486         spin_unlock_irqrestore(hba->host->host_lock, flags);
2487
2488         /*
2489          * wait for h/w to clear the corresponding bit in door-bell.
2490          * max. wait is 1 sec.
2491          */
2492         err = ufshcd_wait_for_register(hba,
2493                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
2494                         mask, ~mask, 1000, 1000, true);
2495
2496         return err;
2497 }
2498
2499 static int
2500 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2501 {
2502         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2503
2504         /* Get the UPIU response */
2505         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2506                                 UPIU_RSP_CODE_OFFSET;
2507         return query_res->response;
2508 }
2509
2510 /**
2511  * ufshcd_dev_cmd_completion() - handles device management command responses
2512  * @hba: per adapter instance
2513  * @lrbp: pointer to local reference block
2514  */
2515 static int
2516 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2517 {
2518         int resp;
2519         int err = 0;
2520
2521         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2522         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2523
2524         switch (resp) {
2525         case UPIU_TRANSACTION_NOP_IN:
2526                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2527                         err = -EINVAL;
2528                         dev_err(hba->dev, "%s: unexpected response %x\n",
2529                                         __func__, resp);
2530                 }
2531                 break;
2532         case UPIU_TRANSACTION_QUERY_RSP:
2533                 err = ufshcd_check_query_response(hba, lrbp);
2534                 if (!err)
2535                         err = ufshcd_copy_query_response(hba, lrbp);
2536                 break;
2537         case UPIU_TRANSACTION_REJECT_UPIU:
2538                 /* TODO: handle Reject UPIU Response */
2539                 err = -EPERM;
2540                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2541                                 __func__);
2542                 break;
2543         default:
2544                 err = -EINVAL;
2545                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2546                                 __func__, resp);
2547                 break;
2548         }
2549
2550         return err;
2551 }
2552
2553 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2554                 struct ufshcd_lrb *lrbp, int max_timeout)
2555 {
2556         int err = 0;
2557         unsigned long time_left;
2558         unsigned long flags;
2559
2560         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2561                         msecs_to_jiffies(max_timeout));
2562
2563         /* Make sure descriptors are ready before ringing the doorbell */
2564         wmb();
2565         spin_lock_irqsave(hba->host->host_lock, flags);
2566         hba->dev_cmd.complete = NULL;
2567         if (likely(time_left)) {
2568                 err = ufshcd_get_tr_ocs(lrbp);
2569                 if (!err)
2570                         err = ufshcd_dev_cmd_completion(hba, lrbp);
2571         }
2572         spin_unlock_irqrestore(hba->host->host_lock, flags);
2573
2574         if (!time_left) {
2575                 err = -ETIMEDOUT;
2576                 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2577                 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
2578                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2579                         /* successfully cleared the command, retry if needed */
2580                         err = -EAGAIN;
2581                 /*
2582                  * in case of an error, after clearing the doorbell,
2583                  * we also need to clear the outstanding_request
2584                  * field in hba
2585                  */
2586                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2587         }
2588
2589         return err;
2590 }
2591
2592 /**
2593  * ufshcd_exec_dev_cmd - API for sending device management requests
2594  * @hba: UFS hba
2595  * @cmd_type: specifies the type (NOP, Query...)
2596  * @timeout: timeout in milliseconds
2597  *
2598  * NOTE: Since there is only one available tag for device management commands,
2599  * it is expected that the caller holds the hba->dev_cmd.lock mutex.
2600  */
2601 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2602                 enum dev_cmd_type cmd_type, int timeout)
2603 {
2604         struct request_queue *q = hba->cmd_queue;
2605         struct request *req;
2606         struct ufshcd_lrb *lrbp;
2607         int err;
2608         int tag;
2609         struct completion wait;
2610         unsigned long flags;
2611
2612         down_read(&hba->clk_scaling_lock);
2613
2614         /*
2615          * Get a free slot, sleeping if none is available. Even though
2616          * blk_get_request() may sleep indefinitely, the maximum wait
2617          * time is bounded by the SCSI request timeout.
2618          */
2619         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2620         if (IS_ERR(req)) {
2621                 err = PTR_ERR(req);
2622                 goto out_unlock;
2623         }
2624         tag = req->tag;
2625         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2626
2627         init_completion(&wait);
2628         lrbp = &hba->lrb[tag];
2629         WARN_ON(lrbp->cmd);
2630         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2631         if (unlikely(err))
2632                 goto out_put_tag;
2633
2634         hba->dev_cmd.complete = &wait;
2635
2636         ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2637         /* Make sure descriptors are ready before ringing the doorbell */
2638         wmb();
2639         spin_lock_irqsave(hba->host->host_lock, flags);
2640         ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2641         ufshcd_send_command(hba, tag);
2642         spin_unlock_irqrestore(hba->host->host_lock, flags);
2643
2644         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2645
2646         ufshcd_add_query_upiu_trace(hba, tag,
2647                         err ? "query_complete_err" : "query_complete");
2648
2649 out_put_tag:
2650         blk_put_request(req);
2651 out_unlock:
2652         up_read(&hba->clk_scaling_lock);
2653         return err;
2654 }
2655
2656 /**
2657  * ufshcd_init_query() - init the query response and request parameters
2658  * @hba: per-adapter instance
2659  * @request: address of the request pointer to be initialized
2660  * @response: address of the response pointer to be initialized
2661  * @opcode: operation to perform
2662  * @idn: flag idn to access
2663  * @index: LU number to access
2664  * @selector: query/flag/descriptor further identification
2665  */
2666 static inline void ufshcd_init_query(struct ufs_hba *hba,
2667                 struct ufs_query_req **request, struct ufs_query_res **response,
2668                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2669 {
2670         *request = &hba->dev_cmd.query.request;
2671         *response = &hba->dev_cmd.query.response;
2672         memset(*request, 0, sizeof(struct ufs_query_req));
2673         memset(*response, 0, sizeof(struct ufs_query_res));
2674         (*request)->upiu_req.opcode = opcode;
2675         (*request)->upiu_req.idn = idn;
2676         (*request)->upiu_req.index = index;
2677         (*request)->upiu_req.selector = selector;
2678 }
2679
2680 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2681         enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2682 {
2683         int ret;
2684         int retries;
2685
2686         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2687                 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2688                 if (ret)
2689                         dev_dbg(hba->dev,
2690                                 "%s: failed with error %d, retries %d\n",
2691                                 __func__, ret, retries);
2692                 else
2693                         break;
2694         }
2695
2696         if (ret)
2697                 dev_err(hba->dev,
2698                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2699                         __func__, opcode, idn, ret, retries);
2700         return ret;
2701 }
2702
2703 /**
2704  * ufshcd_query_flag() - API function for sending flag query requests
2705  * @hba: per-adapter instance
2706  * @opcode: flag query to perform
2707  * @idn: flag idn to access
2708  * @flag_res: the flag value after the query request completes
2709  *
2710  * Returns 0 for success, non-zero in case of failure
2711  */
2712 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2713                         enum flag_idn idn, bool *flag_res)
2714 {
2715         struct ufs_query_req *request = NULL;
2716         struct ufs_query_res *response = NULL;
2717         int err, index = 0, selector = 0;
2718         int timeout = QUERY_REQ_TIMEOUT;
2719
2720         BUG_ON(!hba);
2721
2722         ufshcd_hold(hba, false);
2723         mutex_lock(&hba->dev_cmd.lock);
2724         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2725                         selector);
2726
2727         switch (opcode) {
2728         case UPIU_QUERY_OPCODE_SET_FLAG:
2729         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2730         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2731                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2732                 break;
2733         case UPIU_QUERY_OPCODE_READ_FLAG:
2734                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2735                 if (!flag_res) {
2736                         /* No dummy reads */
2737                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
2738                                         __func__);
2739                         err = -EINVAL;
2740                         goto out_unlock;
2741                 }
2742                 break;
2743         default:
2744                 dev_err(hba->dev,
2745                         "%s: Expected query flag opcode but got = %d\n",
2746                         __func__, opcode);
2747                 err = -EINVAL;
2748                 goto out_unlock;
2749         }
2750
2751         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2752
2753         if (err) {
2754                 dev_err(hba->dev,
2755                         "%s: Sending flag query for idn %d failed, err = %d\n",
2756                         __func__, idn, err);
2757                 goto out_unlock;
2758         }
2759
2760         if (flag_res)
2761                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2762                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2763
2764 out_unlock:
2765         mutex_unlock(&hba->dev_cmd.lock);
2766         ufshcd_release(hba);
2767         return err;
2768 }
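
/*
 * Usage sketch (this wrapper is illustrative; the opcode and idn are
 * from ufs.h): reading the fDeviceInit flag via the retrying variant
 * defined above.
 */
static int ufshcd_example_read_fdeviceinit(struct ufs_hba *hba, bool *flag_res)
{
	return ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				       QUERY_FLAG_IDN_FDEVICEINIT, flag_res);
}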
2769
2770 /**
2771  * ufshcd_query_attr - API function for sending attribute requests
2772  * @hba: per-adapter instance
2773  * @opcode: attribute opcode
2774  * @idn: attribute idn to access
2775  * @index: index field
2776  * @selector: selector field
2777  * @attr_val: the attribute value after the query request completes
2778  *
2779  * Returns 0 for success, non-zero in case of failure
2780  */
2781 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2782                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2783 {
2784         struct ufs_query_req *request = NULL;
2785         struct ufs_query_res *response = NULL;
2786         int err;
2787
2788         BUG_ON(!hba);
2789
2790         ufshcd_hold(hba, false);
2791         if (!attr_val) {
2792                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2793                                 __func__, opcode);
2794                 err = -EINVAL;
2795                 goto out;
2796         }
2797
2798         mutex_lock(&hba->dev_cmd.lock);
2799         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2800                         selector);
2801
2802         switch (opcode) {
2803         case UPIU_QUERY_OPCODE_WRITE_ATTR:
2804                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2805                 request->upiu_req.value = cpu_to_be32(*attr_val);
2806                 break;
2807         case UPIU_QUERY_OPCODE_READ_ATTR:
2808                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2809                 break;
2810         default:
2811                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2812                                 __func__, opcode);
2813                 err = -EINVAL;
2814                 goto out_unlock;
2815         }
2816
2817         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2818
2819         if (err) {
2820                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2821                                 __func__, opcode, idn, index, err);
2822                 goto out_unlock;
2823         }
2824
2825         *attr_val = be32_to_cpu(response->upiu_res.value);
2826
2827 out_unlock:
2828         mutex_unlock(&hba->dev_cmd.lock);
2829 out:
2830         ufshcd_release(hba);
2831         return err;
2832 }
2833
2834 /**
2835  * ufshcd_query_attr_retry() - API function for sending query
2836  * attribute with retries
2837  * @hba: per-adapter instance
2838  * @opcode: attribute opcode
2839  * @idn: attribute idn to access
2840  * @index: index field
2841  * @selector: selector field
2842  * @attr_val: the attribute value after the query request
2843  * completes
2844  *
2845  * Returns 0 for success, non-zero in case of failure
2846  */
2847 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2848         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2849         u32 *attr_val)
2850 {
2851         int ret = 0;
2852         u32 retries;
2853
2854         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2855                 ret = ufshcd_query_attr(hba, opcode, idn, index,
2856                                                 selector, attr_val);
2857                 if (ret)
2858                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2859                                 __func__, ret, retries);
2860                 else
2861                         break;
2862         }
2863
2864         if (ret)
2865                 dev_err(hba->dev,
2866                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
2867                         __func__, idn, ret, QUERY_REQ_RETRIES);
2868         return ret;
2869 }
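
/*
 * Usage sketch (this wrapper is illustrative; the idn is from ufs.h):
 * reading the active ICC level attribute with retries.
 */
static int ufshcd_example_read_icc_level(struct ufs_hba *hba, u32 *icc_level)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
				       icc_level);
}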
2870
2871 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2872                         enum query_opcode opcode, enum desc_idn idn, u8 index,
2873                         u8 selector, u8 *desc_buf, int *buf_len)
2874 {
2875         struct ufs_query_req *request = NULL;
2876         struct ufs_query_res *response = NULL;
2877         int err;
2878
2879         BUG_ON(!hba);
2880
2881         ufshcd_hold(hba, false);
2882         if (!desc_buf) {
2883                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2884                                 __func__, opcode);
2885                 err = -EINVAL;
2886                 goto out;
2887         }
2888
2889         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2890                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2891                                 __func__, *buf_len);
2892                 err = -EINVAL;
2893                 goto out;
2894         }
2895
2896         mutex_lock(&hba->dev_cmd.lock);
2897         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2898                         selector);
2899         hba->dev_cmd.query.descriptor = desc_buf;
2900         request->upiu_req.length = cpu_to_be16(*buf_len);
2901
2902         switch (opcode) {
2903         case UPIU_QUERY_OPCODE_WRITE_DESC:
2904                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2905                 break;
2906         case UPIU_QUERY_OPCODE_READ_DESC:
2907                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2908                 break;
2909         default:
2910                 dev_err(hba->dev,
2911                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2912                                 __func__, opcode);
2913                 err = -EINVAL;
2914                 goto out_unlock;
2915         }
2916
2917         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2918
2919         if (err) {
2920                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2921                                 __func__, opcode, idn, index, err);
2922                 goto out_unlock;
2923         }
2924
2925         *buf_len = be16_to_cpu(response->upiu_res.length);
2926
2927 out_unlock:
2928         hba->dev_cmd.query.descriptor = NULL;
2929         mutex_unlock(&hba->dev_cmd.lock);
2930 out:
2931         ufshcd_release(hba);
2932         return err;
2933 }
2934
2935 /**
2936  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
2937  * @hba: per-adapter instance
2938  * @opcode: descriptor opcode
2939  * @idn: descriptor idn to access
2940  * @index: index field
2941  * @selector: selector field
2942  * @desc_buf: the buffer that contains the descriptor
2943  * @buf_len: length parameter passed to the device
2944  *
2945  * Returns 0 for success, non-zero in case of failure.
2946  * The buf_len parameter will contain, on return, the length parameter
2947  * received on the response.
2948  */
2949 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2950                                   enum query_opcode opcode,
2951                                   enum desc_idn idn, u8 index,
2952                                   u8 selector,
2953                                   u8 *desc_buf, int *buf_len)
2954 {
2955         int err;
2956         int retries;
2957
2958         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2959                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2960                                                 selector, desc_buf, buf_len);
2961                 if (!err || err == -EINVAL)
2962                         break;
2963         }
2964
2965         return err;
2966 }
2967
2968 /**
2969  * ufshcd_read_desc_length - read the specified descriptor length from header
2970  * @hba: Pointer to adapter instance
2971  * @desc_id: descriptor idn value
2972  * @desc_index: descriptor index
2973  * @desc_length: pointer to variable to read the length of descriptor
2974  *
2975  * Return 0 in case of success, non-zero otherwise
2976  */
2977 static int ufshcd_read_desc_length(struct ufs_hba *hba,
2978         enum desc_idn desc_id,
2979         int desc_index,
2980         int *desc_length)
2981 {
2982         int ret;
2983         u8 header[QUERY_DESC_HDR_SIZE];
2984         int header_len = QUERY_DESC_HDR_SIZE;
2985
2986         if (desc_id >= QUERY_DESC_IDN_MAX)
2987                 return -EINVAL;
2988
2989         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2990                                         desc_id, desc_index, 0, header,
2991                                         &header_len);
2992
2993         if (ret) {
2994                 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
2995                         __func__, desc_id);
2996                 return ret;
2997         } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
2998                 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
2999                         __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3000                         desc_id);
3001                 ret = -EINVAL;
3002         }
3003
3004         *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3005         return ret;
3007 }
3008
3009 /**
3010  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3011  * @hba: Pointer to adapter instance
3012  * @desc_id: descriptor idn value
3013  * @desc_len: mapped desc length (out)
3014  *
3015  * Return 0 in case of success, non-zero otherwise
3016  */
3017 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3018         enum desc_idn desc_id, int *desc_len)
3019 {
3020         switch (desc_id) {
3021         case QUERY_DESC_IDN_DEVICE:
3022                 *desc_len = hba->desc_size.dev_desc;
3023                 break;
3024         case QUERY_DESC_IDN_POWER:
3025                 *desc_len = hba->desc_size.pwr_desc;
3026                 break;
3027         case QUERY_DESC_IDN_GEOMETRY:
3028                 *desc_len = hba->desc_size.geom_desc;
3029                 break;
3030         case QUERY_DESC_IDN_CONFIGURATION:
3031                 *desc_len = hba->desc_size.conf_desc;
3032                 break;
3033         case QUERY_DESC_IDN_UNIT:
3034                 *desc_len = hba->desc_size.unit_desc;
3035                 break;
3036         case QUERY_DESC_IDN_INTERCONNECT:
3037                 *desc_len = hba->desc_size.interc_desc;
3038                 break;
3039         case QUERY_DESC_IDN_STRING:
3040                 *desc_len = QUERY_DESC_MAX_SIZE;
3041                 break;
3042         case QUERY_DESC_IDN_HEALTH:
3043                 *desc_len = hba->desc_size.hlth_desc;
3044                 break;
3045         case QUERY_DESC_IDN_RFU_0:
3046         case QUERY_DESC_IDN_RFU_1:
3047                 *desc_len = 0;
3048                 break;
3049         default:
3050                 *desc_len = 0;
3051                 return -EINVAL;
3052         }
3053         return 0;
3054 }
3055 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3056
3057 /**
3058  * ufshcd_read_desc_param - read the specified descriptor parameter
3059  * @hba: Pointer to adapter instance
3060  * @desc_id: descriptor idn value
3061  * @desc_index: descriptor index
3062  * @param_offset: offset of the parameter to read
3063  * @param_read_buf: pointer to buffer where parameter would be read
3064  * @param_size: sizeof(param_read_buf)
3065  *
3066  * Return 0 in case of success, non-zero otherwise
3067  */
3068 int ufshcd_read_desc_param(struct ufs_hba *hba,
3069                            enum desc_idn desc_id,
3070                            int desc_index,
3071                            u8 param_offset,
3072                            u8 *param_read_buf,
3073                            u8 param_size)
3074 {
3075         int ret;
3076         u8 *desc_buf;
3077         int buff_len;
3078         bool is_kmalloc = true;
3079
3080         /* Safety check */
3081         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3082                 return -EINVAL;
3083
3084         /* Get the max length of descriptor from structure filled up at probe
3085          * time.
3086          */
3087         ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3088
3089         /* Sanity checks */
3090         if (ret || !buff_len) {
3091                 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3092                         __func__);
3093                 return ret;
3094         }
3095
3096         /* Check whether we need temp memory */
3097         if (param_offset != 0 || param_size < buff_len) {
3098                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3099                 if (!desc_buf)
3100                         return -ENOMEM;
3101         } else {
3102                 desc_buf = param_read_buf;
3103                 is_kmalloc = false;
3104         }
3105
3106         /* Request for full descriptor */
3107         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3108                                         desc_id, desc_index, 0,
3109                                         desc_buf, &buff_len);
3110
3111         if (ret) {
3112                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3113                         __func__, desc_id, desc_index, param_offset, ret);
3114                 goto out;
3115         }
3116
3117         /* Sanity check */
3118         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3119                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3120                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3121                 ret = -EINVAL;
3122                 goto out;
3123         }
3124
3125         /* Make sure we don't copy more data than is available */
3126         if (is_kmalloc && param_size > buff_len)
3127                 param_size = buff_len;
3128
3129         if (is_kmalloc)
3130                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3131 out:
3132         if (is_kmalloc)
3133                 kfree(desc_buf);
3134         return ret;
3135 }
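/*
 * Illustrative sketch (editor's example, not part of the driver):
 * reading a single field out of the device descriptor. When the
 * requested window does not cover the whole descriptor, the helper
 * bounces through a kmalloc'd buffer internally. The offset
 * DEVICE_DESC_PARAM_SPEC_VER is assumed to be defined in ufs.h.
 *
 *	u8 spec_ver[2];
 *	int err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *					 DEVICE_DESC_PARAM_SPEC_VER,
 *					 spec_ver, sizeof(spec_ver));
 */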
3136
3137 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3138                                    enum desc_idn desc_id,
3139                                    int desc_index,
3140                                    void *buf,
3141                                    u32 size)
3142 {
3143         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3144 }
3145
3146 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3147                                          u8 *buf,
3148                                          u32 size)
3149 {
3150         return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3151 }
3152
3153 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3154 {
3155         return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3156 }
3157
3158 /**
3159  * struct uc_string_id - unicode string
3160  *
3161  * @len: total size of this descriptor, including the header
3162  * @type: descriptor type
3163  * @uc: unicode string character
3164  */
3165 struct uc_string_id {
3166         u8 len;
3167         u8 type;
3168         wchar_t uc[0];
3169 } __packed;
3170
3171 /* replace non-printable or non-ASCII characters with spaces */
3172 static inline char ufshcd_remove_non_printable(u8 ch)
3173 {
3174         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3175 }
3176
3177 /**
3178  * ufshcd_read_string_desc - read string descriptor
3179  * @hba: pointer to adapter instance
3180  * @desc_index: descriptor index
3181  * @buf: pointer to buffer where descriptor would be read,
3182  *       the caller should free the memory.
3183  * @ascii: if true, convert from unicode to a null-terminated
3184  *         ascii string.
3185  *
3186  * Return:
3187  * *      string size on success.
3188  * *      -ENOMEM: on allocation failure
3189  * *      -EINVAL: on a wrong parameter
3190  */
3191 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3192                             u8 **buf, bool ascii)
3193 {
3194         struct uc_string_id *uc_str;
3195         u8 *str;
3196         int ret;
3197
3198         if (!buf)
3199                 return -EINVAL;
3200
3201         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3202         if (!uc_str)
3203                 return -ENOMEM;
3204
3205         ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
3206                                desc_index, uc_str,
3207                                QUERY_DESC_MAX_SIZE);
3208         if (ret < 0) {
3209                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3210                         QUERY_REQ_RETRIES, ret);
3211                 str = NULL;
3212                 goto out;
3213         }
3214
3215         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3216                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3217                 str = NULL;
3218                 ret = 0;
3219                 goto out;
3220         }
3221
3222         if (ascii) {
3223                 ssize_t ascii_len;
3224                 int i;
3225                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3226                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3227                 str = kzalloc(ascii_len, GFP_KERNEL);
3228                 if (!str) {
3229                         ret = -ENOMEM;
3230                         goto out;
3231                 }
3232
3233                 /*
3234                  * the descriptor contains string in UTF16 format
3235                  * we need to convert to utf-8 so it can be displayed
3236                  */
3237                 ret = utf16s_to_utf8s(uc_str->uc,
3238                                       uc_str->len - QUERY_DESC_HDR_SIZE,
3239                                       UTF16_BIG_ENDIAN, str, ascii_len);
3240
3241                 /* replace non-printable or non-ASCII characters with spaces */
3242                 for (i = 0; i < ret; i++)
3243                         str[i] = ufshcd_remove_non_printable(str[i]);
3244
3245                 str[ret++] = '\0';
3246
3247         } else {
3248                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3249                 if (!str) {
3250                         ret = -ENOMEM;
3251                         goto out;
3252                 }
3253                 ret = uc_str->len;
3254         }
3255 out:
3256         *buf = str;
3257         kfree(uc_str);
3258         return ret;
3259 }
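/*
 * Illustrative sketch (editor's example, not part of the driver):
 * fetching a product name string in ASCII. The descriptor index
 * (prdct_name_index here, a hypothetical variable) would first be read
 * from the device descriptor. A positive return value is the string
 * size, and the caller owns the returned buffer.
 *
 *	u8 *name = NULL;
 *	int len = ufshcd_read_string_desc(hba, prdct_name_index, &name, true);
 *	if (len > 0) {
 *		dev_dbg(hba->dev, "product: %s\n", name);
 *		kfree(name);
 *	}
 */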
3260
3261 /**
3262  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3263  * @hba: Pointer to adapter instance
3264  * @lun: lun id
3265  * @param_offset: offset of the parameter to read
3266  * @param_read_buf: pointer to buffer where parameter would be read
3267  * @param_size: sizeof(param_read_buf)
3268  *
3269  * Return 0 in case of success, non-zero otherwise
3270  */
3271 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3272                                               int lun,
3273                                               enum unit_desc_param param_offset,
3274                                               u8 *param_read_buf,
3275                                               u32 param_size)
3276 {
3277         /*
3278          * Unit descriptors are only available for general purpose LUs (LUN id
3279          * from 0 to 7) and RPMB Well known LU.
3280          */
3281         if (!ufs_is_valid_unit_desc_lun(lun))
3282                 return -EOPNOTSUPP;
3283
3284         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3285                                       param_offset, param_read_buf, param_size);
3286 }
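/*
 * Illustrative sketch (editor's example, not part of the driver):
 * reading the queue depth of a general purpose LU. The offset
 * UNIT_DESC_PARAM_LU_Q_DEPTH is assumed to come from ufs.h; for LUNs
 * without a unit descriptor the check above returns -EOPNOTSUPP.
 *
 *	u8 lun_qdepth;
 *	int err = ufshcd_read_unit_desc_param(hba, lun,
 *					      UNIT_DESC_PARAM_LU_Q_DEPTH,
 *					      &lun_qdepth,
 *					      sizeof(lun_qdepth));
 */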
3287
3288 /**
3289  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3290  * @hba: per adapter instance
3291  *
3292  * 1. Allocate DMA memory for Command Descriptor array
3293  *      Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3294  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3295  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3296  *      (UTMRDL)
3297  * 4. Allocate memory for local reference block (lrb).
3298  *
3299  * Returns 0 for success, non-zero in case of failure
3300  */
3301 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3302 {
3303         size_t utmrdl_size, utrdl_size, ucdl_size;
3304
3305         /* Allocate memory for UTP command descriptors */
3306         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3307         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3308                                                   ucdl_size,
3309                                                   &hba->ucdl_dma_addr,
3310                                                   GFP_KERNEL);
3311
3312         /*
3313          * UFSHCI requires UTP command descriptors to be 128-byte aligned.
3314          * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE:
3315          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3316          * be aligned to 128 bytes as well.
3317          */
3318         if (!hba->ucdl_base_addr ||
3319             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3320                 dev_err(hba->dev,
3321                         "Command Descriptor Memory allocation failed\n");
3322                 goto out;
3323         }
3324
3325         /*
3326          * Allocate memory for UTP Transfer descriptors
3327          * UFSHCI requires 1024 byte alignment of UTRD
3328          */
3329         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3330         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3331                                                    utrdl_size,
3332                                                    &hba->utrdl_dma_addr,
3333                                                    GFP_KERNEL);
3334         if (!hba->utrdl_base_addr ||
3335             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3336                 dev_err(hba->dev,
3337                         "Transfer Descriptor Memory allocation failed\n");
3338                 goto out;
3339         }
3340
3341         /*
3342          * Allocate memory for UTP Task Management descriptors
3343          * UFSHCI requires 1024 byte alignment of UTMRD
3344          */
3345         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3346         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3347                                                     utmrdl_size,
3348                                                     &hba->utmrdl_dma_addr,
3349                                                     GFP_KERNEL);
3350         if (!hba->utmrdl_base_addr ||
3351             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3352                 dev_err(hba->dev,
3353                 "Task Management Descriptor Memory allocation failed\n");
3354                 goto out;
3355         }
3356
3357         /* Allocate memory for local reference block */
3358         hba->lrb = devm_kcalloc(hba->dev,
3359                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3360                                 GFP_KERNEL);
3361         if (!hba->lrb) {
3362                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3363                 goto out;
3364         }
3365         return 0;
3366 out:
3367         return -ENOMEM;
3368 }
3369
3370 /**
3371  * ufshcd_host_memory_configure - configure local reference block with
3372  *                              memory offsets
3373  * @hba: per adapter instance
3374  *
3375  * Configure Host memory space
3376  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3377  * address.
3378  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3379  * and PRDT offset.
3380  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3381  * into local reference block.
3382  */
3383 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3384 {
3385         struct utp_transfer_cmd_desc *cmd_descp;
3386         struct utp_transfer_req_desc *utrdlp;
3387         dma_addr_t cmd_desc_dma_addr;
3388         dma_addr_t cmd_desc_element_addr;
3389         u16 response_offset;
3390         u16 prdt_offset;
3391         int cmd_desc_size;
3392         int i;
3393
3394         utrdlp = hba->utrdl_base_addr;
3395         cmd_descp = hba->ucdl_base_addr;
3396
3397         response_offset =
3398                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3399         prdt_offset =
3400                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3401
3402         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3403         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3404
3405         for (i = 0; i < hba->nutrs; i++) {
3406                 /* Configure UTRD with command descriptor base address */
3407                 cmd_desc_element_addr =
3408                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3409                 utrdlp[i].command_desc_base_addr_lo =
3410                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3411                 utrdlp[i].command_desc_base_addr_hi =
3412                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3413
3414                 /* Response upiu and prdt offset should be in double words */
3415                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3416                         utrdlp[i].response_upiu_offset =
3417                                 cpu_to_le16(response_offset);
3418                         utrdlp[i].prd_table_offset =
3419                                 cpu_to_le16(prdt_offset);
3420                         utrdlp[i].response_upiu_length =
3421                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3422                 } else {
3423                         utrdlp[i].response_upiu_offset =
3424                                 cpu_to_le16((response_offset >> 2));
3425                         utrdlp[i].prd_table_offset =
3426                                 cpu_to_le16((prdt_offset >> 2));
3427                         utrdlp[i].response_upiu_length =
3428                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3429                 }
3430
3431                 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3432                 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3433                                 (i * sizeof(struct utp_transfer_req_desc));
3434                 hba->lrb[i].ucd_req_ptr =
3435                         (struct utp_upiu_req *)(cmd_descp + i);
3436                 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3437                 hba->lrb[i].ucd_rsp_ptr =
3438                         (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3439                 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3440                                 response_offset;
3441                 hba->lrb[i].ucd_prdt_ptr =
3442                         (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3443                 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3444                                 prdt_offset;
3445         }
3446 }
3447
3448 /**
3449  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3450  * @hba: per adapter instance
3451  *
3452  * UIC_CMD_DME_LINK_STARTUP command must be issued to the Unipro layer
3453  * in order to initialize the Unipro link startup procedure.
3454  * Once the Unipro links are up, the device connected to the controller
3455  * is detected.
3456  *
3457  * Returns 0 on success, non-zero value on failure
3458  */
3459 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3460 {
3461         struct uic_command uic_cmd = {0};
3462         int ret;
3463
3464         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3465
3466         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3467         if (ret)
3468                 dev_dbg(hba->dev,
3469                         "dme-link-startup: error code %d\n", ret);
3470         return ret;
3471 }
3472 /**
3473  * ufshcd_dme_reset - UIC command for DME_RESET
3474  * @hba: per adapter instance
3475  *
3476  * DME_RESET command is issued in order to reset UniPro stack.
3477  * This function now deals with cold reset.
3478  *
3479  * Returns 0 on success, non-zero value on failure
3480  */
3481 static int ufshcd_dme_reset(struct ufs_hba *hba)
3482 {
3483         struct uic_command uic_cmd = {0};
3484         int ret;
3485
3486         uic_cmd.command = UIC_CMD_DME_RESET;
3487
3488         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3489         if (ret)
3490                 dev_err(hba->dev,
3491                         "dme-reset: error code %d\n", ret);
3492
3493         return ret;
3494 }
3495
3496 /**
3497  * ufshcd_dme_enable - UIC command for DME_ENABLE
3498  * @hba: per adapter instance
3499  *
3500  * DME_ENABLE command is issued in order to enable UniPro stack.
3501  *
3502  * Returns 0 on success, non-zero value on failure
3503  */
3504 static int ufshcd_dme_enable(struct ufs_hba *hba)
3505 {
3506         struct uic_command uic_cmd = {0};
3507         int ret;
3508
3509         uic_cmd.command = UIC_CMD_DME_ENABLE;
3510
3511         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3512         if (ret)
3513                 dev_err(hba->dev,
3514                         "dme-enable: error code %d\n");
3515
3516         return ret;
3517 }
3518
3519 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3520 {
3521         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3522         unsigned long min_sleep_time_us;
3523
3524         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3525                 return;
3526
3527         /*
3528          * last_dme_cmd_tstamp will be 0 only for 1st call to
3529          * this function
3530          */
3531         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3532                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3533         } else {
3534                 unsigned long delta =
3535                         (unsigned long) ktime_to_us(
3536                                 ktime_sub(ktime_get(),
3537                                 hba->last_dme_cmd_tstamp));
3538
3539                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3540                         min_sleep_time_us =
3541                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3542                 else
3543                         return; /* no more delay required */
3544         }
3545
3546         /* allow sleep for extra 50us if needed */
3547         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3548 }
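/*
 * Worked example of the delay computation above (editor's note): on a
 * host with UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS set, if the previous DME
 * command finished 300us ago, delta = 300 and the function sleeps for
 * 700..750us; if it finished 1200us or more ago, it returns without
 * sleeping at all.
 */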
3549
3550 /**
3551  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3552  * @hba: per adapter instance
3553  * @attr_sel: uic command argument1
3554  * @attr_set: attribute set type as uic command argument2
3555  * @mib_val: setting value as uic command argument3
3556  * @peer: indicate whether peer or local
3557  *
3558  * Returns 0 on success, non-zero value on failure
3559  */
3560 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3561                         u8 attr_set, u32 mib_val, u8 peer)
3562 {
3563         struct uic_command uic_cmd = {0};
3564         static const char *const action[] = {
3565                 "dme-set",
3566                 "dme-peer-set"
3567         };
3568         const char *set = action[!!peer];
3569         int ret;
3570         int retries = UFS_UIC_COMMAND_RETRIES;
3571
3572         uic_cmd.command = peer ?
3573                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3574         uic_cmd.argument1 = attr_sel;
3575         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3576         uic_cmd.argument3 = mib_val;
3577
3578         do {
3579                 /* for peer attributes we retry upon failure */
3580                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3581                 if (ret)
3582                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3583                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3584         } while (ret && peer && --retries);
3585
3586         if (ret)
3587                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3588                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3589                         UFS_UIC_COMMAND_RETRIES - retries);
3590
3591         return ret;
3592 }
3593 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
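/*
 * Illustrative sketch (editor's example, not part of the driver):
 * callers typically use the ufshcd_dme_set()/ufshcd_dme_peer_set()
 * wrappers from ufshcd.h instead of calling ufshcd_dme_set_attr()
 * directly, e.g. a local DME_SET of the RX termination attribute:
 *
 *	int err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
 */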
3594
3595 /**
3596  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3597  * @hba: per adapter instance
3598  * @attr_sel: uic command argument1
3599  * @mib_val: the value of the attribute as returned by the UIC command
3600  * @peer: indicate whether peer or local
3601  *
3602  * Returns 0 on success, non-zero value on failure
3603  */
3604 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3605                         u32 *mib_val, u8 peer)
3606 {
3607         struct uic_command uic_cmd = {0};
3608         static const char *const action[] = {
3609                 "dme-get",
3610                 "dme-peer-get"
3611         };
3612         const char *get = action[!!peer];
3613         int ret;
3614         int retries = UFS_UIC_COMMAND_RETRIES;
3615         struct ufs_pa_layer_attr orig_pwr_info;
3616         struct ufs_pa_layer_attr temp_pwr_info;
3617         bool pwr_mode_change = false;
3618
3619         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3620                 orig_pwr_info = hba->pwr_info;
3621                 temp_pwr_info = orig_pwr_info;
3622
3623                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3624                     orig_pwr_info.pwr_rx == FAST_MODE) {
3625                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3626                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3627                         pwr_mode_change = true;
3628                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3629                     orig_pwr_info.pwr_rx == SLOW_MODE) {
3630                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3631                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3632                         pwr_mode_change = true;
3633                 }
3634                 if (pwr_mode_change) {
3635                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3636                         if (ret)
3637                                 goto out;
3638                 }
3639         }
3640
3641         uic_cmd.command = peer ?
3642                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3643         uic_cmd.argument1 = attr_sel;
3644
3645         do {
3646                 /* for peer attributes we retry upon failure */
3647                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3648                 if (ret)
3649                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3650                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
3651         } while (ret && peer && --retries);
3652
3653         if (ret)
3654                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3655                         get, UIC_GET_ATTR_ID(attr_sel),
3656                         UFS_UIC_COMMAND_RETRIES - retries);
3657
3658         if (mib_val && !ret)
3659                 *mib_val = uic_cmd.argument3;
3660
3661         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3662             && pwr_mode_change)
3663                 ufshcd_change_power_mode(hba, &orig_pwr_info);
3664 out:
3665         return ret;
3666 }
3667 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
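/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * matching read side goes through ufshcd_dme_get()/ufshcd_dme_peer_get()
 * from ufshcd.h, as in the lane query used later in this file:
 *
 *	u32 rx_lanes = 0;
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &rx_lanes);
 */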
3668
3669 /**
3670  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3671  * state) and waits for it to take effect.
3672  *
3673  * @hba: per adapter instance
3674  * @cmd: UIC command to execute
3675  *
3676  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3677  * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
3678  * and device UniPro link and hence it's final completion would be indicated by
3679  * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3680  * addition to normal UIC command completion Status (UCCS). This function only
3681  * returns after the relevant status bits indicate the completion.
3682  *
3683  * Returns 0 on success, non-zero value on failure
3684  */
3685 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3686 {
3687         struct completion uic_async_done;
3688         unsigned long flags;
3689         u8 status;
3690         int ret;
3691         bool reenable_intr = false;
3692
3693         mutex_lock(&hba->uic_cmd_mutex);
3694         init_completion(&uic_async_done);
3695         ufshcd_add_delay_before_dme_cmd(hba);
3696
3697         spin_lock_irqsave(hba->host->host_lock, flags);
3698         hba->uic_async_done = &uic_async_done;
3699         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3700                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3701                 /*
3702                  * Make sure UIC command completion interrupt is disabled before
3703                  * issuing UIC command.
3704                  */
3705                 wmb();
3706                 reenable_intr = true;
3707         }
3708         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3709         spin_unlock_irqrestore(hba->host->host_lock, flags);
3710         if (ret) {
3711                 dev_err(hba->dev,
3712                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3713                         cmd->command, cmd->argument3, ret);
3714                 goto out;
3715         }
3716
3717         if (!wait_for_completion_timeout(hba->uic_async_done,
3718                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3719                 dev_err(hba->dev,
3720                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3721                         cmd->command, cmd->argument3);
3722                 ret = -ETIMEDOUT;
3723                 goto out;
3724         }
3725
3726         status = ufshcd_get_upmcrs(hba);
3727         if (status != PWR_LOCAL) {
3728                 dev_err(hba->dev,
3729                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3730                         cmd->command, status);
3731                 ret = (status != PWR_OK) ? status : -1;
3732         }
3733 out:
3734         if (ret) {
3735                 ufshcd_print_host_state(hba);
3736                 ufshcd_print_pwr_info(hba);
3737                 ufshcd_print_host_regs(hba);
3738         }
3739
3740         spin_lock_irqsave(hba->host->host_lock, flags);
3741         hba->active_uic_cmd = NULL;
3742         hba->uic_async_done = NULL;
3743         if (reenable_intr)
3744                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3745         spin_unlock_irqrestore(hba->host->host_lock, flags);
3746         mutex_unlock(&hba->uic_cmd_mutex);
3747
3748         return ret;
3749 }
3750
3751 /**
3752  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3753  *                              using DME_SET primitives.
3754  * @hba: per adapter instance
3755  * @mode: power mode value
3756  *
3757  * Returns 0 on success, non-zero value on failure
3758  */
3759 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3760 {
3761         struct uic_command uic_cmd = {0};
3762         int ret;
3763
3764         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3765                 ret = ufshcd_dme_set(hba,
3766                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3767                 if (ret) {
3768                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3769                                                 __func__, ret);
3770                         goto out;
3771                 }
3772         }
3773
3774         uic_cmd.command = UIC_CMD_DME_SET;
3775         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3776         uic_cmd.argument3 = mode;
3777         ufshcd_hold(hba, false);
3778         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3779         ufshcd_release(hba);
3780
3781 out:
3782         return ret;
3783 }
3784
3785 static int ufshcd_link_recovery(struct ufs_hba *hba)
3786 {
3787         int ret;
3788         unsigned long flags;
3789
3790         spin_lock_irqsave(hba->host->host_lock, flags);
3791         hba->ufshcd_state = UFSHCD_STATE_RESET;
3792         ufshcd_set_eh_in_progress(hba);
3793         spin_unlock_irqrestore(hba->host->host_lock, flags);
3794
3795         /* Reset the attached device */
3796         ufshcd_vops_device_reset(hba);
3797
3798         ret = ufshcd_host_reset_and_restore(hba);
3799
3800         spin_lock_irqsave(hba->host->host_lock, flags);
3801         if (ret)
3802                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3803         ufshcd_clear_eh_in_progress(hba);
3804         spin_unlock_irqrestore(hba->host->host_lock, flags);
3805
3806         if (ret)
3807                 dev_err(hba->dev, "%s: link recovery failed, err %d",
3808                         __func__, ret);
3809
3810         return ret;
3811 }
3812
3813 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3814 {
3815         int ret;
3816         struct uic_command uic_cmd = {0};
3817         ktime_t start = ktime_get();
3818
3819         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3820
3821         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3822         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3823         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3824                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3825
3826         if (ret) {
3827                 int err;
3828
3829                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3830                         __func__, ret);
3831
3832                 /*
3833                  * If link recovery fails then return error code returned from
3834                  * ufshcd_link_recovery().
3835                  * If link recovery succeeds then return -EAGAIN to attempt
3836                  * hibern8 enter retry again.
3837                  */
3838                 err = ufshcd_link_recovery(hba);
3839                 if (err) {
3840                         dev_err(hba->dev, "%s: link recovery failed", __func__);
3841                         ret = err;
3842                 } else {
3843                         ret = -EAGAIN;
3844                 }
3845         } else
3846                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3847                                                                 POST_CHANGE);
3848
3849         return ret;
3850 }
3851
3852 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3853 {
3854         int ret = 0, retries;
3855
3856         for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3857                 ret = __ufshcd_uic_hibern8_enter(hba);
3858                 if (!ret)
3859                         goto out;
3860         }
3861 out:
3862         return ret;
3863 }
3864
3865 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3866 {
3867         struct uic_command uic_cmd = {0};
3868         int ret;
3869         ktime_t start = ktime_get();
3870
3871         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3872
3873         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3874         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3875         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3876                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3877
3878         if (ret) {
3879                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3880                         __func__, ret);
3881                 ret = ufshcd_link_recovery(hba);
3882         } else {
3883                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3884                                                                 POST_CHANGE);
3885                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3886                 hba->ufs_stats.hibern8_exit_cnt++;
3887         }
3888
3889         return ret;
3890 }
3891
3892 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
3893 {
3894         unsigned long flags;
3895
3896         if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
3897                 return;
3898
3899         spin_lock_irqsave(hba->host->host_lock, flags);
3900         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3901         spin_unlock_irqrestore(hba->host->host_lock, flags);
3902 }
3903
3904 /**
3905  * ufshcd_init_pwr_info - setting the POR (power on reset)
3906  * values in hba power info
3907  * @hba: per-adapter instance
3908  */
3909 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3910 {
3911         hba->pwr_info.gear_rx = UFS_PWM_G1;
3912         hba->pwr_info.gear_tx = UFS_PWM_G1;
3913         hba->pwr_info.lane_rx = 1;
3914         hba->pwr_info.lane_tx = 1;
3915         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3916         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3917         hba->pwr_info.hs_rate = 0;
3918 }
3919
3920 /**
3921  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3922  * @hba: per-adapter instance
3923  */
3924 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3925 {
3926         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3927
3928         if (hba->max_pwr_info.is_valid)
3929                 return 0;
3930
3931         pwr_info->pwr_tx = FAST_MODE;
3932         pwr_info->pwr_rx = FAST_MODE;
3933         pwr_info->hs_rate = PA_HS_MODE_B;
3934
3935         /* Get the connected lane count */
3936         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3937                         &pwr_info->lane_rx);
3938         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3939                         &pwr_info->lane_tx);
3940
3941         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3942                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3943                                 __func__,
3944                                 pwr_info->lane_rx,
3945                                 pwr_info->lane_tx);
3946                 return -EINVAL;
3947         }
3948
3949         /*
3950          * First, get the maximum gears of HS speed.
3951          * If a zero value, it means there is no HSGEAR capability.
3952          * Then, get the maximum gears of PWM speed.
3953          */
3954         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3955         if (!pwr_info->gear_rx) {
3956                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3957                                 &pwr_info->gear_rx);
3958                 if (!pwr_info->gear_rx) {
3959                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3960                                 __func__, pwr_info->gear_rx);
3961                         return -EINVAL;
3962                 }
3963                 pwr_info->pwr_rx = SLOW_MODE;
3964         }
3965
3966         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3967                         &pwr_info->gear_tx);
3968         if (!pwr_info->gear_tx) {
3969                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3970                                 &pwr_info->gear_tx);
3971                 if (!pwr_info->gear_tx) {
3972                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3973                                 __func__, pwr_info->gear_tx);
3974                         return -EINVAL;
3975                 }
3976                 pwr_info->pwr_tx = SLOW_MODE;
3977         }
3978
3979         hba->max_pwr_info.is_valid = true;
3980         return 0;
3981 }
3982
3983 static int ufshcd_change_power_mode(struct ufs_hba *hba,
3984                              struct ufs_pa_layer_attr *pwr_mode)
3985 {
3986         int ret;
3987
3988         /* if already configured to the requested pwr_mode */
3989         if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3990             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3991             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3992             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3993             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3994             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3995             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
3996                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
3997                 return 0;
3998         }
3999
4000         /*
4001          * Configure attributes for power mode change with below.
4002          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4003          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4004          * - PA_HSSERIES
4005          */
4006         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4007         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4008                         pwr_mode->lane_rx);
4009         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4010                         pwr_mode->pwr_rx == FAST_MODE)
4011                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4012         else
4013                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4014
4015         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4016         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4017                         pwr_mode->lane_tx);
4018         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4019                         pwr_mode->pwr_tx == FAST_MODE)
4020                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4021         else
4022                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4023
4024         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4025             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4026             pwr_mode->pwr_rx == FAST_MODE ||
4027             pwr_mode->pwr_tx == FAST_MODE)
4028                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4029                                                 pwr_mode->hs_rate);
4030
4031         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4032                         DL_FC0ProtectionTimeOutVal_Default);
4033         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4034                         DL_TC0ReplayTimeOutVal_Default);
4035         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4036                         DL_AFC0ReqTimeOutVal_Default);
4037         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4038                         DL_FC1ProtectionTimeOutVal_Default);
4039         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4040                         DL_TC1ReplayTimeOutVal_Default);
4041         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4042                         DL_AFC1ReqTimeOutVal_Default);
4043
4044         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4045                         DL_FC0ProtectionTimeOutVal_Default);
4046         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4047                         DL_TC0ReplayTimeOutVal_Default);
4048         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4049                         DL_AFC0ReqTimeOutVal_Default);
4050
4051         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4052                         | pwr_mode->pwr_tx);
4053
4054         if (ret) {
4055                 dev_err(hba->dev,
4056                         "%s: power mode change failed %d\n", __func__, ret);
4057         } else {
4058                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4059                                                                 pwr_mode);
4060
4061                 memcpy(&hba->pwr_info, pwr_mode,
4062                         sizeof(struct ufs_pa_layer_attr));
4063         }
4064
4065         return ret;
4066 }
4067
4068 /**
4069  * ufshcd_config_pwr_mode - configure a new power mode
4070  * @hba: per-adapter instance
4071  * @desired_pwr_mode: desired power configuration
4072  */
4073 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4074                 struct ufs_pa_layer_attr *desired_pwr_mode)
4075 {
4076         struct ufs_pa_layer_attr final_params = { 0 };
4077         int ret;
4078
4079         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4080                                         desired_pwr_mode, &final_params);
4081
4082         if (ret)
4083                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4084
4085         ret = ufshcd_change_power_mode(hba, &final_params);
4086         if (!ret)
4087                 ufshcd_print_pwr_info(hba);
4088
4089         return ret;
4090 }
4091 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
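/*
 * Illustrative sketch (editor's example, not part of the driver): a
 * typical caller first queries the fastest mode both link partners
 * support and then applies it, as the probe path does with
 * hba->max_pwr_info:
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */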
4092
4093 /**
4094  * ufshcd_complete_dev_init() - checks device readiness
4095  * @hba: per-adapter instance
4096  *
4097  * Set fDeviceInit flag and poll until device toggles it.
4098  */
4099 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4100 {
4101         int i;
4102         int err;
4103         bool flag_res = true;
4104
4105         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4106                 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4107         if (err) {
4108                 dev_err(hba->dev,
4109                         "%s setting fDeviceInit flag failed with error %d\n",
4110                         __func__, err);
4111                 goto out;
4112         }
4113
4114         /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4115         for (i = 0; i < 1000 && !err && flag_res; i++)
4116                 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4117                         QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4118
4119         if (err)
4120                 dev_err(hba->dev,
4121                         "%s reading fDeviceInit flag failed with error %d\n",
4122                         __func__, err);
4123         else if (flag_res)
4124                 dev_err(hba->dev,
4125                         "%s fDeviceInit was not cleared by the device\n",
4126                         __func__);
4127
4128 out:
4129         return err;
4130 }
4131
4132 /**
4133  * ufshcd_make_hba_operational - Make UFS controller operational
4134  * @hba: per adapter instance
4135  *
4136  * To bring UFS host controller to operational state,
4137  * 1. Enable required interrupts
4138  * 2. Configure interrupt aggregation
4139  * 3. Program UTRL and UTMRL base address
4140  * 4. Configure run-stop-registers
4141  *
4142  * Returns 0 on success, non-zero value on failure
4143  */
4144 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4145 {
4146         int err = 0;
4147         u32 reg;
4148
4149         /* Enable required interrupts */
4150         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4151
4152         /* Configure interrupt aggregation */
4153         if (ufshcd_is_intr_aggr_allowed(hba))
4154                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4155         else
4156                 ufshcd_disable_intr_aggr(hba);
4157
4158         /* Configure UTRL and UTMRL base address registers */
4159         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4160                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4161         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4162                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4163         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4164                         REG_UTP_TASK_REQ_LIST_BASE_L);
4165         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4166                         REG_UTP_TASK_REQ_LIST_BASE_H);
4167
4168         /*
4169          * Make sure base address and interrupt setup are updated before
4170          * enabling the run/stop registers below.
4171          */
4172         wmb();
4173
4174         /*
4175          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4176          */
4177         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4178         if (!(ufshcd_get_lists_status(reg))) {
4179                 ufshcd_enable_run_stop_reg(hba);
4180         } else {
4181                 dev_err(hba->dev,
4182                         "Host controller not ready to process requests");
4183                 err = -EIO;
4184                 goto out;
4185         }
4186
4187 out:
4188         return err;
4189 }
4190
4191 /**
4192  * ufshcd_hba_stop - Send controller to reset state
4193  * @hba: per adapter instance
4194  * @can_sleep: perform sleep or just spin
4195  */
4196 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4197 {
4198         int err;
4199
4200         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4201         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4202                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4203                                         10, 1, can_sleep);
4204         if (err)
4205                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4206 }
4207
4208 /**
4209  * ufshcd_hba_execute_hce - initialize the controller
4210  * @hba: per adapter instance
4211  *
4212  * The controller resets itself and controller firmware initialization
4213  * sequence kicks off. When controller is ready it will set
4214  * the Host Controller Enable bit to 1.
4215  *
4216  * Returns 0 on success, non-zero value on failure
4217  */
4218 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4219 {
4220         int retry;
4221
4222         if (!ufshcd_is_hba_active(hba))
4223                 /* change controller state to "reset state" */
4224                 ufshcd_hba_stop(hba, true);
4225
4226         /* UniPro link is disabled at this point */
4227         ufshcd_set_link_off(hba);
4228
4229         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4230
4231         /* start controller initialization sequence */
4232         ufshcd_hba_start(hba);
4233
4234         /*
4235          * To initialize a UFS host controller HCE bit must be set to 1.
4236          * During initialization the HCE bit value changes from 1->0->1.
4237          * When the host controller completes initialization sequence
4238          * it sets the value of HCE bit to 1. The same HCE bit is read back
4239          * to check if the controller has completed initialization sequence.
4240          * So without this delay, the HCE = 1 value written by the previous
4241          * instruction might be read back before the controller has cleared it.
4242          * This delay can be changed based on the controller.
4243          */
4244         usleep_range(1000, 1100);
4245
4246         /* wait for the host controller to complete initialization */
4247         retry = 10;
4248         while (ufshcd_is_hba_active(hba)) {
4249                 if (retry) {
4250                         retry--;
4251                 } else {
4252                         dev_err(hba->dev,
4253                                 "Controller enable failed\n");
4254                         return -EIO;
4255                 }
4256                 usleep_range(5000, 5100);
4257         }
4258
4259         /* enable UIC related interrupts */
4260         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4261
4262         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4263
4264         return 0;
4265 }
4266
4267 static int ufshcd_hba_enable(struct ufs_hba *hba)
4268 {
4269         int ret;
4270
4271         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4272                 ufshcd_set_link_off(hba);
4273                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4274
4275                 /* enable UIC related interrupts */
4276                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4277                 ret = ufshcd_dme_reset(hba);
4278                 if (!ret) {
4279                         ret = ufshcd_dme_enable(hba);
4280                         if (!ret)
4281                                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4282                         if (ret)
4283                                 dev_err(hba->dev,
4284                                         "Host controller enable failed with non-hce\n");
4285                 }
4286         } else {
4287                 ret = ufshcd_hba_execute_hce(hba);
4288         }
4289
4290         return ret;
4291 }
4292 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4293 {
4294         int tx_lanes, i, err = 0;
4295
4296         if (!peer)
4297                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4298                                &tx_lanes);
4299         else
4300                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4301                                     &tx_lanes);
4302         for (i = 0; i < tx_lanes; i++) {
4303                 if (!peer)
4304                         err = ufshcd_dme_set(hba,
4305                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4306                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4307                                         0);
4308                 else
4309                         err = ufshcd_dme_peer_set(hba,
4310                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4311                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4312                                         0);
4313                 if (err) {
4314                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4315                                 __func__, peer, i, err);
4316                         break;
4317                 }
4318         }
4319
4320         return err;
4321 }
4322
4323 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4324 {
4325         return ufshcd_disable_tx_lcc(hba, true);
4326 }
4327
4328 static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4329                                    u32 reg)
4330 {
4331         reg_hist->reg[reg_hist->pos] = reg;
4332         reg_hist->tstamp[reg_hist->pos] = ktime_get();
4333         reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4334 }
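
/*
 * Editorial sketch (hypothetical helper, illustrative only, hence the
 * "#if 0" guard): ufshcd_update_reg_hist() above fills a fixed-size ring,
 * so reg_hist->pos always points at the slot that will be overwritten
 * next, i.e. the oldest entry. Walking oldest-first therefore starts at
 * pos and wraps modulo UFS_ERR_REG_HIST_LENGTH.
 */
#if 0
static void ufshcd_err_reg_hist_walk(struct ufs_err_reg_hist *reg_hist)
{
        int i;

        for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
                int p = (reg_hist->pos + i) % UFS_ERR_REG_HIST_LENGTH;

                /* slots that were never written still hold tstamp == 0 */
                if (!ktime_to_us(reg_hist->tstamp[p]))
                        continue;
                pr_info("hist[%d]: 0x%08x at %lld us\n", p, reg_hist->reg[p],
                        (long long)ktime_to_us(reg_hist->tstamp[p]));
        }
}
#endif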
4335
4336 /**
4337  * ufshcd_link_startup - Initialize unipro link startup
4338  * @hba: per adapter instance
4339  *
4340  * Returns 0 for success, non-zero in case of failure
4341  */
4342 static int ufshcd_link_startup(struct ufs_hba *hba)
4343 {
4344         int ret;
4345         int retries = DME_LINKSTARTUP_RETRIES;
4346         bool link_startup_again = false;
4347
4348         /*
4349          * If UFS device isn't active then we will have to issue link startup
4350          * 2 times to make sure the device state move to active.
4351          */
4352         if (!ufshcd_is_ufs_dev_active(hba))
4353                 link_startup_again = true;
4354
4355 link_startup:
4356         do {
4357                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4358
4359                 ret = ufshcd_dme_link_startup(hba);
4360
4361                 /* check if device is detected by inter-connect layer */
4362                 if (!ret && !ufshcd_is_device_present(hba)) {
4363                         ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4364                                                0);
4365                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4366                         ret = -ENXIO;
4367                         goto out;
4368                 }
4369
4370                 /*
4371                  * DME link lost indication is only received when link is up,
4372                  * but we can't be sure if the link is up until link startup
4373          * succeeds. So reset the local UniPro and try again.
4374                  */
4375                 if (ret && ufshcd_hba_enable(hba)) {
4376                         ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4377                                                (u32)ret);
4378                         goto out;
4379                 }
4380         } while (ret && retries--);
4381
4382         if (ret) {
4383                 /* failed to get the link up... give up */
4384                 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4385                                        (u32)ret);
4386                 goto out;
4387         }
4388
4389         if (link_startup_again) {
4390                 link_startup_again = false;
4391                 retries = DME_LINKSTARTUP_RETRIES;
4392                 goto link_startup;
4393         }
4394
4395         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4396         ufshcd_init_pwr_info(hba);
4397         ufshcd_print_pwr_info(hba);
4398
4399         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4400                 ret = ufshcd_disable_device_tx_lcc(hba);
4401                 if (ret)
4402                         goto out;
4403         }
4404
4405         /* Include any host controller configuration via UIC commands */
4406         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4407         if (ret)
4408                 goto out;
4409
4410         ret = ufshcd_make_hba_operational(hba);
4411 out:
4412         if (ret) {
4413                 dev_err(hba->dev, "link startup failed %d\n", ret);
4414                 ufshcd_print_host_state(hba);
4415                 ufshcd_print_pwr_info(hba);
4416                 ufshcd_print_host_regs(hba);
4417         }
4418         return ret;
4419 }
4420
4421 /**
4422  * ufshcd_verify_dev_init() - Verify device initialization
4423  * @hba: per-adapter instance
4424  *
4425  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4426  * device Transport Protocol (UTP) layer is ready after a reset.
4427  * If the UTP layer at the device side is not initialized, it may
4428  * not respond with NOP IN UPIU within the timeout of %NOP_OUT_TIMEOUT
4429  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4430  */
4431 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4432 {
4433         int err = 0;
4434         int retries;
4435
4436         ufshcd_hold(hba, false);
4437         mutex_lock(&hba->dev_cmd.lock);
4438         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4439                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4440                                                NOP_OUT_TIMEOUT);
4441
4442                 if (!err || err == -ETIMEDOUT)
4443                         break;
4444
4445                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4446         }
4447         mutex_unlock(&hba->dev_cmd.lock);
4448         ufshcd_release(hba);
4449
4450         if (err)
4451                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4452         return err;
4453 }
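
/*
 * Editorial note: the retry loop above gives up as soon as the NOP OUT
 * round trip times out (-ETIMEDOUT) and only retries other transient
 * errors, the assumption being that a device whose UTP layer never
 * answers within the timeout would not be helped by immediate resends.
 */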
4454
4455 /**
4456  * ufshcd_set_queue_depth - set lun queue depth
4457  * @sdev: pointer to SCSI device
4458  *
4459  * Read bLUQueueDepth value and activate scsi tagged command
4460  * queueing. For WLUN, queue depth is set to 1. For best-effort
4461  * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4462  * value that the host can queue.
4463  */
4464 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4465 {
4466         int ret = 0;
4467         u8 lun_qdepth;
4468         struct ufs_hba *hba;
4469
4470         hba = shost_priv(sdev->host);
4471
4472         lun_qdepth = hba->nutrs;
4473         ret = ufshcd_read_unit_desc_param(hba,
4474                                           ufshcd_scsi_to_upiu_lun(sdev->lun),
4475                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
4476                                           &lun_qdepth,
4477                                           sizeof(lun_qdepth));
4478
4479         /* Some WLUNs don't support the unit descriptor */
4480         if (ret == -EOPNOTSUPP)
4481                 lun_qdepth = 1;
4482         else if (!lun_qdepth)
4483                 /* eventually, we can figure out the real queue depth */
4484                 lun_qdepth = hba->nutrs;
4485         else
4486                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4487
4488         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4489                         __func__, lun_qdepth);
4490         scsi_change_queue_depth(sdev, lun_qdepth);
4491 }
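
/*
 * Worked example of the policy above (editorial, illustrative values):
 * with hba->nutrs == 32, a LU reporting bLUQueueDepth == 64 is clamped to
 * 32, bLUQueueDepth == 0 ("best effort") also yields 32, and a W-LU whose
 * unit descriptor read fails with -EOPNOTSUPP falls back to a queue depth
 * of 1.
 */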
4492
4493 /**
4494  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4495  * @hba: per-adapter instance
4496  * @lun: UFS device lun id
4497  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4498  *
4499  * Returns 0 in case of success, with the b_lu_write_protect status returned
4500  * in the @b_lu_write_protect parameter.
4501  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4502  * Returns -EINVAL in case of invalid parameters passed to this function.
4503  */
4504 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4505                             u8 lun,
4506                             u8 *b_lu_write_protect)
4507 {
4508         int ret;
4509
4510         if (!b_lu_write_protect)
4511                 ret = -EINVAL;
4512         /*
4513          * According to UFS device spec, RPMB LU can't be write
4514          * protected so skip reading bLUWriteProtect parameter for
4515          * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4516          */
4517         else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4518                 ret = -ENOTSUPP;
4519         else
4520                 ret = ufshcd_read_unit_desc_param(hba,
4521                                           lun,
4522                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
4523                                           b_lu_write_protect,
4524                                           sizeof(*b_lu_write_protect));
4525         return ret;
4526 }
4527
4528 /**
4529  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4530  * status
4531  * @hba: per-adapter instance
4532  * @sdev: pointer to SCSI device
4533  *
4534  */
4535 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4536                                                     struct scsi_device *sdev)
4537 {
4538         if (hba->dev_info.f_power_on_wp_en &&
4539             !hba->dev_info.is_lu_power_on_wp) {
4540                 u8 b_lu_write_protect;
4541
4542                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4543                                       &b_lu_write_protect) &&
4544                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4545                         hba->dev_info.is_lu_power_on_wp = true;
4546         }
4547 }
4548
4549 /**
4550  * ufshcd_slave_alloc - handle initial SCSI device configurations
4551  * @sdev: pointer to SCSI device
4552  *
4553  * Always returns 0 (success)
4554  */
4555 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4556 {
4557         struct ufs_hba *hba;
4558
4559         hba = shost_priv(sdev->host);
4560
4561         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4562         sdev->use_10_for_ms = 1;
4563
4564         /* DBD field should be set to 1 in mode sense(10) */
4565         sdev->set_dbd_for_ms = 1;
4566
4567         /* allow SCSI layer to restart the device in case of errors */
4568         sdev->allow_restart = 1;
4569
4570         /* REPORT SUPPORTED OPERATION CODES is not supported */
4571         sdev->no_report_opcodes = 1;
4572
4573         /* WRITE_SAME command is not supported */
4574         sdev->no_write_same = 1;
4575
4576         ufshcd_set_queue_depth(sdev);
4577
4578         ufshcd_get_lu_power_on_wp_status(hba, sdev);
4579
4580         return 0;
4581 }
4582
4583 /**
4584  * ufshcd_change_queue_depth - change queue depth
4585  * @sdev: pointer to SCSI device
4586  * @depth: required depth to set
4587  *
4588  * Change queue depth and make sure the max. limits are not crossed.
4589  */
4590 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4591 {
4592         struct ufs_hba *hba = shost_priv(sdev->host);
4593
4594         if (depth > hba->nutrs)
4595                 depth = hba->nutrs;
4596         return scsi_change_queue_depth(sdev, depth);
4597 }
4598
4599 /**
4600  * ufshcd_slave_configure - adjust SCSI device configurations
4601  * @sdev: pointer to SCSI device
4602  */
4603 static int ufshcd_slave_configure(struct scsi_device *sdev)
4604 {
4605         struct ufs_hba *hba = shost_priv(sdev->host);
4606         struct request_queue *q = sdev->request_queue;
4607
4608         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4609
4610         if (ufshcd_is_rpm_autosuspend_allowed(hba))
4611                 sdev->rpm_autosuspend = 1;
4612
4613         return 0;
4614 }
4615
4616 /**
4617  * ufshcd_slave_destroy - remove SCSI device configurations
4618  * @sdev: pointer to SCSI device
4619  */
4620 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4621 {
4622         struct ufs_hba *hba;
4623
4624         hba = shost_priv(sdev->host);
4625         /* Drop the reference as it won't be needed anymore */
4626         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4627                 unsigned long flags;
4628
4629                 spin_lock_irqsave(hba->host->host_lock, flags);
4630                 hba->sdev_ufs_device = NULL;
4631                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4632         }
4633 }
4634
4635 /**
4636  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4637  * @lrbp: pointer to local reference block of completed command
4638  * @scsi_status: SCSI command status
4639  *
4640  * Returns value based on SCSI command status
4641  */
4642 static inline int
4643 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4644 {
4645         int result = 0;
4646
4647         switch (scsi_status) {
4648         case SAM_STAT_CHECK_CONDITION:
4649                 ufshcd_copy_sense_data(lrbp);
4650                 /* fallthrough */
4651         case SAM_STAT_GOOD:
4652                 result |= DID_OK << 16 |
4653                           COMMAND_COMPLETE << 8 |
4654                           scsi_status;
4655                 break;
4656         case SAM_STAT_TASK_SET_FULL:
4657         case SAM_STAT_BUSY:
4658         case SAM_STAT_TASK_ABORTED:
4659                 ufshcd_copy_sense_data(lrbp);
4660                 result |= scsi_status;
4661                 break;
4662         default:
4663                 result |= DID_ERROR << 16;
4664                 break;
4665         } /* end of switch */
4666
4667         return result;
4668 }
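
/*
 * Worked example (editorial, illustrative only, hence the "#if 0" guard):
 * the result word assembled above packs three byte-wide fields that the
 * SCSI midlayer later unpacks:
 *
 *   result = (host byte << 16) | (message byte << 8) | SCSI status
 *
 * so DID_OK (0x00), COMMAND_COMPLETE (0x00) and SAM_STAT_GOOD (0x00) yield
 * result == 0, while a DID_ERROR host byte alone yields 0x00070000.
 */
#if 0
static void ufshcd_decode_result(int result)
{
        pr_info("host=0x%x msg=0x%x status=0x%x\n",
                host_byte(result), (result >> 8) & 0xff, result & 0xff);
}
#endif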
4669
4670 /**
4671  * ufshcd_transfer_rsp_status - Get overall status of the response
4672  * @hba: per adapter instance
4673  * @lrbp: pointer to local reference block of completed command
4674  *
4675  * Returns result of the command to notify SCSI midlayer
4676  */
4677 static inline int
4678 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4679 {
4680         int result = 0;
4681         int scsi_status;
4682         int ocs;
4683
4684         /* overall command status of utrd */
4685         ocs = ufshcd_get_tr_ocs(lrbp);
4686
4687         switch (ocs) {
4688         case OCS_SUCCESS:
4689                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4690                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4691                 switch (result) {
4692                 case UPIU_TRANSACTION_RESPONSE:
4693                         /*
4694                          * get the response UPIU result to extract
4695                          * the SCSI command status
4696                          */
4697                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4698
4699                         /*
4700                          * get the result based on SCSI status response
4701                          * to notify the SCSI midlayer of the command status
4702                          */
4703                         scsi_status = result & MASK_SCSI_STATUS;
4704                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4705
4706                         /*
4707                          * Currently we only support BKOPs exception events,
4708                          * hence we can ignore BKOPs exception events during
4709                          * power management callbacks. A BKOPs exception
4710                          * event is not expected to be raised in the runtime
4711                          * suspend callback as it allows urgent bkops.
4712                          * During system suspend, we are anyway forcefully
4713                          * disabling the bkops and if urgent bkops is needed
4714                          * it will be enabled on system resume. Long term
4715                          * solution could be to abort the system suspend if
4716                          * UFS device needs urgent BKOPs.
4717                          */
4718                         if (!hba->pm_op_in_progress &&
4719                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4720                                 schedule_work(&hba->eeh_work);
4721                         break;
4722                 case UPIU_TRANSACTION_REJECT_UPIU:
4723                         /* TODO: handle Reject UPIU Response */
4724                         result = DID_ERROR << 16;
4725                         dev_err(hba->dev,
4726                                 "Reject UPIU not fully implemented\n");
4727                         break;
4728                 default:
4729                         dev_err(hba->dev,
4730                                 "Unexpected request response code = %x\n",
4731                                 result);
4732                         result = DID_ERROR << 16;
4733                         break;
4734                 }
4735                 break;
4736         case OCS_ABORTED:
4737                 result |= DID_ABORT << 16;
4738                 break;
4739         case OCS_INVALID_COMMAND_STATUS:
4740                 result |= DID_REQUEUE << 16;
4741                 break;
4742         case OCS_INVALID_CMD_TABLE_ATTR:
4743         case OCS_INVALID_PRDT_ATTR:
4744         case OCS_MISMATCH_DATA_BUF_SIZE:
4745         case OCS_MISMATCH_RESP_UPIU_SIZE:
4746         case OCS_PEER_COMM_FAILURE:
4747         case OCS_FATAL_ERROR:
4748         default:
4749                 result |= DID_ERROR << 16;
4750                 dev_err(hba->dev,
4751                                 "OCS error from controller = %x for tag %d\n",
4752                                 ocs, lrbp->task_tag);
4753                 ufshcd_print_host_regs(hba);
4754                 ufshcd_print_host_state(hba);
4755                 break;
4756         } /* end of switch */
4757
4758         if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
4759                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4760         return result;
4761 }
4762
4763 /**
4764  * ufshcd_uic_cmd_compl - handle completion of uic command
4765  * @hba: per adapter instance
4766  * @intr_status: interrupt status generated by the controller
4767  *
4768  * Returns
4769  *  IRQ_HANDLED - If interrupt is valid
4770  *  IRQ_NONE    - If invalid interrupt
4771  */
4772 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4773 {
4774         irqreturn_t retval = IRQ_NONE;
4775
4776         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4777                 hba->active_uic_cmd->argument2 |=
4778                         ufshcd_get_uic_cmd_result(hba);
4779                 hba->active_uic_cmd->argument3 =
4780                         ufshcd_get_dme_attr_val(hba);
4781                 complete(&hba->active_uic_cmd->done);
4782                 retval = IRQ_HANDLED;
4783         }
4784
4785         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
4786                 complete(hba->uic_async_done);
4787                 retval = IRQ_HANDLED;
4788         }
4789         return retval;
4790 }
4791
4792 /**
4793  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4794  * @hba: per adapter instance
4795  * @completed_reqs: requests to complete
4796  */
4797 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4798                                         unsigned long completed_reqs)
4799 {
4800         struct ufshcd_lrb *lrbp;
4801         struct scsi_cmnd *cmd;
4802         int result;
4803         int index;
4804
4805         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4806                 lrbp = &hba->lrb[index];
4807                 cmd = lrbp->cmd;
4808                 if (cmd) {
4809                         ufshcd_add_command_trace(hba, index, "complete");
4810                         result = ufshcd_transfer_rsp_status(hba, lrbp);
4811                         scsi_dma_unmap(cmd);
4812                         cmd->result = result;
4813                         /* Mark completed command as NULL in LRB */
4814                         lrbp->cmd = NULL;
4815                         lrbp->compl_time_stamp = ktime_get();
4816                         /* Do not touch lrbp after scsi done */
4817                         cmd->scsi_done(cmd);
4818                         __ufshcd_release(hba);
4819                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4820                         lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4821                         lrbp->compl_time_stamp = ktime_get();
4822                         if (hba->dev_cmd.complete) {
4823                                 ufshcd_add_command_trace(hba, index,
4824                                                 "dev_complete");
4825                                 complete(hba->dev_cmd.complete);
4826                         }
4827                 }
4828                 if (ufshcd_is_clkscaling_supported(hba))
4829                         hba->clk_scaling.active_reqs--;
4830         }
4831
4832         /* clear corresponding bits of completed commands */
4833         hba->outstanding_reqs ^= completed_reqs;
4834
4835         ufshcd_clk_scaling_update_busy(hba);
4836 }
4837
4838 /**
4839  * ufshcd_transfer_req_compl - handle SCSI and query command completion
4840  * @hba: per adapter instance
4841  *
4842  * Returns
4843  *  IRQ_HANDLED - If interrupt is valid
4844  *  IRQ_NONE    - If invalid interrupt
4845  */
4846 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
4847 {
4848         unsigned long completed_reqs;
4849         u32 tr_doorbell;
4850
4851         /* Resetting interrupt aggregation counters first and reading the
4852          * DOOR_BELL afterward allows us to handle all the completed requests.
4853          * In order to prevent starvation of other interrupts, the DB is read
4854          * only once after reset. The downside of this approach is a possible
4855          * false interrupt if the device completes another request after the
4856          * aggregation reset and before the DB is read.
4857          */
4858         if (ufshcd_is_intr_aggr_allowed(hba) &&
4859             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
4860                 ufshcd_reset_intr_aggr(hba);
4861
4862         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4863         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4864
4865         if (completed_reqs) {
4866                 __ufshcd_transfer_req_compl(hba, completed_reqs);
4867                 return IRQ_HANDLED;
4868         } else {
4869                 return IRQ_NONE;
4870         }
4871 }
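
/*
 * Worked example (editorial) of the completion math above, with
 * hypothetical values: bits that are set in hba->outstanding_reqs but
 * already cleared from the doorbell belong to finished requests.
 *
 *   outstanding_reqs = 0b1011   (tags 0, 1 and 3 were issued)
 *   tr_doorbell      = 0b0010   (only tag 1 is still owned by the HW)
 *   completed_reqs   = tr_doorbell ^ outstanding_reqs = 0b1001
 *                      (tags 0 and 3 have completed)
 */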
4872
4873 /**
4874  * ufshcd_disable_ee - disable exception event
4875  * @hba: per-adapter instance
4876  * @mask: exception event to disable
4877  *
4878  * Disables exception event in the device so that the EVENT_ALERT
4879  * bit is not set.
4880  *
4881  * Returns zero on success, non-zero error value on failure.
4882  */
4883 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4884 {
4885         int err = 0;
4886         u32 val;
4887
4888         if (!(hba->ee_ctrl_mask & mask))
4889                 goto out;
4890
4891         val = hba->ee_ctrl_mask & ~mask;
4892         val &= MASK_EE_STATUS;
4893         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4894                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4895         if (!err)
4896                 hba->ee_ctrl_mask &= ~mask;
4897 out:
4898         return err;
4899 }
4900
4901 /**
4902  * ufshcd_enable_ee - enable exception event
4903  * @hba: per-adapter instance
4904  * @mask: exception event to enable
4905  *
4906  * Enable corresponding exception event in the device to allow
4907  * device to alert host in critical scenarios.
4908  *
4909  * Returns zero on success, non-zero error value on failure.
4910  */
4911 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4912 {
4913         int err = 0;
4914         u32 val;
4915
4916         if (hba->ee_ctrl_mask & mask)
4917                 goto out;
4918
4919         val = hba->ee_ctrl_mask | mask;
4920         val &= MASK_EE_STATUS;
4921         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4922                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4923         if (!err)
4924                 hba->ee_ctrl_mask |= mask;
4925 out:
4926         return err;
4927 }
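
/*
 * Hedged sketch (hypothetical caller, illustrative only, hence the
 * "#if 0" guard): the two helpers above keep the cached hba->ee_ctrl_mask
 * and the device's exception event control attribute in lockstep by
 * updating the cache only after the attribute write succeeds.
 */
#if 0
static int ufshcd_ee_roundtrip_example(struct ufs_hba *hba)
{
        int err;

        err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
        if (err)
                return err;
        /* ... the exception is consumed in ufshcd_exception_event_handler() ... */
        return ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
}
#endif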
4928
4929 /**
4930  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4931  * @hba: per-adapter instance
4932  *
4933  * Allow device to manage background operations on its own. Enabling
4934  * this might lead to inconsistent latencies during normal data transfers
4935  * as the device is free to schedule background operations at times of
4936  * its own choosing.
4937  *
4938  * Returns zero on success, non-zero on failure.
4939  */
4940 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4941 {
4942         int err = 0;
4943
4944         if (hba->auto_bkops_enabled)
4945                 goto out;
4946
4947         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4948                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
4949         if (err) {
4950                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4951                                 __func__, err);
4952                 goto out;
4953         }
4954
4955         hba->auto_bkops_enabled = true;
4956         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
4957
4958         /* No need of URGENT_BKOPS exception from the device */
4959         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4960         if (err)
4961                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4962                                 __func__, err);
4963 out:
4964         return err;
4965 }
4966
4967 /**
4968  * ufshcd_disable_auto_bkops - block device in doing background operations
4969  * @hba: per-adapter instance
4970  *
4971  * Disabling background operations improves command response latency but
4972  * has the drawback that the device may move into a critical state in which
4973  * it is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
4974  * host is idle so that BKOPS are managed effectively without any negative
4975  * impacts.
4976  *
4977  * Returns zero on success, non-zero on failure.
4978  */
4979 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4980 {
4981         int err = 0;
4982
4983         if (!hba->auto_bkops_enabled)
4984                 goto out;
4985
4986         /*
4987          * If host assisted BKOPs is to be enabled, make sure
4988          * urgent bkops exception is allowed.
4989          */
4990         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4991         if (err) {
4992                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4993                                 __func__, err);
4994                 goto out;
4995         }
4996
4997         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
4998                         QUERY_FLAG_IDN_BKOPS_EN, NULL);
4999         if (err) {
5000                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5001                                 __func__, err);
5002                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5003                 goto out;
5004         }
5005
5006         hba->auto_bkops_enabled = false;
5007         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5008         hba->is_urgent_bkops_lvl_checked = false;
5009 out:
5010         return err;
5011 }
5012
5013 /**
5014  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5015  * @hba: per adapter instance
5016  *
5017  * After a device reset the device may toggle the BKOPS_EN flag
5018  * to its default value. The s/w tracking variables should be updated
5019  * as well. This function changes the auto-bkops state based on
5020  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5021  */
5022 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5023 {
5024         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5025                 hba->auto_bkops_enabled = false;
5026                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5027                 ufshcd_enable_auto_bkops(hba);
5028         } else {
5029                 hba->auto_bkops_enabled = true;
5030                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5031                 ufshcd_disable_auto_bkops(hba);
5032         }
5033         hba->is_urgent_bkops_lvl_checked = false;
5034 }
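
/*
 * Editorial note: the cached state is deliberately set to the opposite of
 * the desired one before calling the helper above. Both
 * ufshcd_enable_auto_bkops() and ufshcd_disable_auto_bkops() return early
 * when the cache already matches, so inverting it first guarantees the
 * BKOPS_EN flag is actually re-written to the device after a reset.
 */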
5035
5036 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5037 {
5038         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5039                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5040 }
5041
5042 /**
5043  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5044  * @hba: per-adapter instance
5045  * @status: bkops_status value
5046  *
5047  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5048  * flag in the device to permit background operations if the device's
5049  * bkops_status is greater than or equal to the "status" argument passed to
5050  * this function; disable it otherwise.
5051  *
5052  * Returns 0 for success, non-zero in case of failure.
5053  *
5054  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5055  * to know whether auto bkops is enabled or disabled after this function
5056  * returns control to it.
5057  */
5058 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5059                              enum bkops_status status)
5060 {
5061         int err;
5062         u32 curr_status = 0;
5063
5064         err = ufshcd_get_bkops_status(hba, &curr_status);
5065         if (err) {
5066                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5067                                 __func__, err);
5068                 goto out;
5069         } else if (curr_status > BKOPS_STATUS_MAX) {
5070                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5071                                 __func__, curr_status);
5072                 err = -EINVAL;
5073                 goto out;
5074         }
5075
5076         if (curr_status >= status)
5077                 err = ufshcd_enable_auto_bkops(hba);
5078         else
5079                 err = ufshcd_disable_auto_bkops(hba);
5080         hba->urgent_bkops_lvl = curr_status;
5081 out:
5082         return err;
5083 }
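
/*
 * Minimal usage sketch (hypothetical caller, illustrative only, hence the
 * "#if 0" guard): keep auto-BKOPS enabled only while the device reports at
 * least a performance impact, and inspect hba->auto_bkops_enabled for the
 * decision taken.
 */
#if 0
static void ufshcd_bkops_policy_example(struct ufs_hba *hba)
{
        if (ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT))
                dev_warn(hba->dev, "%s: BKOPS policy update failed\n",
                         __func__);
        else if (hba->auto_bkops_enabled)
                dev_dbg(hba->dev, "%s: auto-BKOPS left enabled\n", __func__);
}
#endif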
5084
5085 /**
5086  * ufshcd_urgent_bkops - handle urgent bkops exception event
5087  * @hba: per-adapter instance
5088  *
5089  * Enable fBackgroundOpsEn flag in the device to permit background
5090  * operations.
5091  *
5092  * If BKOPs is enabled, this function returns 0; it returns 1 if bkops is
5093  * not enabled, and a negative error value for any other failure.
5094  */
5095 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5096 {
5097         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5098 }
5099
5100 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5101 {
5102         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5103                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5104 }
5105
5106 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5107 {
5108         int err;
5109         u32 curr_status = 0;
5110
5111         if (hba->is_urgent_bkops_lvl_checked)
5112                 goto enable_auto_bkops;
5113
5114         err = ufshcd_get_bkops_status(hba, &curr_status);
5115         if (err) {
5116                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5117                                 __func__, err);
5118                 goto out;
5119         }
5120
5121         /*
5122          * We are seeing that some devices are raising the urgent bkops
5123          * exception events even when the BKOPS status doesn't indicate a
5124          * performance impact or critical state. Handle such devices by
5125          * determining their urgent bkops status at runtime.
5126          */
5127         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5128                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5129                                 __func__, curr_status);
5130                 /* update the current status as the urgent bkops level */
5131                 hba->urgent_bkops_lvl = curr_status;
5132                 hba->is_urgent_bkops_lvl_checked = true;
5133         }
5134
5135 enable_auto_bkops:
5136         err = ufshcd_enable_auto_bkops(hba);
5137 out:
5138         if (err < 0)
5139                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5140                                 __func__, err);
5141 }
5142
5143 /**
5144  * ufshcd_exception_event_handler - handle exceptions raised by device
5145  * @work: pointer to work data
5146  *
5147  * Read bExceptionEventStatus attribute from the device and handle the
5148  * exception event accordingly.
5149  */
5150 static void ufshcd_exception_event_handler(struct work_struct *work)
5151 {
5152         struct ufs_hba *hba;
5153         int err;
5154         u32 status = 0;

5155         hba = container_of(work, struct ufs_hba, eeh_work);
5156
5157         pm_runtime_get_sync(hba->dev);
5158         scsi_block_requests(hba->host);
5159         err = ufshcd_get_ee_status(hba, &status);
5160         if (err) {
5161                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5162                                 __func__, err);
5163                 goto out;
5164         }
5165
5166         status &= hba->ee_ctrl_mask;
5167
5168         if (status & MASK_EE_URGENT_BKOPS)
5169                 ufshcd_bkops_exception_event_handler(hba);
5170
5171 out:
5172         scsi_unblock_requests(hba->host);
5173         pm_runtime_put_sync(hba->dev);
5174         return;
5175 }
5176
5177 /* Complete requests that have door-bell cleared */
5178 static void ufshcd_complete_requests(struct ufs_hba *hba)
5179 {
5180         ufshcd_transfer_req_compl(hba);
5181         ufshcd_tmc_handler(hba);
5182 }
5183
5184 /**
5185  * ufshcd_quirk_dl_nac_errors - check whether error handling is required
5186  *                              to recover from the DL NAC errors
5187  * @hba: per-adapter instance
5188  *
5189  * Returns true if error handling is required, false otherwise
5190  */
5191 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5192 {
5193         unsigned long flags;
5194         bool err_handling = true;
5195
5196         spin_lock_irqsave(hba->host->host_lock, flags);
5197         /*
5198          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5199          * device fatal error and/or DL NAC & REPLAY timeout errors.
5200          */
5201         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5202                 goto out;
5203
5204         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5205             ((hba->saved_err & UIC_ERROR) &&
5206              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5207                 goto out;
5208
5209         if ((hba->saved_err & UIC_ERROR) &&
5210             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5211                 int err;
5212                 /*
5213                  * wait 50 ms to see whether any other errors are reported.
5214                  */
5215                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5216                 msleep(50);
5217                 spin_lock_irqsave(hba->host->host_lock, flags);
5218
5219                 /*
5220                  * now check whether we have received any severe errors
5221                  * other than the DL NAC error.
5222                  */
5223                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5224                     ((hba->saved_err & UIC_ERROR) &&
5225                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5226                         goto out;
5227
5228                 /*
5229                  * As DL NAC is the only error received so far, send out NOP
5230                  * command to confirm if link is still active or not.
5231                  *   - If we don't get any response then do error recovery.
5232                  *   - If we get response then clear the DL NAC error bit.
5233                  */
5234
5235                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5236                 err = ufshcd_verify_dev_init(hba);
5237                 spin_lock_irqsave(hba->host->host_lock, flags);
5238
5239                 if (err)
5240                         goto out;
5241
5242                 /* Link seems to be alive hence ignore the DL NAC errors */
5243                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5244                         hba->saved_err &= ~UIC_ERROR;
5245                 /* clear NAC error */
5246                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5247                 if (!hba->saved_uic_err) {
5248                         err_handling = false;
5249                         goto out;
5250                 }
5251         }
5252 out:
5253         spin_unlock_irqrestore(hba->host->host_lock, flags);
5254         return err_handling;
5255 }
5256
5257 /**
5258  * ufshcd_err_handler - handle UFS errors that require s/w attention
5259  * @work: pointer to work structure
5260  */
5261 static void ufshcd_err_handler(struct work_struct *work)
5262 {
5263         struct ufs_hba *hba;
5264         unsigned long flags;
5265         u32 err_xfer = 0;
5266         u32 err_tm = 0;
5267         int err = 0;
5268         int tag;
5269         bool needs_reset = false;
5270
5271         hba = container_of(work, struct ufs_hba, eh_work);
5272
5273         pm_runtime_get_sync(hba->dev);
5274         ufshcd_hold(hba, false);
5275
5276         spin_lock_irqsave(hba->host->host_lock, flags);
5277         if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5278                 goto out;
5279
5280         hba->ufshcd_state = UFSHCD_STATE_RESET;
5281         ufshcd_set_eh_in_progress(hba);
5282
5283         /* Complete requests that have door-bell cleared by h/w */
5284         ufshcd_complete_requests(hba);
5285
5286         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5287                 bool ret;
5288
5289                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5290                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5291                 ret = ufshcd_quirk_dl_nac_errors(hba);
5292                 spin_lock_irqsave(hba->host->host_lock, flags);
5293                 if (!ret)
5294                         goto skip_err_handling;
5295         }
5296         if ((hba->saved_err & INT_FATAL_ERRORS) ||
5297             (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
5298             ((hba->saved_err & UIC_ERROR) &&
5299             (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5300                                    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5301                                    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5302                 needs_reset = true;
5303
5304         /*
5305          * if host reset is required then skip clearing the pending
5306          * transfers forcefully because they will get cleared during
5307          * host reset and restore
5308          */
5309         if (needs_reset)
5310                 goto skip_pending_xfer_clear;
5311
5312         /* release lock as clear command might sleep */
5313         spin_unlock_irqrestore(hba->host->host_lock, flags);
5314         /* Clear pending transfer requests */
5315         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5316                 if (ufshcd_clear_cmd(hba, tag)) {
5317                         err_xfer = true;
5318                         goto lock_skip_pending_xfer_clear;
5319                 }
5320         }
5321
5322         /* Clear pending task management requests */
5323         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5324                 if (ufshcd_clear_tm_cmd(hba, tag)) {
5325                         err_tm = true;
5326                         goto lock_skip_pending_xfer_clear;
5327                 }
5328         }
5329
5330 lock_skip_pending_xfer_clear:
5331         spin_lock_irqsave(hba->host->host_lock, flags);
5332
5333         /* Complete the requests that are cleared by s/w */
5334         ufshcd_complete_requests(hba);
5335
5336         if (err_xfer || err_tm)
5337                 needs_reset = true;
5338
5339 skip_pending_xfer_clear:
5340         /* Fatal errors need reset */
5341         if (needs_reset) {
5342                 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5343
5344                 /*
5345                  * ufshcd_reset_and_restore() does the link reinitialization
5346                  * which will need at least one empty doorbell slot to send the
5347                  * device management commands (NOP and query commands).
5348                  * If no slot is empty at this moment then forcefully free up
5349                  * the last slot.
5350                  */
5351                 if (hba->outstanding_reqs == max_doorbells)
5352                         __ufshcd_transfer_req_compl(hba,
5353                                                     (1UL << (hba->nutrs - 1)));
5354
5355                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5356                 err = ufshcd_reset_and_restore(hba);
5357                 spin_lock_irqsave(hba->host->host_lock, flags);
5358                 if (err) {
5359                         dev_err(hba->dev, "%s: reset and restore failed\n",
5360                                         __func__);
5361                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
5362                 }
5363                 /*
5364                  * Inform scsi mid-layer that we did reset and allow to handle
5365                  * Unit Attention properly.
5366                  */
5367                 scsi_report_bus_reset(hba->host, 0);
5368                 hba->saved_err = 0;
5369                 hba->saved_uic_err = 0;
5370         }
5371
5372 skip_err_handling:
5373         if (!needs_reset) {
5374                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5375                 if (hba->saved_err || hba->saved_uic_err)
5376                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5377                             __func__, hba->saved_err, hba->saved_uic_err);
5378         }
5379
5380         ufshcd_clear_eh_in_progress(hba);
5381
5382 out:
5383         spin_unlock_irqrestore(hba->host->host_lock, flags);
5384         ufshcd_scsi_unblock_requests(hba);
5385         ufshcd_release(hba);
5386         pm_runtime_put_sync(hba->dev);
5387 }
5388
5389 /**
5390  * ufshcd_update_uic_error - check and set fatal UIC error flags.
5391  * @hba: per-adapter instance
5392  *
5393  * Returns
5394  *  IRQ_HANDLED - If interrupt is valid
5395  *  IRQ_NONE    - If invalid interrupt
5396  */
5397 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
5398 {
5399         u32 reg;
5400         irqreturn_t retval = IRQ_NONE;
5401
5402         /* PHY layer lane error */
5403         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5404         /* Ignore LINERESET indication, as this is not an error */
5405         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5406             (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5407                 /*
5408                  * To know whether this error is fatal or not, the DB timeout
5409                  * must be checked, but that error is handled separately.
5410                  */
5411                 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5412                 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
5413                 retval |= IRQ_HANDLED;
5414         }
5415
5416         /* PA_INIT_ERROR is fatal and needs UIC reset */
5417         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5418         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
5419             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
5420                 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
5421
5422                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5423                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5424                 else if (hba->dev_quirks &
5425                                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5426                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5427                                 hba->uic_error |=
5428                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5429                         else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5430                                 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5431                 }
5432                 retval |= IRQ_HANDLED;
5433         }
5434
5435         /* UIC NL/TL/DME errors need a software retry */
5436         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5437         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
5438             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
5439                 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
5440                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5441                 retval |= IRQ_HANDLED;
5442         }
5443
5444         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5445         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
5446             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
5447                 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
5448                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5449                 retval |= IRQ_HANDLED;
5450         }
5451
5452         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5453         if ((reg & UIC_DME_ERROR) &&
5454             (reg & UIC_DME_ERROR_CODE_MASK)) {
5455                 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
5456                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5457                 retval |= IRQ_HANDLED;
5458         }
5459
5460         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5461                         __func__, hba->uic_error);
5462         return retval;
5463 }
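
/*
 * Editorial note on the PA-layer filter above: the
 * UIC_PHY_ADAPTER_LAYER_ERROR bit flags a valid error report, while
 * UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK selects the per-lane error codes.
 * A report with the valid bit set but no lane bits corresponds to a
 * LINERESET indication and is deliberately not treated as an error here.
 */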
5464
5465 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5466                                          u32 intr_mask)
5467 {
5468         if (!ufshcd_is_auto_hibern8_supported(hba))
5469                 return false;
5470
5471         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5472                 return false;
5473
5474         if (hba->active_uic_cmd &&
5475             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5476             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5477                 return false;
5478
5479         return true;
5480 }
5481
5482 /**
5483  * ufshcd_check_errors - Check for errors that need s/w attention
5484  * @hba: per-adapter instance
5485  *
5486  * Returns
5487  *  IRQ_HANDLED - If interrupt is valid
5488  *  IRQ_NONE    - If invalid interrupt
5489  */
5490 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
5491 {
5492         bool queue_eh_work = false;
5493         irqreturn_t retval = IRQ_NONE;
5494
5495         if (hba->errors & INT_FATAL_ERRORS) {
5496                 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
5497                 queue_eh_work = true;
5498         }
5499
5500         if (hba->errors & UIC_ERROR) {
5501                 hba->uic_error = 0;
5502                 retval = ufshcd_update_uic_error(hba);
5503                 if (hba->uic_error)
5504                         queue_eh_work = true;
5505         }
5506
5507         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
5508                 dev_err(hba->dev,
5509                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5510                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5511                         "Enter" : "Exit",
5512                         hba->errors, ufshcd_get_upmcrs(hba));
5513                 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
5514                                        hba->errors);
5515                 queue_eh_work = true;
5516         }
5517
5518         if (queue_eh_work) {
5519                 /*
5520                  * update the transfer error masks to sticky bits, let's do this
5521                  * irrespective of current ufshcd_state.
5522                  */
5523                 hba->saved_err |= hba->errors;
5524                 hba->saved_uic_err |= hba->uic_error;
5525
5526                 /* handle fatal errors only when link is functional */
5527                 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5528                         /* block commands from scsi mid-layer */
5529                         ufshcd_scsi_block_requests(hba);
5530
5531                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5532
5533                         /* dump controller state before resetting */
5534                         if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5535                                 bool pr_prdt = !!(hba->saved_err &
5536                                                 SYSTEM_BUS_FATAL_ERROR);
5537
5538                                 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5539                                         __func__, hba->saved_err,
5540                                         hba->saved_uic_err);
5541
5542                                 ufshcd_print_host_regs(hba);
5543                                 ufshcd_print_pwr_info(hba);
5544                                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5545                                 ufshcd_print_trs(hba, hba->outstanding_reqs,
5546                                                         pr_prdt);
5547                         }
5548                         schedule_work(&hba->eh_work);
5549                 }
5550                 retval |= IRQ_HANDLED;
5551         }
5552         /*
5553          * if (!queue_eh_work) -
5554          * Other errors are either non-fatal, in which case the host
5555          * recovers by itself without s/w intervention, or errors that
5556          * will be handled by the SCSI core layer.
5557          */
5558         return retval;
5559 }
5560
5561 struct ctm_info {
5562         struct ufs_hba  *hba;
5563         unsigned long   pending;        /* doorbell snapshot: TM tags still owned by HW */
5564         unsigned int    ncpl;           /* number of TM requests completed in this pass */
5565 };
5566
5567 static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
5568 {
5569         struct ctm_info *const ci = priv;
5570         struct completion *c;
5571
5572         WARN_ON_ONCE(reserved);
5573         if (test_bit(req->tag, &ci->pending))
5574                 return true;
5575         ci->ncpl++;
5576         c = req->end_io_data;
5577         if (c)
5578                 complete(c);
5579         return true;
5580 }
5581
5582 /**
5583  * ufshcd_tmc_handler - handle task management function completion
5584  * @hba: per adapter instance
5585  *
5586  * Returns
5587  *  IRQ_HANDLED - If interrupt is valid
5588  *  IRQ_NONE    - If invalid interrupt
5589  */
5590 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
5591 {
5592         struct request_queue *q = hba->tmf_queue;
5593         struct ctm_info ci = {
5594                 .hba     = hba,
5595                 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
5596         };
5597
5598         blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
5599         return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
5600 }
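
/*
 * Editorial note: blk_mq_tagset_busy_iter() above visits every in-flight
 * TM request; ufshcd_compl_tm() skips tags whose doorbell bit is still set
 * (still owned by the controller) and completes the rest, so only finished
 * task management functions wake their waiters.
 */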
5601
5602 /**
5603  * ufshcd_sl_intr - Interrupt service routine
5604  * @hba: per adapter instance
5605  * @intr_status: contains interrupts generated by the controller
5606  *
5607  * Returns
5608  *  IRQ_HANDLED - If interrupt is valid
5609  *  IRQ_NONE    - If invalid interrupt
5610  */
5611 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5612 {
5613         irqreturn_t retval = IRQ_NONE;
5614
5615         hba->errors = UFSHCD_ERROR_MASK & intr_status;
5616
5617         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5618                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5619
5620         if (hba->errors)
5621                 retval |= ufshcd_check_errors(hba);
5622
5623         if (intr_status & UFSHCD_UIC_MASK)
5624                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
5625
5626         if (intr_status & UTP_TASK_REQ_COMPL)
5627                 retval |= ufshcd_tmc_handler(hba);
5628
5629         if (intr_status & UTP_TRANSFER_REQ_COMPL)
5630                 retval |= ufshcd_transfer_req_compl(hba);
5631
5632         return retval;
5633 }
5634
5635 /**
5636  * ufshcd_intr - Main interrupt service routine
5637  * @irq: irq number
5638  * @__hba: pointer to adapter instance
5639  *
5640  * Returns
5641  *  IRQ_HANDLED - If interrupt is valid
5642  *  IRQ_NONE    - If invalid interrupt
5643  */
5644 static irqreturn_t ufshcd_intr(int irq, void *__hba)
5645 {
5646         u32 intr_status, enabled_intr_status;
5647         irqreturn_t retval = IRQ_NONE;
5648         struct ufs_hba *hba = __hba;
5649         int retries = hba->nutrs;
5650
5651         spin_lock(hba->host->host_lock);
5652         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5653
5654         /*
5655          * There could be at most hba->nutrs reqs in flight, and in the worst
5656          * case they finish one by one after the interrupt status is read.
5657          * Make sure we handle them all by re-checking the interrupt status
5658          * in a loop until every req is processed before returning.
5659          */
5660         do {
5661                 enabled_intr_status =
5662                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5663                 if (intr_status)
5664                         ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5665                 if (enabled_intr_status)
5666                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
5667
5668                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5669         } while (intr_status && --retries);
5670
5671         if (retval == IRQ_NONE) {
5672                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
5673                                         __func__, intr_status);
5674                 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
5675         }
5676
5677         spin_unlock(hba->host->host_lock);
5678         return retval;
5679 }
5680
5681 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5682 {
5683         int err = 0;
5684         u32 mask = 1 << tag;
5685         unsigned long flags;
5686
5687         if (!test_bit(tag, &hba->outstanding_tasks))
5688                 goto out;
5689
5690         spin_lock_irqsave(hba->host->host_lock, flags);
5691         ufshcd_utmrl_clear(hba, tag);
5692         spin_unlock_irqrestore(hba->host->host_lock, flags);
5693
5694         /* poll for max. 1 sec to clear door bell register by h/w */
5695         err = ufshcd_wait_for_register(hba,
5696                         REG_UTP_TASK_REQ_DOOR_BELL,
5697                         mask, 0, 1000, 1000, true);
5698 out:
5699         return err;
5700 }
5701
5702 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5703                 struct utp_task_req_desc *treq, u8 tm_function)
5704 {
5705         struct request_queue *q = hba->tmf_queue;
5706         struct Scsi_Host *host = hba->host;
5707         DECLARE_COMPLETION_ONSTACK(wait);
5708         struct request *req;
5709         unsigned long flags;
5710         int free_slot, task_tag, err;
5711
5712         /*
5713          * Get a free slot; blk_get_request() sleeps if no reserved tag is
5714          * available. The subsequent wait for the TM response is bounded by
5715          * %TM_CMD_TIMEOUT.
5716          */
5717         req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
        if (IS_ERR(req))
                return PTR_ERR(req);
5718         req->end_io_data = &wait;
5719         free_slot = req->tag;
5720         WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
5721         ufshcd_hold(hba, false);
5722
5723         spin_lock_irqsave(host->host_lock, flags);
5724         task_tag = hba->nutrs + free_slot;
5725
5726         treq->req_header.dword_0 |= cpu_to_be32(task_tag);
5727
5728         memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
5729         ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5730
5731         /* send command to the controller */
5732         __set_bit(free_slot, &hba->outstanding_tasks);
5733
5734         /* Make sure descriptors are ready before ringing the task doorbell */
5735         wmb();
5736
5737         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5738         /* Make sure that doorbell is committed immediately */
5739         wmb();
5740
5741         spin_unlock_irqrestore(host->host_lock, flags);
5742
5743         ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5744
5745         /* wait until the task management command is completed */
5746         err = wait_for_completion_io_timeout(&wait,
5747                         msecs_to_jiffies(TM_CMD_TIMEOUT));
5748         if (!err) {
5749                 /*
5750                  * Make sure that ufshcd_compl_tm() does not trigger a
5751                  * use-after-free.
5752                  */
5753                 req->end_io_data = NULL;
5754                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5755                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5756                                 __func__, tm_function);
5757                 if (ufshcd_clear_tm_cmd(hba, free_slot))
5758                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5759                                         __func__, free_slot);
5760                 err = -ETIMEDOUT;
5761         } else {
5762                 err = 0;
5763                 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
5764
5765                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5766         }
5767
5768         spin_lock_irqsave(hba->host->host_lock, flags);
5769         __clear_bit(free_slot, &hba->outstanding_tasks);
5770         spin_unlock_irqrestore(hba->host->host_lock, flags);
5771
5772         blk_put_request(req);
5773
5774         ufshcd_release(hba);
5775         return err;
5776 }
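
/*
 * Editor's note: the "req->end_io_data = NULL" in the timeout branch
 * above only works if the completion side checks that pointer before
 * using it.  A sketch of that assumed contract (the real logic lives
 * in ufshcd_compl_tm(); this is a simplified, hypothetical form):
 */
static void example_compl_tm(struct request *req)
{
        struct completion *c = req->end_io_data;

        /* NULL means the issuer timed out and abandoned the wait */
        if (c)
                complete(c);
}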
5777
5778 /**
5779  * ufshcd_issue_tm_cmd - issues task management commands to controller
5780  * @hba: per adapter instance
5781  * @lun_id: LUN ID to which TM command is sent
5782  * @task_id: task ID to which the TM command is applicable
5783  * @tm_function: task management function opcode
5784  * @tm_response: task management service response return value
5785  *
5786  * Returns non-zero value on error, zero on success.
5787  */
5788 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5789                 u8 tm_function, u8 *tm_response)
5790 {
5791         struct utp_task_req_desc treq = { { 0 }, };
5792         int ocs_value, err;
5793
5794         /* Configure task request descriptor */
5795         treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5796         treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5797
5798         /* Configure task request UPIU */
5799         treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
5800                                   cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
5801         treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
5802
5803         /*
5804          * The host shall provide the same value for LUN field in the basic
5805          * header and for Input Parameter.
5806          */
5807         treq.input_param1 = cpu_to_be32(lun_id);
5808         treq.input_param2 = cpu_to_be32(task_id);
5809
5810         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
5811         if (err == -ETIMEDOUT)
5812                 return err;
5813
5814         ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
5815         if (ocs_value != OCS_SUCCESS)
5816                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5817                                 __func__, ocs_value);
5818         else if (tm_response)
5819                 *tm_response = be32_to_cpu(treq.output_param1) &
5820                                 MASK_TM_SERVICE_RESP;
5821         return err;
5822 }
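
/*
 * Editor's note: worked example of the header packing above, with
 * illustrative values lun_id = 0 and tm_function = UFS_QUERY_TASK
 * (0x80).  The UPIU basic header is byte-oriented, hence the
 * big-endian conversions:
 *
 *   dword_0 = be32(0 << 8) | be32(UPIU_TRANSACTION_TASK_REQ << 24)
 *           -> bytes { 0x04, 0x00, 0x00, 0x00 }  (type, flags, LUN, tag)
 *   dword_1 = be32(0x80 << 16)
 *           -> bytes { 0x00, 0x80, 0x00, 0x00 }  (rsvd, TM fn, rsp, rsvd)
 *
 * The task tag byte is OR-ed into dword_0 later, inside
 * __ufshcd_issue_tm_cmd().
 */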
5823
5824 /**
5825  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
5826  * @hba:        per-adapter instance
5827  * @req_upiu:   upiu request
5828  * @rsp_upiu:   upiu reply
5829  * @desc_buff:  pointer to descriptor buffer, NULL if NA
5830  * @buff_len:   descriptor size, 0 if NA
5831  * @cmd_type:   specifies the type (NOP, Query...)
5832  * @desc_op:    descriptor operation
5833  *
5834  * These types of requests use the UTP Transfer Request Descriptor (UTRD).
5835  * Therefore, they "ride" the device management infrastructure: they use its
5836  * tag and task work queues.
5837  *
5838  * Since there is only one available tag for device management commands,
5839  * the caller is expected to hold the hba->dev_cmd.lock mutex.
5840  */
5841 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
5842                                         struct utp_upiu_req *req_upiu,
5843                                         struct utp_upiu_req *rsp_upiu,
5844                                         u8 *desc_buff, int *buff_len,
5845                                         enum dev_cmd_type cmd_type,
5846                                         enum query_opcode desc_op)
5847 {
5848         struct request_queue *q = hba->cmd_queue;
5849         struct request *req;
5850         struct ufshcd_lrb *lrbp;
5851         int err = 0;
5852         int tag;
5853         struct completion wait;
5854         unsigned long flags;
5855         u32 upiu_flags;
5856
5857         down_read(&hba->clk_scaling_lock);
5858
5859         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
5860         if (IS_ERR(req)) {
5861                 err = PTR_ERR(req);
5862                 goto out_unlock;
5863         }
5864         tag = req->tag;
5865         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
5866
5867         init_completion(&wait);
5868         lrbp = &hba->lrb[tag];
5869         WARN_ON(lrbp->cmd);
5870
5871         lrbp->cmd = NULL;
5872         lrbp->sense_bufflen = 0;
5873         lrbp->sense_buffer = NULL;
5874         lrbp->task_tag = tag;
5875         lrbp->lun = 0;
5876         lrbp->intr_cmd = true;
5877         hba->dev_cmd.type = cmd_type;
5878
5879         switch (hba->ufs_version) {
5880         case UFSHCI_VERSION_10:
5881         case UFSHCI_VERSION_11:
5882                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
5883                 break;
5884         default:
5885                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
5886                 break;
5887         }
5888
5889         /* update the task tag in the request upiu */
5890         req_upiu->header.dword_0 |= cpu_to_be32(tag);
5891
5892         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
5893
5894         /* just copy the upiu request as it is */
5895         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
5896         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
5897                 /* The Data Segment Area is optional depending upon the query
5898                  * function value. For WRITE DESCRIPTOR, the data segment
5899                  * follows right after the Transaction Specific Fields (TSF).
5900                  */
5901                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
5902                 *buff_len = 0;
5903         }
5904
5905         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5906
5907         hba->dev_cmd.complete = &wait;
5908
5909         /* Make sure descriptors are ready before ringing the doorbell */
5910         wmb();
5911         spin_lock_irqsave(hba->host->host_lock, flags);
5912         ufshcd_send_command(hba, tag);
5913         spin_unlock_irqrestore(hba->host->host_lock, flags);
5914
5915         /*
5916          * Ignore the return value here - ufshcd_check_query_response() is
5917          * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
5918          * Read the response directly, ignoring all errors.
5919          */
5920         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
5921
5922         /* just copy the upiu response as it is */
5923         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
5924         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
5925                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
5926                 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
5927                                MASK_QUERY_DATA_SEG_LEN;
5928
5929                 if (*buff_len >= resp_len) {
5930                         memcpy(desc_buff, descp, resp_len);
5931                         *buff_len = resp_len;
5932                 } else {
5933                         dev_warn(hba->dev,
5934                                  "%s: rsp size %d is bigger than buffer size %d\n",
5935                                  __func__, resp_len, *buff_len);
5936                         *buff_len = 0;
5937                         err = -EINVAL;
5938                 }
5939         }
5940
5941         blk_put_request(req);
5942 out_unlock:
5943         up_read(&hba->clk_scaling_lock);
5944         return err;
5945 }
5946
5947 /**
5948  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
5949  * @hba:        per-adapter instance
5950  * @req_upiu:   upiu request
5951  * @rsp_upiu:   upiu reply - only 8 DW as we do not support scsi commands
5952  * @msgcode:    message code, one of UPIU Transaction Codes Initiator to Target
5953  * @desc_buff:  pointer to descriptor buffer, NULL if NA
5954  * @buff_len:   descriptor size, 0 if NA
5955  * @desc_op:    descriptor operation
5956  *
5957  * Supports UTP Transfer requests (nop and query), and UTP Task
5958  * Management requests.
5959  * It is up to the caller to fill the UPIU content properly, as it will
5960  * be copied without any further input validations.
5961  */
5962 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
5963                              struct utp_upiu_req *req_upiu,
5964                              struct utp_upiu_req *rsp_upiu,
5965                              int msgcode,
5966                              u8 *desc_buff, int *buff_len,
5967                              enum query_opcode desc_op)
5968 {
5969         int err;
5970         enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
5971         struct utp_task_req_desc treq = { { 0 }, };
5972         int ocs_value;
5973         u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
5974
5975         switch (msgcode) {
5976         case UPIU_TRANSACTION_NOP_OUT:
5977                 cmd_type = DEV_CMD_TYPE_NOP;
5978                 /* fall through */
5979         case UPIU_TRANSACTION_QUERY_REQ:
5980                 ufshcd_hold(hba, false);
5981                 mutex_lock(&hba->dev_cmd.lock);
5982                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
5983                                                    desc_buff, buff_len,
5984                                                    cmd_type, desc_op);
5985                 mutex_unlock(&hba->dev_cmd.lock);
5986                 ufshcd_release(hba);
5987
5988                 break;
5989         case UPIU_TRANSACTION_TASK_REQ:
5990                 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5991                 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5992
5993                 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
5994
5995                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
5996                 if (err == -ETIMEDOUT)
5997                         break;
5998
5999                 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6000                 if (ocs_value != OCS_SUCCESS) {
6001                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6002                                 ocs_value);
6003                         break;
6004                 }
6005
6006                 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6007
6008                 break;
6009         default:
6010                 err = -EINVAL;
6011
6012                 break;
6013         }
6014
6015         return err;
6016 }
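
/*
 * Editor's note: hedged usage sketch for the exported helper above,
 * loosely modelled on what a pass-through caller (e.g. ufs_bsg) might
 * do to send a NOP OUT; error handling is trimmed and the function
 * name is hypothetical.
 */
static int example_send_nop_out(struct ufs_hba *hba)
{
        struct utp_upiu_req req_upiu = { };
        struct utp_upiu_req rsp_upiu = { };
        int buff_len = 0;

        /*
         * Byte 0 of the basic header carries the transaction type.
         * NOP OUT's code happens to be 0x00, so this assignment is
         * explicit documentation more than a necessity.
         */
        req_upiu.header.dword_0 =
                cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);

        return ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
                                        UPIU_TRANSACTION_NOP_OUT,
                                        NULL, &buff_len,
                                        UPIU_QUERY_OPCODE_NOP);
}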
6017
6018 /**
6019  * ufshcd_eh_device_reset_handler - device reset handler registered to
6020  *                                    scsi layer.
6021  * @cmd: SCSI command pointer
6022  *
6023  * Returns SUCCESS/FAILED
6024  */
6025 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6026 {
6027         struct Scsi_Host *host;
6028         struct ufs_hba *hba;
6029         unsigned int tag;
6030         u32 pos;
6031         int err;
6032         u8 resp = 0xF;
6033         struct ufshcd_lrb *lrbp;
6034         unsigned long flags;
6035
6036         host = cmd->device->host;
6037         hba = shost_priv(host);
6038         tag = cmd->request->tag;
6039
6040         lrbp = &hba->lrb[tag];
6041         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6042         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6043                 if (!err)
6044                         err = resp;
6045                 goto out;
6046         }
6047
6048         /* clear the commands that were pending for corresponding LUN */
6049         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6050                 if (hba->lrb[pos].lun == lrbp->lun) {
6051                         err = ufshcd_clear_cmd(hba, pos);
6052                         if (err)
6053                                 break;
6054                 }
6055         }
6056         spin_lock_irqsave(host->host_lock, flags);
6057         ufshcd_transfer_req_compl(hba);
6058         spin_unlock_irqrestore(host->host_lock, flags);
6059
6060 out:
6061         hba->req_abort_count = 0;
6062         ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
6063         if (!err) {
6064                 err = SUCCESS;
6065         } else {
6066                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6067                 err = FAILED;
6068         }
6069         return err;
6070 }
6071
6072 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6073 {
6074         struct ufshcd_lrb *lrbp;
6075         int tag;
6076
6077         for_each_set_bit(tag, &bitmap, hba->nutrs) {
6078                 lrbp = &hba->lrb[tag];
6079                 lrbp->req_abort_skip = true;
6080         }
6081 }
6082
6083 /**
6084  * ufshcd_abort - abort a specific command
6085  * @cmd: SCSI command pointer
6086  *
6087  * Abort the pending command in the device by sending a UFS_ABORT_TASK task
6088  * management command, and in the host controller by clearing the door-bell
6089  * register. There can be a race between the controller sending the command
6090  * to the device and the abort being issued; to avoid it, first issue
6091  * UFS_QUERY_TASK to check that the command was really issued, then abort it.
6092  *
6093  * Returns SUCCESS/FAILED
6094  */
6095 static int ufshcd_abort(struct scsi_cmnd *cmd)
6096 {
6097         struct Scsi_Host *host;
6098         struct ufs_hba *hba;
6099         unsigned long flags;
6100         unsigned int tag;
6101         int err = 0;
6102         int poll_cnt;
6103         u8 resp = 0xF;
6104         struct ufshcd_lrb *lrbp;
6105         u32 reg;
6106
6107         host = cmd->device->host;
6108         hba = shost_priv(host);
6109         tag = cmd->request->tag;
6110         lrbp = &hba->lrb[tag];
6111         if (!ufshcd_valid_tag(hba, tag)) {
6112                 dev_err(hba->dev,
6113                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6114                         __func__, tag, cmd, cmd->request);
6115                 BUG();
6116         }
6117
6118         /*
6119          * Task abort to the device W-LUN is illegal. Since this command
6120          * would fail due to that spec violation, the next step of SCSI
6121          * error handling would be to send a LU reset which, again, is a
6122          * spec violation. To avoid these unnecessary/illegal steps we skip
6123          * handling stage: reset and restore.
6124          */
6125         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6126                 return ufshcd_eh_host_reset_handler(cmd);
6127
6128         ufshcd_hold(hba, false);
6129         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6130         /* If command is already aborted/completed, return SUCCESS */
6131         if (!(test_bit(tag, &hba->outstanding_reqs))) {
6132                 dev_err(hba->dev,
6133                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6134                         __func__, tag, hba->outstanding_reqs, reg);
6135                 goto out;
6136         }
6137
6138         if (!(reg & (1 << tag))) {
6139                 dev_err(hba->dev,
6140                 "%s: cmd was completed, but without a notifying intr, tag = %d",
6141                 __func__, tag);
6142         }
6143
6144         /* Print Transfer Request of aborted task */
6145         dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6146
6147         /*
6148          * Print detailed info about aborted request.
6149          * As more than one request might get aborted at the same time,
6150          * print full information only for the first aborted request in order
6151          * to reduce repeated printouts. For other aborted requests only print
6152          * basic details.
6153          */
6154         scsi_print_command(hba->lrb[tag].cmd);
6155         if (!hba->req_abort_count) {
6156                 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
6157                 ufshcd_print_host_regs(hba);
6158                 ufshcd_print_host_state(hba);
6159                 ufshcd_print_pwr_info(hba);
6160                 ufshcd_print_trs(hba, 1 << tag, true);
6161         } else {
6162                 ufshcd_print_trs(hba, 1 << tag, false);
6163         }
6164         hba->req_abort_count++;
6165
6166         /* Skip task abort in case previous aborts failed and report failure */
6167         if (lrbp->req_abort_skip) {
6168                 err = -EIO;
6169                 goto out;
6170         }
6171
6172         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6173                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6174                                 UFS_QUERY_TASK, &resp);
6175                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6176                         /* cmd pending in the device */
6177                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6178                                 __func__, tag);
6179                         break;
6180                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6181                         /*
6182                          * cmd not pending in the device, check if it is
6183                          * in transition.
6184                          */
6185                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6186                                 __func__, tag);
6187                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6188                         if (reg & (1 << tag)) {
6189                                 /* sleep for max. 200us to stabilize */
6190                                 usleep_range(100, 200);
6191                                 continue;
6192                         }
6193                         /* command completed already */
6194                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6195                                 __func__, tag);
6196                         goto out;
6197                 } else {
6198                         dev_err(hba->dev,
6199                                 "%s: no response from device. tag = %d, err %d\n",
6200                                 __func__, tag, err);
6201                         if (!err)
6202                                 err = resp; /* service response error */
6203                         goto out;
6204                 }
6205         }
6206
6207         if (!poll_cnt) {
6208                 err = -EBUSY;
6209                 goto out;
6210         }
6211
6212         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6213                         UFS_ABORT_TASK, &resp);
6214         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6215                 if (!err) {
6216                         err = resp; /* service response error */
6217                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6218                                 __func__, tag, err);
6219                 }
6220                 goto out;
6221         }
6222
6223         err = ufshcd_clear_cmd(hba, tag);
6224         if (err) {
6225                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6226                         __func__, tag, err);
6227                 goto out;
6228         }
6229
6230         scsi_dma_unmap(cmd);
6231
6232         spin_lock_irqsave(host->host_lock, flags);
6233         ufshcd_outstanding_req_clear(hba, tag);
6234         hba->lrb[tag].cmd = NULL;
6235         spin_unlock_irqrestore(host->host_lock, flags);
6236
6237 out:
6238         if (!err) {
6239                 err = SUCCESS;
6240         } else {
6241                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6242                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6243                 err = FAILED;
6244         }
6245
6246         /*
6247          * This ufshcd_release() corresponds to the original scsi cmd that got
6248          * aborted here (as we won't get any IRQ for it).
6249          */
6250         ufshcd_release(hba);
6251         return err;
6252 }
6253
6254 /**
6255  * ufshcd_host_reset_and_restore - reset and restore host controller
6256  * @hba: per-adapter instance
6257  *
6258  * Note that host controller reset may issue DME_RESET to
6259  * local and remote (device) Uni-Pro stack and the attributes
6260  * are reset to default state.
6261  *
6262  * Returns zero on success, non-zero on failure
6263  */
6264 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6265 {
6266         int err;
6267         unsigned long flags;
6268
6269         /*
6270          * Stop the host controller and complete the requests
6271          * cleared by h/w
6272          */
6273         spin_lock_irqsave(hba->host->host_lock, flags);
6274         ufshcd_hba_stop(hba, false);
6275         hba->silence_err_logs = true;
6276         ufshcd_complete_requests(hba);
6277         hba->silence_err_logs = false;
6278         spin_unlock_irqrestore(hba->host->host_lock, flags);
6279
6280         /* scale up clocks to max frequency before full reinitialization */
6281         ufshcd_scale_clks(hba, true);
6282
6283         err = ufshcd_hba_enable(hba);
6284         if (err)
6285                 goto out;
6286
6287         /* Establish the link again and restore the device */
6288         err = ufshcd_probe_hba(hba);
6289
6290         if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
6291                 err = -EIO;
6292 out:
6293         if (err)
6294                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6295         ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
6296         return err;
6297 }
6298
6299 /**
6300  * ufshcd_reset_and_restore - reset and re-initialize host/device
6301  * @hba: per-adapter instance
6302  *
6303  * Reset and recover device, host and re-establish link. This
6304  * is helpful to recover the communication in fatal error conditions.
6305  *
6306  * Returns zero on success, non-zero on failure
6307  */
6308 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6309 {
6310         int err = 0;
6311         int retries = MAX_HOST_RESET_RETRIES;
6312
6313         do {
6314                 /* Reset the attached device */
6315                 ufshcd_vops_device_reset(hba);
6316
6317                 err = ufshcd_host_reset_and_restore(hba);
6318         } while (err && --retries);
6319
6320         return err;
6321 }
6322
6323 /**
6324  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6325  * @cmd: SCSI command pointer
6326  *
6327  * Returns SUCCESS/FAILED
6328  */
6329 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6330 {
6331         int err;
6332         unsigned long flags;
6333         struct ufs_hba *hba;
6334
6335         hba = shost_priv(cmd->device->host);
6336
6337         ufshcd_hold(hba, false);
6338         /*
6339          * Check if there is any race with fatal error handling.
6340          * If so, wait for it to complete. Even though fatal error
6341          * handling does reset and restore in some cases, don't assume
6342          * anything out of it. We are just avoiding the race here.
6343          */
6344         do {
6345                 spin_lock_irqsave(hba->host->host_lock, flags);
6346                 if (!(work_pending(&hba->eh_work) ||
6347                             hba->ufshcd_state == UFSHCD_STATE_RESET ||
6348                             hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6349                         break;
6350                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6351                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6352                 flush_work(&hba->eh_work);
6353         } while (1);
6354
6355         hba->ufshcd_state = UFSHCD_STATE_RESET;
6356         ufshcd_set_eh_in_progress(hba);
6357         spin_unlock_irqrestore(hba->host->host_lock, flags);
6358
6359         err = ufshcd_reset_and_restore(hba);
6360
6361         spin_lock_irqsave(hba->host->host_lock, flags);
6362         if (!err) {
6363                 err = SUCCESS;
6364                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6365         } else {
6366                 err = FAILED;
6367                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6368         }
6369         ufshcd_clear_eh_in_progress(hba);
6370         spin_unlock_irqrestore(hba->host->host_lock, flags);
6371
6372         ufshcd_release(hba);
6373         return err;
6374 }
6375
6376 /**
6377  * ufshcd_get_max_icc_level - calculate the ICC level
6378  * @sup_curr_uA: max. current supported by the regulator
6379  * @start_scan: row at the desc table to start scan from
6380  * @buff: power descriptor buffer
6381  *
6382  * Returns calculated max ICC level for specific regulator
6383  */
6384 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6385 {
6386         int i;
6387         int curr_uA;
6388         u16 data;
6389         u16 unit;
6390
6391         for (i = start_scan; i >= 0; i--) {
6392                 data = be16_to_cpup((__be16 *)&buff[2 * i]);
6393                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6394                                                 ATTR_ICC_LVL_UNIT_OFFSET;
6395                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6396                 switch (unit) {
6397                 case UFSHCD_NANO_AMP:
6398                         curr_uA = curr_uA / 1000;
6399                         break;
6400                 case UFSHCD_MILI_AMP:
6401                         curr_uA = curr_uA * 1000;
6402                         break;
6403                 case UFSHCD_AMP:
6404                         curr_uA = curr_uA * 1000 * 1000;
6405                         break;
6406                 case UFSHCD_MICRO_AMP:
6407                 default:
6408                         break;
6409                 }
6410                 if (sup_curr_uA >= curr_uA)
6411                         break;
6412         }
6413         if (i < 0) {
6414                 i = 0;
6415                 pr_err("%s: Couldn't find a valid icc_level, using %d\n", __func__, i);
6416         }
6417
6418         return (u32)i;
6419 }
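
/*
 * Editor's note: worked example of the decode above, assuming the
 * ufs.h layout where the unit code sits in the top two bits
 * (ATTR_ICC_LVL_UNIT_OFFSET = 14) and the magnitude in the low ten
 * (ATTR_ICC_LVL_VALUE_MASK = 0x3FF).  For an illustrative entry
 * data = 0x8064:
 *
 *   unit    = 0x8064 >> 14 = 2 (UFSHCD_MILI_AMP)
 *   value   = 0x8064 & 0x3FF = 0x064 = 100
 *   curr_uA = 100 * 1000 = 100000 uA (100 mA)
 *
 * Scanning downward from @start_scan then returns the highest level
 * whose normalized consumption still fits under @sup_curr_uA.
 */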
6420
6421 /**
6422  * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
6423  * ICC level. In case regulators are not initialized we'll return 0.
6424  * @hba: per-adapter instance
6425  * @desc_buf: power descriptor buffer to extract ICC levels from.
6426  * @len: length of desc_buf
6427  *
6428  * Returns calculated ICC level
6429  */
6430 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6431                                                         u8 *desc_buf, int len)
6432 {
6433         u32 icc_level = 0;
6434
6435         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6436                                                 !hba->vreg_info.vccq2) {
6437                 dev_err(hba->dev,
6438                         "%s: Regulator capability was not set, actvIccLevel=%d",
6439                                                         __func__, icc_level);
6440                 goto out;
6441         }
6442
6443         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
6444                 icc_level = ufshcd_get_max_icc_level(
6445                                 hba->vreg_info.vcc->max_uA,
6446                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6447                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6448
6449         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
6450                 icc_level = ufshcd_get_max_icc_level(
6451                                 hba->vreg_info.vccq->max_uA,
6452                                 icc_level,
6453                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6454
6455         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
6456                 icc_level = ufshcd_get_max_icc_level(
6457                                 hba->vreg_info.vccq2->max_uA,
6458                                 icc_level,
6459                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6460 out:
6461         return icc_level;
6462 }
6463
6464 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6465 {
6466         int ret;
6467         int buff_len = hba->desc_size.pwr_desc;
6468         u8 *desc_buf;
6469
6470         desc_buf = kmalloc(buff_len, GFP_KERNEL);
6471         if (!desc_buf)
6472                 return;
6473
6474         ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6475         if (ret) {
6476                 dev_err(hba->dev,
6477                         "%s: Failed reading power descriptor. len = %d ret = %d",
6478                         __func__, buff_len, ret);
6479                 goto out;
6480         }
6481
6482         hba->init_prefetch_data.icc_level =
6483                         ufshcd_find_max_sup_active_icc_level(hba,
6484                         desc_buf, buff_len);
6485         dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6486                         __func__, hba->init_prefetch_data.icc_level);
6487
6488         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6489                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6490                 &hba->init_prefetch_data.icc_level);
6491
6492         if (ret)
6493                 dev_err(hba->dev,
6494                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6495                         __func__, hba->init_prefetch_data.icc_level, ret);
6496
6497 out:
6498         kfree(desc_buf);
6499 }
6500
6501 /**
6502  * ufshcd_scsi_add_wlus - Adds required W-LUs
6503  * @hba: per-adapter instance
6504  *
6505  * UFS device specification requires the UFS devices to support 4 well known
6506  * logical units:
6507  *      "REPORT_LUNS" (address: 01h)
6508  *      "UFS Device" (address: 50h)
6509  *      "RPMB" (address: 44h)
6510  *      "BOOT" (address: 30h)
6511  * UFS device's power management needs to be controlled by "POWER CONDITION"
6512  * field of SSU (START STOP UNIT) command. But this "power condition" field
6513  * will take effect only when it's sent to the "UFS device" well known
6514  * logical unit, hence we require the scsi_device instance to represent it in
6515  * order for the UFS host driver to send the SSU command for power management.
6516  *
6517  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6518  * Block) LU so user space process can control this LU. User space may also
6519  * want to have access to BOOT LU.
6520  *
6521  * This function adds scsi device instances for each of the well known LUs
6522  * (except "REPORT LUNS" LU).
6523  *
6524  * Returns zero on success (all required W-LUs are added successfully),
6525  * non-zero error value on failure (if failed to add any of the required W-LU).
6526  */
6527 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6528 {
6529         int ret = 0;
6530         struct scsi_device *sdev_rpmb;
6531         struct scsi_device *sdev_boot;
6532
6533         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6534                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6535         if (IS_ERR(hba->sdev_ufs_device)) {
6536                 ret = PTR_ERR(hba->sdev_ufs_device);
6537                 hba->sdev_ufs_device = NULL;
6538                 goto out;
6539         }
6540         scsi_device_put(hba->sdev_ufs_device);
6541
6542         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6543                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6544         if (IS_ERR(sdev_rpmb)) {
6545                 ret = PTR_ERR(sdev_rpmb);
6546                 goto remove_sdev_ufs_device;
6547         }
6548         scsi_device_put(sdev_rpmb);
6549
6550         sdev_boot = __scsi_add_device(hba->host, 0, 0,
6551                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6552         if (IS_ERR(sdev_boot))
6553                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6554         else
6555                 scsi_device_put(sdev_boot);
6556         goto out;
6557
6558 remove_sdev_ufs_device:
6559         scsi_remove_device(hba->sdev_ufs_device);
6560 out:
6561         return ret;
6562 }
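
/*
 * Editor's note: the ufshcd_upiu_wlun_to_scsi_wlun() calls above map a
 * UFS W-LUN id onto the SAM well-known-LUN address space.  A sketch of
 * what that helper is assumed to look like (see ufshcd.h for the real
 * definition; SCSI_W_LUN_BASE is 0xc100):
 */
static inline u16 example_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
        /*
         * Drop the UFS W-LUN flag bit, then place the id in the
         * well-known-LUN range: e.g. "UFS Device" 0xd0 -> 0xc150.
         */
        return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}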
6563
6564 static int ufs_get_device_desc(struct ufs_hba *hba,
6565                                struct ufs_dev_desc *dev_desc)
6566 {
6567         int err;
6568         size_t buff_len;
6569         u8 model_index;
6570         u8 *desc_buf;
6571
6572         if (!dev_desc)
6573                 return -EINVAL;
6574
6575         buff_len = max_t(size_t, hba->desc_size.dev_desc,
6576                          QUERY_DESC_MAX_SIZE + 1);
6577         desc_buf = kmalloc(buff_len, GFP_KERNEL);
6578         if (!desc_buf) {
6579                 err = -ENOMEM;
6580                 goto out;
6581         }
6582
6583         err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6584         if (err) {
6585                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6586                         __func__, err);
6587                 goto out;
6588         }
6589
6590         /*
6591          * getting vendor (manufacturerID) and Bank Index in big endian
6592          * format
6593          */
6594         dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6595                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6596
6597         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6598         err = ufshcd_read_string_desc(hba, model_index,
6599                                       &dev_desc->model, SD_ASCII_STD);
6600         if (err < 0) {
6601                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6602                         __func__, err);
6603                 goto out;
6604         }
6605
6606         /*
6607          * ufshcd_read_string_desc() returns the size of the string on
6608          * success, so reset the error value.
6609          */
6610         err = 0;
6611
6612 out:
6613         kfree(desc_buf);
6614         return err;
6615 }
6616
6617 static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
6618 {
6619         kfree(dev_desc->model);
6620         dev_desc->model = NULL;
6621 }
6622
6623 static void ufs_fixup_device_setup(struct ufs_hba *hba,
6624                                    struct ufs_dev_desc *dev_desc)
6625 {
6626         struct ufs_dev_fix *f;
6627
6628         for (f = ufs_fixups; f->quirk; f++) {
6629                 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6630                      f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6631                      ((dev_desc->model &&
6632                        STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
6633                       !strcmp(f->card.model, UFS_ANY_MODEL)))
6634                         hba->dev_quirks |= f->quirk;
6635         }
6636 }
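
/*
 * Editor's note: illustrative sketch of the kind of table that
 * ufs_fixups[] is - the entries below demonstrate the matching rules
 * implemented above, and are not a claim about the actual table
 * contents (the real table is defined earlier in this file via the
 * UFS_FIX()/END_FIX macros from ufs_quirks.h).
 */
static struct ufs_dev_fix example_fixups[] = {
        /* every model from one vendor */
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
        /* wildcard on both vendor and model */
        UFS_FIX(UFS_ANY_VENDOR, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
        END_FIX
};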
6637
6638 /**
6639  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6640  * @hba: per-adapter instance
6641  *
6642  * PA_TActivate parameter can be tuned manually if UniPro version is less than
6643  * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6644  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6645  * the hibern8 exit latency.
6646  *
6647  * Returns zero on success, non-zero error value on failure.
6648  */
6649 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6650 {
6651         int ret = 0;
6652         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6653
6654         ret = ufshcd_dme_peer_get(hba,
6655                                   UIC_ARG_MIB_SEL(
6656                                         RX_MIN_ACTIVATETIME_CAPABILITY,
6657                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6658                                   &peer_rx_min_activatetime);
6659         if (ret)
6660                 goto out;
6661
6662         /* make sure proper unit conversion is applied */
6663         tuned_pa_tactivate =
6664                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6665                  / PA_TACTIVATE_TIME_UNIT_US);
6666         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6667                              tuned_pa_tactivate);
6668
6669 out:
6670         return ret;
6671 }
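
/*
 * Editor's note: worked example of the unit conversion above, assuming
 * the commonly used unit sizes RX_MIN_ACTIVATETIME_UNIT_US = 100 and
 * PA_TACTIVATE_TIME_UNIT_US = 10 (check unipro.h for the authoritative
 * values).  A peer capability of 2 units then means 200 us, which
 * programs PA_TActivate = 200 / 10 = 20.
 */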
6672
6673 /**
6674  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6675  * @hba: per-adapter instance
6676  *
6677  * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6678  * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6679  * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6680  * This optimal value can help reduce the hibern8 exit latency.
6681  *
6682  * Returns zero on success, non-zero error value on failure.
6683  */
6684 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6685 {
6686         int ret = 0;
6687         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6688         u32 max_hibern8_time, tuned_pa_hibern8time;
6689
6690         ret = ufshcd_dme_get(hba,
6691                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6692                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6693                                   &local_tx_hibern8_time_cap);
6694         if (ret)
6695                 goto out;
6696
6697         ret = ufshcd_dme_peer_get(hba,
6698                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6699                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6700                                   &peer_rx_hibern8_time_cap);
6701         if (ret)
6702                 goto out;
6703
6704         max_hibern8_time = max(local_tx_hibern8_time_cap,
6705                                peer_rx_hibern8_time_cap);
6706         /* make sure proper unit conversion is applied */
6707         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6708                                 / PA_HIBERN8_TIME_UNIT_US);
6709         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6710                              tuned_pa_hibern8time);
6711 out:
6712         return ret;
6713 }
6714
6715 /**
6716  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6717  * less than device PA_TACTIVATE time.
6718  * @hba: per-adapter instance
6719  *
6720  * Some UFS devices require host PA_TACTIVATE to be lower than device
6721  * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
6722  * for such devices.
6723  *
6724  * Returns zero on success, non-zero error value on failure.
6725  */
6726 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6727 {
6728         int ret = 0;
6729         u32 granularity, peer_granularity;
6730         u32 pa_tactivate, peer_pa_tactivate;
6731         u32 pa_tactivate_us, peer_pa_tactivate_us;
6732         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6733
6734         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6735                                   &granularity);
6736         if (ret)
6737                 goto out;
6738
6739         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6740                                   &peer_granularity);
6741         if (ret)
6742                 goto out;
6743
6744         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6745             (granularity > PA_GRANULARITY_MAX_VAL)) {
6746                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6747                         __func__, granularity);
6748                 return -EINVAL;
6749         }
6750
6751         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6752             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6753                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6754                         __func__, peer_granularity);
6755                 return -EINVAL;
6756         }
6757
6758         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6759         if (ret)
6760                 goto out;
6761
6762         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6763                                   &peer_pa_tactivate);
6764         if (ret)
6765                 goto out;
6766
6767         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6768         peer_pa_tactivate_us = peer_pa_tactivate *
6769                              gran_to_us_table[peer_granularity - 1];
6770
6771         if (pa_tactivate_us > peer_pa_tactivate_us) {
6772                 u32 new_peer_pa_tactivate;
6773
6774                 new_peer_pa_tactivate = pa_tactivate_us /
6775                                       gran_to_us_table[peer_granularity - 1];
6776                 new_peer_pa_tactivate++;
6777                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6778                                           new_peer_pa_tactivate);
6779         }
6780
6781 out:
6782         return ret;
6783 }
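
/*
 * Editor's note: worked example of the granularity math above, using
 * illustrative values.  With host granularity 3 (8 us per unit, per
 * gran_to_us_table[]) and host PA_TACTIVATE = 2, the host needs
 * 2 * 8 = 16 us.  If the device reports granularity 1 (1 us per unit)
 * and PA_TACTIVATE = 10 (10 us), then 16 > 10 and the device value is
 * raised to 16 / 1 + 1 = 17 units, keeping the device's time strictly
 * above the host's.
 */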
6784
6785 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6786 {
6787         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6788                 ufshcd_tune_pa_tactivate(hba);
6789                 ufshcd_tune_pa_hibern8time(hba);
6790         }
6791
6792         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6793                 /* set 1ms timeout for PA_TACTIVATE */
6794                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6795
6796         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6797                 ufshcd_quirk_tune_host_pa_tactivate(hba);
6798
6799         ufshcd_vops_apply_dev_quirks(hba);
6800 }
6801
6802 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6803 {
6804         hba->ufs_stats.hibern8_exit_cnt = 0;
6805         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6806         hba->req_abort_count = 0;
6807 }
6808
6809 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6810 {
6811         int err;
6812
6813         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6814                 &hba->desc_size.dev_desc);
6815         if (err)
6816                 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6817
6818         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6819                 &hba->desc_size.pwr_desc);
6820         if (err)
6821                 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6822
6823         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6824                 &hba->desc_size.interc_desc);
6825         if (err)
6826                 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6827
6828         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6829                 &hba->desc_size.conf_desc);
6830         if (err)
6831                 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6832
6833         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6834                 &hba->desc_size.unit_desc);
6835         if (err)
6836                 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6837
6838         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6839                 &hba->desc_size.geom_desc);
6840         if (err)
6841                 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6842
6843         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6844                 &hba->desc_size.hlth_desc);
6845         if (err)
6846                 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6847 }
6848
6849 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
6850         {19200000, REF_CLK_FREQ_19_2_MHZ},
6851         {26000000, REF_CLK_FREQ_26_MHZ},
6852         {38400000, REF_CLK_FREQ_38_4_MHZ},
6853         {52000000, REF_CLK_FREQ_52_MHZ},
6854         {0, REF_CLK_FREQ_INVAL},
6855 };
6856
6857 static enum ufs_ref_clk_freq
6858 ufs_get_bref_clk_from_hz(unsigned long freq)
6859 {
6860         int i;
6861
6862         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
6863                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
6864                         return ufs_ref_clk_freqs[i].val;
6865
6866         return REF_CLK_FREQ_INVAL;
6867 }
6868
6869 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
6870 {
6871         unsigned long freq;
6872
6873         freq = clk_get_rate(refclk);
6874
6875         hba->dev_ref_clk_freq =
6876                 ufs_get_bref_clk_from_hz(freq);
6877
6878         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
6879                 dev_err(hba->dev,
6880                 "invalid ref_clk setting = %ld\n", freq);
6881 }
6882
6883 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
6884 {
6885         int err;
6886         u32 ref_clk;
6887         u32 freq = hba->dev_ref_clk_freq;
6888
6889         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6890                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
6891
6892         if (err) {
6893                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
6894                         err);
6895                 goto out;
6896         }
6897
6898         if (ref_clk == freq)
6899                 goto out; /* nothing to update */
6900
6901         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6902                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
6903
6904         if (err) {
6905                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
6906                         ufs_ref_clk_freqs[freq].freq_hz);
6907                 goto out;
6908         }
6909
6910         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
6911                         ufs_ref_clk_freqs[freq].freq_hz);
6912
6913 out:
6914         return err;
6915 }
6916
6917 /**
6918  * ufshcd_probe_hba - probe hba to detect device and initialize
6919  * @hba: per-adapter instance
6920  *
6921  * Execute link-startup and verify device initialization
6922  */
6923 static int ufshcd_probe_hba(struct ufs_hba *hba)
6924 {
6925         struct ufs_dev_desc card = {0};
6926         int ret;
6927         ktime_t start = ktime_get();
6928
6929         ret = ufshcd_link_startup(hba);
6930         if (ret)
6931                 goto out;
6932
6933         /* set the default level for urgent bkops */
6934         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6935         hba->is_urgent_bkops_lvl_checked = false;
6936
6937         /* Debug counters initialization */
6938         ufshcd_clear_dbg_ufs_stats(hba);
6939
6940         /* UniPro link is active now */
6941         ufshcd_set_link_active(hba);
6942
6943         ret = ufshcd_verify_dev_init(hba);
6944         if (ret)
6945                 goto out;
6946
6947         ret = ufshcd_complete_dev_init(hba);
6948         if (ret)
6949                 goto out;
6950
6951         /* Init check for device descriptor sizes */
6952         ufshcd_init_desc_sizes(hba);
6953
6954         ret = ufs_get_device_desc(hba, &card);
6955         if (ret) {
6956                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6957                         __func__, ret);
6958                 goto out;
6959         }
6960
6961         ufs_fixup_device_setup(hba, &card);
6962         ufs_put_device_desc(&card);
6963
6964         ufshcd_tune_unipro_params(hba);
6965
6966         /* UFS device is also active now */
6967         ufshcd_set_ufs_dev_active(hba);
6968         ufshcd_force_reset_auto_bkops(hba);
6969         hba->wlun_dev_clr_ua = true;
6970
6971         if (ufshcd_get_max_pwr_mode(hba)) {
6972                 dev_err(hba->dev,
6973                         "%s: Failed getting max supported power mode\n",
6974                         __func__);
6975         } else {
6976                 /*
6977                  * Set the right value to bRefClkFreq before attempting to
6978                  * switch to HS gears.
6979                  */
6980                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
6981                         ufshcd_set_dev_ref_clk(hba);
6982                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6983                 if (ret) {
6984                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6985                                         __func__, ret);
6986                         goto out;
6987                 }
6988         }
6989
6990         /* set the state as operational after switching to desired gear */
6991         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6992
6993         /* Enable Auto-Hibernate if configured */
6994         ufshcd_auto_hibern8_enable(hba);
6995
6996         /*
6997          * If we are in error handling context or in power management callbacks
6998          * context, no need to scan the host
6999          */
7000         if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7001                 bool flag;
7002
7003                 /* clear any previous UFS device information */
7004                 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
7005                 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7006                                 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
7007                         hba->dev_info.f_power_on_wp_en = flag;
7008
7009                 if (!hba->is_init_prefetch)
7010                         ufshcd_init_icc_levels(hba);
7011
7012                 /* Add required well known logical units to scsi mid layer */
7013                 if (ufshcd_scsi_add_wlus(hba))
7014                         goto out;
7015
7016                 /* Initialize devfreq after UFS device is detected */
7017                 if (ufshcd_is_clkscaling_supported(hba)) {
7018                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
7019                                 &hba->pwr_info,
7020                                 sizeof(struct ufs_pa_layer_attr));
7021                         hba->clk_scaling.saved_pwr_info.is_valid = true;
7022                         if (!hba->devfreq) {
7023                                 ret = ufshcd_devfreq_init(hba);
7024                                 if (ret)
7025                                         goto out;
7026                         }
7027                         hba->clk_scaling.is_allowed = true;
7028                 }
7029
7030                 ufs_bsg_probe(hba);
7031
7032                 scsi_scan_host(hba->host);
7033                 pm_runtime_put_sync(hba->dev);
7034         }
7035
7036         if (!hba->is_init_prefetch)
7037                 hba->is_init_prefetch = true;
7038
7039 out:
7040         /*
7041          * If we failed to initialize the device or the device is not
7042          * present, turn off the power/clocks etc.
7043          */
7044         if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7045                 pm_runtime_put_sync(hba->dev);
7046                 ufshcd_exit_clk_scaling(hba);
7047                 ufshcd_hba_exit(hba);
7048         }
7049
7050         trace_ufshcd_init(dev_name(hba->dev), ret,
7051                 ktime_to_us(ktime_sub(ktime_get(), start)),
7052                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7053         return ret;
7054 }
7055
7056 /**
7057  * ufshcd_async_scan - asynchronous execution for probing hba
7058  * @data: data pointer to pass to this function
7059  * @cookie: cookie data
7060  */
7061 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7062 {
7063         struct ufs_hba *hba = (struct ufs_hba *)data;
7064
7065         ufshcd_probe_hba(hba);
7066 }
7067
7068 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
7069 {
7070         unsigned long flags;
7071         struct Scsi_Host *host;
7072         struct ufs_hba *hba;
7073         int index;
7074         bool found = false;
7075
7076         if (!scmd || !scmd->device || !scmd->device->host)
7077                 return BLK_EH_DONE;
7078
7079         host = scmd->device->host;
7080         hba = shost_priv(host);
7081         if (!hba)
7082                 return BLK_EH_DONE;
7083
7084         spin_lock_irqsave(host->host_lock, flags);
7085
7086         for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
7087                 if (hba->lrb[index].cmd == scmd) {
7088                         found = true;
7089                         break;
7090                 }
7091         }
7092
7093         spin_unlock_irqrestore(host->host_lock, flags);
7094
7095         /*
7096          * Bypass SCSI error handling and reset the block layer timer if this
7097          * SCSI command was not actually dispatched to UFS driver, otherwise
7098          * let SCSI layer handle the error as usual.
7099          */
7100         return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
7101 }
7102
7103 static const struct attribute_group *ufshcd_driver_groups[] = {
7104         &ufs_sysfs_unit_descriptor_group,
7105         &ufs_sysfs_lun_attributes_group,
7106         NULL,
7107 };
7108
7109 static struct scsi_host_template ufshcd_driver_template = {
7110         .module                 = THIS_MODULE,
7111         .name                   = UFSHCD,
7112         .proc_name              = UFSHCD,
7113         .queuecommand           = ufshcd_queuecommand,
7114         .slave_alloc            = ufshcd_slave_alloc,
7115         .slave_configure        = ufshcd_slave_configure,
7116         .slave_destroy          = ufshcd_slave_destroy,
7117         .change_queue_depth     = ufshcd_change_queue_depth,
7118         .eh_abort_handler       = ufshcd_abort,
7119         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7120         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
7121         .eh_timed_out           = ufshcd_eh_timed_out,
7122         .this_id                = -1,
7123         .sg_tablesize           = SG_ALL,
7124         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
7125         .can_queue              = UFSHCD_CAN_QUEUE,
7126         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
7127         .max_host_blocked       = 1,
7128         .track_queue_depth      = 1,
7129         .sdev_groups            = ufshcd_driver_groups,
7130         .dma_boundary           = PAGE_SIZE - 1,
7131         .rpm_autosuspend_delay  = RPM_AUTOSUSPEND_DELAY_MS,
7132 };
7133
7134 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7135                                    int ua)
7136 {
7137         int ret;
7138
7139         if (!vreg)
7140                 return 0;
7141
7142         /*
7143          * The "set_load" operation is only needed for regulators that
7144          * have a current limit configured. Otherwise a zero max_uA may
7145          * cause unexpected behavior when the regulator is enabled or
7146          * put into high power mode.
7147          */
7148         if (!vreg->max_uA)
7149                 return 0;
7150
7151         ret = regulator_set_load(vreg->reg, ua);
7152         if (ret < 0) {
7153                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7154                                 __func__, vreg->name, ua, ret);
7155         }
7156
7157         return ret;
7158 }
7159
7160 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7161                                          struct ufs_vreg *vreg)
7162 {
7163         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
7164 }
7165
7166 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7167                                          struct ufs_vreg *vreg)
7168 {
7169         if (!vreg)
7170                 return 0;
7171
7172         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7173 }
7174
7175 static int ufshcd_config_vreg(struct device *dev,
7176                 struct ufs_vreg *vreg, bool on)
7177 {
7178         int ret = 0;
7179         struct regulator *reg;
7180         const char *name;
7181         int min_uV, uA_load;
7182
7183         BUG_ON(!vreg);
7184
7185         reg = vreg->reg;
7186         name = vreg->name;
7187
7188         if (regulator_count_voltages(reg) > 0) {
7189                 if (vreg->min_uV && vreg->max_uV) {
7190                         min_uV = on ? vreg->min_uV : 0;
7191                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7192                         if (ret) {
7193                                 dev_err(dev,
7194                                         "%s: %s set voltage failed, err=%d\n",
7195                                         __func__, name, ret);
7196                                 goto out;
7197                         }
7198                 }
7199
7200                 uA_load = on ? vreg->max_uA : 0;
7201                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7202                 if (ret)
7203                         goto out;
7204         }
7205 out:
7206         return ret;
7207 }
7208
7209 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7210 {
7211         int ret = 0;
7212
7213         if (!vreg || vreg->enabled)
7214                 goto out;
7215
7216         ret = ufshcd_config_vreg(dev, vreg, true);
7217         if (!ret)
7218                 ret = regulator_enable(vreg->reg);
7219
7220         if (!ret)
7221                 vreg->enabled = true;
7222         else
7223                 dev_err(dev, "%s: %s enable failed, err=%d\n",
7224                                 __func__, vreg->name, ret);
7225 out:
7226         return ret;
7227 }
7228
7229 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7230 {
7231         int ret = 0;
7232
7233         if (!vreg || !vreg->enabled)
7234                 goto out;
7235
7236         ret = regulator_disable(vreg->reg);
7237
7238         if (!ret) {
7239                 /* ignore errors on applying disable config */
7240                 ufshcd_config_vreg(dev, vreg, false);
7241                 vreg->enabled = false;
7242         } else {
7243                 dev_err(dev, "%s: %s disable failed, err=%d\n",
7244                                 __func__, vreg->name, ret);
7245         }
7246 out:
7247         return ret;
7248 }
7249
7250 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7251 {
7252         int ret = 0;
7253         struct device *dev = hba->dev;
7254         struct ufs_vreg_info *info = &hba->vreg_info;
7255
7256         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7257         if (ret)
7258                 goto out;
7259
7260         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7261         if (ret)
7262                 goto out;
7263
7264         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7267
7268 out:
7269         if (ret) {
7270                 ufshcd_toggle_vreg(dev, info->vccq2, false);
7271                 ufshcd_toggle_vreg(dev, info->vccq, false);
7272                 ufshcd_toggle_vreg(dev, info->vcc, false);
7273         }
7274         return ret;
7275 }
7276
7277 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7278 {
7279         struct ufs_vreg_info *info = &hba->vreg_info;
7280
7281         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7282 }
7283
7284 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7285 {
7286         int ret = 0;
7287
7288         if (!vreg)
7289                 goto out;
7290
7291         vreg->reg = devm_regulator_get(dev, vreg->name);
7292         if (IS_ERR(vreg->reg)) {
7293                 ret = PTR_ERR(vreg->reg);
7294                 dev_err(dev, "%s: %s get failed, err=%d\n",
7295                                 __func__, vreg->name, ret);
7296         }
7297 out:
7298         return ret;
7299 }
7300
7301 static int ufshcd_init_vreg(struct ufs_hba *hba)
7302 {
7303         int ret = 0;
7304         struct device *dev = hba->dev;
7305         struct ufs_vreg_info *info = &hba->vreg_info;
7306
7307         ret = ufshcd_get_vreg(dev, info->vcc);
7308         if (ret)
7309                 goto out;
7310
7311         ret = ufshcd_get_vreg(dev, info->vccq);
7312         if (ret)
7313                 goto out;
7314
7315         ret = ufshcd_get_vreg(dev, info->vccq2);
7316 out:
7317         return ret;
7318 }
7319
7320 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7321 {
7322         struct ufs_vreg_info *info = &hba->vreg_info;
7323
7324         /* vreg_info is embedded in struct ufs_hba, so info is never NULL */
7325         return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7328 }
7329
7330 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7331                                         bool skip_ref_clk)
7332 {
7333         int ret = 0;
7334         struct ufs_clk_info *clki;
7335         struct list_head *head = &hba->clk_list_head;
7336         unsigned long flags;
7337         ktime_t start = ktime_get();
7338         bool clk_state_changed = false;
7339
7340         if (list_empty(head))
7341                 goto out;
7342
7343         /*
7344          * Vendor specific setup_clocks ops may depend on the clocks managed
7345          * by this standard driver, hence call the vendor specific setup_clocks
7346          * before disabling the clocks managed here.
7347          */
7348         if (!on) {
7349                 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7350                 if (ret)
7351                         return ret;
7352         }
7353
7354         list_for_each_entry(clki, head, list) {
7355                 if (!IS_ERR_OR_NULL(clki->clk)) {
7356                         if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7357                                 continue;
7358
7359                         clk_state_changed |= on ^ clki->enabled;
7360                         if (on && !clki->enabled) {
7361                                 ret = clk_prepare_enable(clki->clk);
7362                                 if (ret) {
7363                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7364                                                 __func__, clki->name, ret);
7365                                         goto out;
7366                                 }
7367                         } else if (!on && clki->enabled) {
7368                                 clk_disable_unprepare(clki->clk);
7369                         }
7370                         clki->enabled = on;
7371                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7372                                         clki->name, on ? "en" : "dis");
7373                 }
7374         }
7375
7376         /*
7377          * Vendor specific setup_clocks ops may depend on the clocks managed
7378          * by this standard driver, hence call the vendor specific setup_clocks
7379          * after enabling the clocks managed here.
7380          */
7381         if (on) {
7382                 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7383                 if (ret)
7384                         goto out;
7385         }
7386
7387 out:
7388         if (ret) {
7389                 list_for_each_entry(clki, head, list) {
7390                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7391                                 clk_disable_unprepare(clki->clk);
7392                 }
7393         } else if (on) {
7394                 spin_lock_irqsave(hba->host->host_lock, flags);
7395                 hba->clk_gating.state = CLKS_ON;
7396                 trace_ufshcd_clk_gating(dev_name(hba->dev),
7397                                         hba->clk_gating.state);
7398                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7399         }
7400
7401         if (clk_state_changed)
7402                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7403                         (on ? "on" : "off"),
7404                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7405         return ret;
7406 }
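
/*
 * A minimal sketch (not part of this file) of how a vendor setup_clocks
 * vop can use the PRE_CHANGE/POST_CHANGE notifications described above:
 * gate its own clocks before the core clocks go away, and ungate them only
 * once the core clocks are back on. All example_* helpers are hypothetical.
 */
#if 0
static int example_vops_setup_clocks(struct ufs_hba *hba, bool on,
				     enum ufs_notify_change_status status)
{
	if (!on && status == PRE_CHANGE)
		/* core clocks are still running: safe to gate vendor clocks */
		example_gate_vendor_clks(hba);
	else if (on && status == POST_CHANGE)
		/* core clocks are running again: restore vendor clocks */
		return example_ungate_vendor_clks(hba);

	return 0;
}
#endif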
7407
7408 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7409 {
7410         return  __ufshcd_setup_clocks(hba, on, false);
7411 }
7412
7413 static int ufshcd_init_clocks(struct ufs_hba *hba)
7414 {
7415         int ret = 0;
7416         struct ufs_clk_info *clki;
7417         struct device *dev = hba->dev;
7418         struct list_head *head = &hba->clk_list_head;
7419
7420         if (list_empty(head))
7421                 goto out;
7422
7423         list_for_each_entry(clki, head, list) {
7424                 if (!clki->name)
7425                         continue;
7426
7427                 clki->clk = devm_clk_get(dev, clki->name);
7428                 if (IS_ERR(clki->clk)) {
7429                         ret = PTR_ERR(clki->clk);
7430                         dev_err(dev, "%s: %s clk get failed, %d\n",
7431                                         __func__, clki->name, ret);
7432                         goto out;
7433                 }
7434
7435                 /*
7436                  * Parse device ref clk freq as per device tree "ref_clk".
7437                  * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
7438                  * in ufshcd_alloc_host().
7439                  */
7440                 if (!strcmp(clki->name, "ref_clk"))
7441                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7442
7443                 if (clki->max_freq) {
7444                         ret = clk_set_rate(clki->clk, clki->max_freq);
7445                         if (ret) {
7446                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7447                                         __func__, clki->name,
7448                                         clki->max_freq, ret);
7449                                 goto out;
7450                         }
7451                         clki->curr_freq = clki->max_freq;
7452                 }
7453                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7454                                 clki->name, clk_get_rate(clki->clk));
7455         }
7456 out:
7457         return ret;
7458 }
7459
7460 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7461 {
7462         int err = 0;
7463
7464         if (!hba->vops)
7465                 goto out;
7466
7467         err = ufshcd_vops_init(hba);
7468         if (err)
7469                 goto out;
7470
7471         err = ufshcd_vops_setup_regulators(hba, true);
7472         if (err)
7473                 goto out_exit;
7474
7475         goto out;
7476
7477 out_exit:
7478         ufshcd_vops_exit(hba);
7479 out:
7480         if (err)
7481                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7482                         __func__, ufshcd_get_var_name(hba), err);
7483         return err;
7484 }
7485
7486 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7487 {
7488         if (!hba->vops)
7489                 return;
7490
7491         ufshcd_vops_setup_regulators(hba, false);
7492
7493         ufshcd_vops_exit(hba);
7494 }
7495
7496 static int ufshcd_hba_init(struct ufs_hba *hba)
7497 {
7498         int err;
7499
7500         /*
7501          * Handle host controller power separately from the UFS device power
7502          * rails: this makes it easier to control host controller power
7503          * collapse, which is different from UFS device power collapse.
7504          * Also, enable the host controller power before going ahead with
7505          * the rest of the initialization here.
7506          */
7507         err = ufshcd_init_hba_vreg(hba);
7508         if (err)
7509                 goto out;
7510
7511         err = ufshcd_setup_hba_vreg(hba, true);
7512         if (err)
7513                 goto out;
7514
7515         err = ufshcd_init_clocks(hba);
7516         if (err)
7517                 goto out_disable_hba_vreg;
7518
7519         err = ufshcd_setup_clocks(hba, true);
7520         if (err)
7521                 goto out_disable_hba_vreg;
7522
7523         err = ufshcd_init_vreg(hba);
7524         if (err)
7525                 goto out_disable_clks;
7526
7527         err = ufshcd_setup_vreg(hba, true);
7528         if (err)
7529                 goto out_disable_clks;
7530
7531         err = ufshcd_variant_hba_init(hba);
7532         if (err)
7533                 goto out_disable_vreg;
7534
7535         hba->is_powered = true;
7536         goto out;
7537
7538 out_disable_vreg:
7539         ufshcd_setup_vreg(hba, false);
7540 out_disable_clks:
7541         ufshcd_setup_clocks(hba, false);
7542 out_disable_hba_vreg:
7543         ufshcd_setup_hba_vreg(hba, false);
7544 out:
7545         return err;
7546 }
7547
7548 static void ufshcd_hba_exit(struct ufs_hba *hba)
7549 {
7550         if (hba->is_powered) {
7551                 ufshcd_variant_hba_exit(hba);
7552                 ufshcd_setup_vreg(hba, false);
7553                 if (ufshcd_is_clkscaling_supported(hba) && hba->devfreq)
7554                         ufshcd_suspend_clkscaling(hba);
7557                 ufshcd_setup_clocks(hba, false);
7558                 ufshcd_setup_hba_vreg(hba, false);
7559                 hba->is_powered = false;
7560         }
7561 }
7562
7563 static int
7564 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7565 {
7566         unsigned char cmd[6] = {REQUEST_SENSE,
7567                                 0,
7568                                 0,
7569                                 0,
7570                                 UFS_SENSE_SIZE,
7571                                 0};
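	/*
	 * CDB byte 4 of REQUEST SENSE is the allocation length. This command
	 * is only issued to clear a pending unit attention condition; the
	 * returned sense data itself is discarded.
	 */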
7572         char *buffer;
7573         int ret;
7574
7575         buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
7576         if (!buffer) {
7577                 ret = -ENOMEM;
7578                 goto out;
7579         }
7580
7581         ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7582                         UFS_SENSE_SIZE, NULL, NULL,
7583                         msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7584         if (ret)
7585                 pr_err("%s: failed with err %d\n", __func__, ret);
7586
7587         kfree(buffer);
7588 out:
7589         return ret;
7590 }
7591
7592 /**
7593  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7594  *                           power mode
7595  * @hba: per adapter instance
7596  * @pwr_mode: device power mode to set
7597  *
7598  * Returns 0 if requested power mode is set successfully
7599  * Returns non-zero if failed to set the requested power mode
7600  */
7601 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7602                                      enum ufs_dev_pwr_mode pwr_mode)
7603 {
7604         unsigned char cmd[6] = { START_STOP };
7605         struct scsi_sense_hdr sshdr;
7606         struct scsi_device *sdp;
7607         unsigned long flags;
7608         int ret;
7609
7610         spin_lock_irqsave(hba->host->host_lock, flags);
7611         sdp = hba->sdev_ufs_device;
7612         if (sdp) {
7613                 ret = scsi_device_get(sdp);
7614                 if (!ret && !scsi_device_online(sdp)) {
7615                         ret = -ENODEV;
7616                         scsi_device_put(sdp);
7617                 }
7618         } else {
7619                 ret = -ENODEV;
7620         }
7621         spin_unlock_irqrestore(hba->host->host_lock, flags);
7622
7623         if (ret)
7624                 return ret;
7625
7626         /*
7627          * If scsi commands fail, the scsi mid-layer schedules scsi error-
7628          * handling, which would wait for host to be resumed. Since we know
7629          * we are functional while we are here, skip host resume in error
7630          * handling context.
7631          */
7632         hba->host->eh_noresume = 1;
7633         if (hba->wlun_dev_clr_ua) {
7634                 ret = ufshcd_send_request_sense(hba, sdp);
7635                 if (ret)
7636                         goto out;
7637                 /* Unit attention condition is cleared now */
7638                 hba->wlun_dev_clr_ua = false;
7639         }
7640
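	/*
	 * START STOP UNIT carries the target power condition in bits 7:4 of
	 * CDB byte 4; the ufs_dev_pwr_mode values are defined to match the
	 * UFS power conditions, so the mode can be shifted in directly.
	 */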
7641         cmd[4] = pwr_mode << 4;
7642
7643         /*
7644          * This function is generally called from the power management
7645          * callbacks, hence set the RQF_PM flag so that it doesn't resume
7646          * already-suspended children.
7647          */
7648         ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7649                         START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7650         if (ret) {
7651                 sdev_printk(KERN_WARNING, sdp,
7652                             "START_STOP failed for power mode: %d, result %x\n",
7653                             pwr_mode, ret);
7654                 if (driver_byte(ret) == DRIVER_SENSE)
7655                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
7656         }
7657
7658         if (!ret)
7659                 hba->curr_dev_pwr_mode = pwr_mode;
7660 out:
7661         scsi_device_put(sdp);
7662         hba->host->eh_noresume = 0;
7663         return ret;
7664 }
7665
7666 static int ufshcd_link_state_transition(struct ufs_hba *hba,
7667                                         enum uic_link_state req_link_state,
7668                                         int check_for_bkops)
7669 {
7670         int ret = 0;
7671
7672         if (req_link_state == hba->uic_link_state)
7673                 return 0;
7674
7675         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7676                 ret = ufshcd_uic_hibern8_enter(hba);
7677                 if (!ret)
7678                         ufshcd_set_link_hibern8(hba);
7679                 else
7680                         goto out;
7681         }
7682         /*
7683          * If autobkops is enabled, link can't be turned off because
7684          * turning off the link would also turn off the device.
7685          */
7686         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7687                    (!check_for_bkops || !hba->auto_bkops_enabled)) {
7688                 /*
7689                  * Make sure the link is in low power mode; we currently do
7690                  * this by putting the link in Hibern8. Another way to put
7691                  * the link in low power mode would be to send a DME end
7692                  * point reset to the device and then a DME reset to the
7693                  * local UniPro, but putting the link in Hibern8 is much
7694                  * faster.
7695                  */
7696                 ret = ufshcd_uic_hibern8_enter(hba);
7697                 if (ret)
7698                         goto out;
7699                 /*
7700                  * Change controller state to "reset state" which
7701                  * should also put the link in off/reset state
7702                  */
7703                 ufshcd_hba_stop(hba, true);
7704                 /*
7705                  * TODO: Check if we need any delay to make sure that
7706                  * controller is reset
7707                  */
7708                 ufshcd_set_link_off(hba);
7709         }
7710
7711 out:
7712         return ret;
7713 }
7714
7715 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7716 {
7717         /*
7718          * Some UFS devices may keep drawing more than sleep current
7719          * (at least for 500 us) from the UFS rails (especially from the
7720          * VCCQ rail). To avoid this, add a 2 ms delay before putting
7721          * these UFS rails in LPM mode.
7722          */
7723         if (!ufshcd_is_link_active(hba) &&
7724             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7725                 usleep_range(2000, 2100);
7726
7727         /*
7728          * If the UFS device is in UFS_Sleep state, turn off the VCC rail
7729          * to save some power.
7730          *
7731          * If the UFS device and link are in OFF state, all power supplies
7732          * (VCC, VCCQ, VCCQ2) can be turned off if power-on write protect is
7733          * not required. If the link is inactive (Hibern8 or OFF state) and
7734          * the device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
7735          *
7736          * Ignore any error from ufshcd_toggle_vreg(): the device is in a low
7737          * power state anyway, which already saves some power.
7738          */
7739         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7740             !hba->dev_info.is_lu_power_on_wp) {
7741                 ufshcd_setup_vreg(hba, false);
7742         } else if (!ufshcd_is_ufs_dev_active(hba)) {
7743                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7744                 if (!ufshcd_is_link_active(hba)) {
7745                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7746                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7747                 }
7748         }
7749 }
7750
7751 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7752 {
7753         int ret = 0;
7754
7755         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7756             !hba->dev_info.is_lu_power_on_wp) {
7757                 ret = ufshcd_setup_vreg(hba, true);
7758         } else if (!ufshcd_is_ufs_dev_active(hba)) {
7759                 if (!ufshcd_is_link_active(hba)) {
7760                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7761                         if (ret)
7762                                 goto vcc_disable;
7763                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7764                         if (ret)
7765                                 goto vccq_lpm;
7766                 }
7767                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7768         }
7769         goto out;
7770
7771 vccq_lpm:
7772         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7773 vcc_disable:
7774         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7775 out:
7776         return ret;
7777 }
7778
7779 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7780 {
7781         if (ufshcd_is_link_off(hba))
7782                 ufshcd_setup_hba_vreg(hba, false);
7783 }
7784
7785 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7786 {
7787         if (ufshcd_is_link_off(hba))
7788                 ufshcd_setup_hba_vreg(hba, true);
7789 }
7790
7791 /**
7792  * ufshcd_suspend - helper function for suspend operations
7793  * @hba: per adapter instance
7794  * @pm_op: desired low power operation type
7795  *
7796  * This function will try to put the UFS device and link into low power
7797  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7798  * (System PM level).
7799  *
7800  * If this function is called during shutdown, it will make sure that
7801  * both the UFS device and the UFS link are powered off.
7802  *
7803  * NOTE: UFS device & link must be active before this function is called.
7804  *
7805  * Returns 0 for success and non-zero for failure
7806  */
7807 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7808 {
7809         int ret = 0;
7810         enum ufs_pm_level pm_lvl;
7811         enum ufs_dev_pwr_mode req_dev_pwr_mode;
7812         enum uic_link_state req_link_state;
7813
7814         hba->pm_op_in_progress = 1;
7815         if (!ufshcd_is_shutdown_pm(pm_op)) {
7816                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7817                          hba->rpm_lvl : hba->spm_lvl;
7818                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7819                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7820         } else {
7821                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7822                 req_link_state = UIC_LINK_OFF_STATE;
7823         }
7824
7825         /*
7826          * If we can't transition into any of the low power modes,
7827          * just gate the clocks.
7828          */
7829         ufshcd_hold(hba, false);
7830         hba->clk_gating.is_suspended = true;
7831
7832         if (hba->clk_scaling.is_allowed) {
7833                 cancel_work_sync(&hba->clk_scaling.suspend_work);
7834                 cancel_work_sync(&hba->clk_scaling.resume_work);
7835                 ufshcd_suspend_clkscaling(hba);
7836         }
7837
7838         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7839                         req_link_state == UIC_LINK_ACTIVE_STATE) {
7840                 goto disable_clks;
7841         }
7842
7843         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7844             (req_link_state == hba->uic_link_state))
7845                 goto enable_gating;
7846
7847         /* UFS device & link must be active before this function is called */
7848         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7849                 ret = -EINVAL;
7850                 goto enable_gating;
7851         }
7852
7853         if (ufshcd_is_runtime_pm(pm_op)) {
7854                 if (ufshcd_can_autobkops_during_suspend(hba)) {
7855                         /*
7856                          * The device is idle with no requests in the queue,
7857                          * allow background operations if bkops status shows
7858                          * that performance might be impacted.
7859                          */
7860                         ret = ufshcd_urgent_bkops(hba);
7861                         if (ret)
7862                                 goto enable_gating;
7863                 } else {
7864                         /* make sure that auto bkops is disabled */
7865                         ufshcd_disable_auto_bkops(hba);
7866                 }
7867         }
7868
7869         if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7870              ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7871                !ufshcd_is_runtime_pm(pm_op))) {
7872                 /* ensure that bkops is disabled */
7873                 ufshcd_disable_auto_bkops(hba);
7874                 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7875                 if (ret)
7876                         goto enable_gating;
7877         }
7878
7879         ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7880         if (ret)
7881                 goto set_dev_active;
7882
7883         ufshcd_vreg_set_lpm(hba);
7884
7885 disable_clks:
7886         /*
7887          * Call the vendor specific suspend callback. As these callbacks
7888          * may access vendor specific host controller register space, call
7889          * them while the host clocks are still ON.
7890          */
7891         ret = ufshcd_vops_suspend(hba, pm_op);
7892         if (ret)
7893                 goto set_link_active;
7894
7895         if (!ufshcd_is_link_active(hba))
7896                 ufshcd_setup_clocks(hba, false);
7897         else
7898                 /* If link is active, device ref_clk can't be switched off */
7899                 __ufshcd_setup_clocks(hba, false, true);
7900
7901         hba->clk_gating.state = CLKS_OFF;
7902         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
7903         /*
7904          * Disable the host irq, as no host controller transactions are
7905          * expected till resume.
7906          */
7907         ufshcd_disable_irq(hba);
7908         /* Put the host controller in low power mode if possible */
7909         ufshcd_hba_vreg_set_lpm(hba);
7910         goto out;
7911
7912 set_link_active:
7913         if (hba->clk_scaling.is_allowed)
7914                 ufshcd_resume_clkscaling(hba);
7915         ufshcd_vreg_set_hpm(hba);
7916         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7917                 ufshcd_set_link_active(hba);
7918         else if (ufshcd_is_link_off(hba))
7919                 ufshcd_host_reset_and_restore(hba);
7920 set_dev_active:
7921         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7922                 ufshcd_disable_auto_bkops(hba);
7923 enable_gating:
7924         if (hba->clk_scaling.is_allowed)
7925                 ufshcd_resume_clkscaling(hba);
7926         hba->clk_gating.is_suspended = false;
7927         ufshcd_release(hba);
7928 out:
7929         hba->pm_op_in_progress = 0;
7930         if (ret)
7931                 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
7932         return ret;
7933 }
7934
7935 /**
7936  * ufshcd_resume - helper function for resume operations
7937  * @hba: per adapter instance
7938  * @pm_op: runtime PM or system PM
7939  *
7940  * This function basically brings the UFS device, UniPro link and controller
7941  * to active state.
7942  *
7943  * Returns 0 for success and non-zero for failure
7944  */
7945 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7946 {
7947         int ret;
7948         enum uic_link_state old_link_state;
7949
7950         hba->pm_op_in_progress = 1;
7951         old_link_state = hba->uic_link_state;
7952
7953         ufshcd_hba_vreg_set_hpm(hba);
7954         /* Make sure clocks are enabled before accessing controller */
7955         ret = ufshcd_setup_clocks(hba, true);
7956         if (ret)
7957                 goto out;
7958
7959         /* enable the host irq as host controller would be active soon */
7960         ufshcd_enable_irq(hba);
7961
7962         ret = ufshcd_vreg_set_hpm(hba);
7963         if (ret)
7964                 goto disable_irq_and_vops_clks;
7965
7966         /*
7967          * Call vendor specific resume callback. As these callbacks may access
7968          * vendor specific host controller register space call them when the
7969          * host clocks are ON.
7970          */
7971         ret = ufshcd_vops_resume(hba, pm_op);
7972         if (ret)
7973                 goto disable_vreg;
7974
7975         if (ufshcd_is_link_hibern8(hba)) {
7976                 ret = ufshcd_uic_hibern8_exit(hba);
7977                 if (!ret)
7978                         ufshcd_set_link_active(hba);
7979                 else
7980                         goto vendor_suspend;
7981         } else if (ufshcd_is_link_off(hba)) {
7982                 ret = ufshcd_host_reset_and_restore(hba);
7983                 /*
7984                  * ufshcd_host_reset_and_restore() should have already
7985                  * set the link state as active
7986                  */
7987                 if (ret || !ufshcd_is_link_active(hba))
7988                         goto vendor_suspend;
7989         }
7990
7991         if (!ufshcd_is_ufs_dev_active(hba)) {
7992                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7993                 if (ret)
7994                         goto set_old_link_state;
7995         }
7996
7997         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7998                 ufshcd_enable_auto_bkops(hba);
7999         else
8000                 /*
8001                  * If BKOPs operations are urgently needed at this moment then
8002                  * keep auto-bkops enabled or else disable it.
8003                  */
8004                 ufshcd_urgent_bkops(hba);
8005
8006         hba->clk_gating.is_suspended = false;
8007
8008         if (hba->clk_scaling.is_allowed)
8009                 ufshcd_resume_clkscaling(hba);
8010
8011         /* Enable Auto-Hibernate if configured */
8012         ufshcd_auto_hibern8_enable(hba);
8013
8014         /* Schedule clock gating in case of no access to UFS device yet */
8015         ufshcd_release(hba);
8016
8017         goto out;
8018
8019 set_old_link_state:
8020         ufshcd_link_state_transition(hba, old_link_state, 0);
8021 vendor_suspend:
8022         ufshcd_vops_suspend(hba, pm_op);
8023 disable_vreg:
8024         ufshcd_vreg_set_lpm(hba);
8025 disable_irq_and_vops_clks:
8026         ufshcd_disable_irq(hba);
8027         if (hba->clk_scaling.is_allowed)
8028                 ufshcd_suspend_clkscaling(hba);
8029         ufshcd_setup_clocks(hba, false);
8030 out:
8031         hba->pm_op_in_progress = 0;
8032         if (ret)
8033                 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
8034         return ret;
8035 }
8036
8037 /**
8038  * ufshcd_system_suspend - system suspend routine
8039  * @hba: per adapter instance
8040  *
8041  * Check the description of the ufshcd_suspend() function for more details.
8042  *
8043  * Returns 0 for success and non-zero for failure
8044  */
8045 int ufshcd_system_suspend(struct ufs_hba *hba)
8046 {
8047         int ret = 0;
8048         ktime_t start = ktime_get();
8049
8050         if (!hba || !hba->is_powered)
8051                 return 0;
8052
8053         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8054              hba->curr_dev_pwr_mode) &&
8055             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8056              hba->uic_link_state))
8057                 goto out;
8058
8059         if (pm_runtime_suspended(hba->dev)) {
8060                 /*
8061                  * The UFS device and/or UFS link low power states during
8062                  * runtime suspend may differ from what is expected during
8063                  * system suspend. Hence runtime resume the device & link
8064                  * and let the system suspend low power states take effect.
8065                  * TODO: If resume takes a long time, we might optimize it
8066                  * in the future by not resuming everything if possible.
8067                  */
8068                 ret = ufshcd_runtime_resume(hba);
8069                 if (ret)
8070                         goto out;
8071         }
8072
8073         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8074 out:
8075         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8076                 ktime_to_us(ktime_sub(ktime_get(), start)),
8077                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8078         if (!ret)
8079                 hba->is_sys_suspended = true;
8080         return ret;
8081 }
8082 EXPORT_SYMBOL(ufshcd_system_suspend);
8083
8084 /**
8085  * ufshcd_system_resume - system resume routine
8086  * @hba: per adapter instance
8087  *
8088  * Returns 0 for success and non-zero for failure
8089  */
8091 int ufshcd_system_resume(struct ufs_hba *hba)
8092 {
8093         int ret = 0;
8094         ktime_t start = ktime_get();
8095
8096         if (!hba)
8097                 return -EINVAL;
8098
8099         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8100                 /*
8101                  * Let the runtime resume take care of resuming
8102                  * if runtime suspended.
8103                  */
8104                 goto out;
8105         else
8106                 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8107 out:
8108         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8109                 ktime_to_us(ktime_sub(ktime_get(), start)),
8110                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8111         if (!ret)
8112                 hba->is_sys_suspended = false;
8113         return ret;
8114 }
8115 EXPORT_SYMBOL(ufshcd_system_resume);
8116
8117 /**
8118  * ufshcd_runtime_suspend - runtime suspend routine
8119  * @hba: per adapter instance
8120  *
8121  * Check the description of the ufshcd_suspend() function for more details.
8122  *
8123  * Returns 0 for success and non-zero for failure
8124  */
8125 int ufshcd_runtime_suspend(struct ufs_hba *hba)
8126 {
8127         int ret = 0;
8128         ktime_t start = ktime_get();
8129
8130         if (!hba)
8131                 return -EINVAL;
8132
8133         if (!hba->is_powered)
8134                 goto out;
8135         else
8136                 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8137 out:
8138         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8139                 ktime_to_us(ktime_sub(ktime_get(), start)),
8140                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8141         return ret;
8142 }
8143 EXPORT_SYMBOL(ufshcd_runtime_suspend);
8144
8145 /**
8146  * ufshcd_runtime_resume - runtime resume routine
8147  * @hba: per adapter instance
8148  *
8149  * This function basically brings the UFS device, UniPro link and controller
8150  * to active state. Following operations are done in this function:
8151  *
8152  * 1. Turn on all the controller related clocks
8153  * 2. Bring the UniPro link out of Hibernate state
8154  * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8155  *    to active state.
8156  * 4. If auto-bkops is enabled on the device, disable it.
8157  *
8158  * So the following would be the possible power state after this function
8159  *    returns successfully:
8160  *      S1: UFS device in Active state with VCC rail ON
8161  *          UniPro link in Active state
8162  *          All the UFS/UniPro controller clocks are ON
8163  *
8164  * Returns 0 for success and non-zero for failure
8165  */
8166 int ufshcd_runtime_resume(struct ufs_hba *hba)
8167 {
8168         int ret = 0;
8169         ktime_t start = ktime_get();
8170
8171         if (!hba)
8172                 return -EINVAL;
8173
8174         if (!hba->is_powered)
8175                 goto out;
8176         else
8177                 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8178 out:
8179         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8180                 ktime_to_us(ktime_sub(ktime_get(), start)),
8181                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8182         return ret;
8183 }
8184 EXPORT_SYMBOL(ufshcd_runtime_resume);
8185
8186 int ufshcd_runtime_idle(struct ufs_hba *hba)
8187 {
8188         return 0;
8189 }
8190 EXPORT_SYMBOL(ufshcd_runtime_idle);
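
/*
 * A minimal sketch (not part of this file) of how a bus glue driver would
 * typically wire the exported suspend/resume helpers above into its
 * dev_pm_ops, assuming it stored the hba pointer with dev_set_drvdata()
 * as the platform/PCI glue drivers do. All example_* names are hypothetical.
 */
#if 0
static int example_ufs_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}

static int example_ufs_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}

static int example_ufs_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int example_ufs_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static const struct dev_pm_ops example_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_ufs_suspend, example_ufs_resume)
	SET_RUNTIME_PM_OPS(example_ufs_runtime_suspend,
			   example_ufs_runtime_resume, NULL)
};
#endif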
8191
8192 /**
8193  * ufshcd_shutdown - shutdown routine
8194  * @hba: per adapter instance
8195  *
8196  * This function powers off both the UFS device and the UFS link.
8197  *
8198  * Returns 0 always to allow force shutdown even in case of errors.
8199  */
8200 int ufshcd_shutdown(struct ufs_hba *hba)
8201 {
8202         int ret = 0;
8203
8204         if (!hba->is_powered)
8205                 goto out;
8206
8207         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8208                 goto out;
8209
8210         if (pm_runtime_suspended(hba->dev)) {
8211                 ret = ufshcd_runtime_resume(hba);
8212                 if (ret)
8213                         goto out;
8214         }
8215
8216         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8217 out:
8218         if (ret)
8219                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8220         /* allow force shutdown even in case of errors */
8221         return 0;
8222 }
8223 EXPORT_SYMBOL(ufshcd_shutdown);
8224
8225 /**
8226  * ufshcd_remove - de-allocate SCSI host and host memory space
8227  *              data structure memory
8228  * @hba: per adapter instance
8229  */
8230 void ufshcd_remove(struct ufs_hba *hba)
8231 {
8232         ufs_bsg_remove(hba);
8233         ufs_sysfs_remove_nodes(hba->dev);
8234         blk_cleanup_queue(hba->tmf_queue);
8235         blk_mq_free_tag_set(&hba->tmf_tag_set);
8236         blk_cleanup_queue(hba->cmd_queue);
8237         scsi_remove_host(hba->host);
8238         /* disable interrupts */
8239         ufshcd_disable_intr(hba, hba->intr_mask);
8240         ufshcd_hba_stop(hba, true);
8241
8242         ufshcd_exit_clk_scaling(hba);
8243         ufshcd_exit_clk_gating(hba);
8244         if (ufshcd_is_clkscaling_supported(hba))
8245                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8246         ufshcd_hba_exit(hba);
8247 }
8248 EXPORT_SYMBOL_GPL(ufshcd_remove);
8249
8250 /**
8251  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8252  * @hba: pointer to Host Bus Adapter (HBA)
8253  */
8254 void ufshcd_dealloc_host(struct ufs_hba *hba)
8255 {
8256         scsi_host_put(hba->host);
8257 }
8258 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8259
8260 /**
8261  * ufshcd_set_dma_mask - Set dma mask based on the controller
8262  *                       addressing capability
8263  * @hba: per adapter instance
8264  *
8265  * Returns 0 for success, non-zero for failure
8266  */
8267 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8268 {
8269         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8270                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8271                         return 0;
8272         }
8273         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8274 }
8275
8276 /**
8277  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
8278  * @dev: pointer to device handle
8279  * @hba_handle: driver private handle
8280  * Returns 0 on success, non-zero value on failure
8281  */
8282 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
8283 {
8284         struct Scsi_Host *host;
8285         struct ufs_hba *hba;
8286         int err = 0;
8287
8288         if (!dev) {
8289                 dev_err(dev,
8290                 "Invalid memory reference for dev is NULL\n");
8291                 err = -ENODEV;
8292                 goto out_error;
8293         }
8294
8295         host = scsi_host_alloc(&ufshcd_driver_template,
8296                                 sizeof(struct ufs_hba));
8297         if (!host) {
8298                 dev_err(dev, "scsi_host_alloc failed\n");
8299                 err = -ENOMEM;
8300                 goto out_error;
8301         }
8302         hba = shost_priv(host);
8303         hba->host = host;
8304         hba->dev = dev;
8305         *hba_handle = hba;
8306         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
8307
8308         INIT_LIST_HEAD(&hba->clk_list_head);
8309
8310 out_error:
8311         return err;
8312 }
8313 EXPORT_SYMBOL(ufshcd_alloc_host);
8314
8315 /* This function exists because blk_mq_alloc_tag_set() requires .queue_rq to be set. */
8316 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
8317                                      const struct blk_mq_queue_data *qd)
8318 {
8319         WARN_ON_ONCE(true);
8320         return BLK_STS_NOTSUPP;
8321 }
8322
8323 static const struct blk_mq_ops ufshcd_tmf_ops = {
8324         .queue_rq = ufshcd_queue_tmf,
8325 };
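
/*
 * Note: the tmf_queue built from this tag set in ufshcd_init() is never
 * used to dispatch real requests; requests are only allocated from it so
 * that the block layer hands out free task management slot numbers as
 * request tags.
 */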
8326
8327 /**
8328  * ufshcd_init - Driver initialization routine
8329  * @hba: per-adapter instance
8330  * @mmio_base: base register address
8331  * @irq: Interrupt line of device
8332  * Returns 0 on success, non-zero value on failure
8333  */
8334 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8335 {
8336         int err;
8337         struct Scsi_Host *host = hba->host;
8338         struct device *dev = hba->dev;
8339
8340         if (!mmio_base) {
8341                 dev_err(hba->dev,
8342                 "Invalid memory reference for mmio_base is NULL\n");
8343                 err = -ENODEV;
8344                 goto out_error;
8345         }
8346
8347         hba->mmio_base = mmio_base;
8348         hba->irq = irq;
8349
8350         err = ufshcd_hba_init(hba);
8351         if (err)
8352                 goto out_error;
8353
8354         /* Read capabilities registers */
8355         ufshcd_hba_capabilities(hba);
8356
8357         /* Get UFS version supported by the controller */
8358         hba->ufs_version = ufshcd_get_ufs_version(hba);
8359
8360         if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8361             (hba->ufs_version != UFSHCI_VERSION_11) &&
8362             (hba->ufs_version != UFSHCI_VERSION_20) &&
8363             (hba->ufs_version != UFSHCI_VERSION_21))
8364                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8365                         hba->ufs_version);
8366
8367         /* Get Interrupt bit mask per version */
8368         hba->intr_mask = ufshcd_get_intr_mask(hba);
8369
8370         err = ufshcd_set_dma_mask(hba);
8371         if (err) {
8372                 dev_err(hba->dev, "set dma mask failed\n");
8373                 goto out_disable;
8374         }
8375
8376         /* Allocate memory for host memory space */
8377         err = ufshcd_memory_alloc(hba);
8378         if (err) {
8379                 dev_err(hba->dev, "Memory allocation failed\n");
8380                 goto out_disable;
8381         }
8382
8383         /* Configure LRB */
8384         ufshcd_host_memory_configure(hba);
8385
8386         host->can_queue = hba->nutrs;
8387         host->cmd_per_lun = hba->nutrs;
8388         host->max_id = UFSHCD_MAX_ID;
8389         host->max_lun = UFS_MAX_LUNS;
8390         host->max_channel = UFSHCD_MAX_CHANNEL;
8391         host->unique_id = host->host_no;
8392         host->max_cmd_len = UFS_CDB_SIZE;
8393
8394         hba->max_pwr_info.is_valid = false;
8395
8396         /* Initialize work queues */
8397         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8398         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8399
8400         /* Initialize UIC command mutex */
8401         mutex_init(&hba->uic_cmd_mutex);
8402
8403         /* Initialize mutex for device management commands */
8404         mutex_init(&hba->dev_cmd.lock);
8405
8406         init_rwsem(&hba->clk_scaling_lock);
8407
8408         ufshcd_init_clk_gating(hba);
8409
8410         ufshcd_init_clk_scaling(hba);
8411
8412         /*
8413          * In order to avoid any spurious interrupt immediately after
8414          * registering UFS controller interrupt handler, clear any pending UFS
8415          * interrupt status and disable all the UFS interrupts.
8416          */
8417         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8418                       REG_INTERRUPT_STATUS);
8419         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8420         /*
8421          * Make sure that UFS interrupts are disabled and any pending interrupt
8422          * status is cleared before registering UFS interrupt handler.
8423          */
8424         mb();
8425
8426         /* IRQ registration */
8427         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8428         if (err) {
8429                 dev_err(hba->dev, "request irq failed\n");
8430                 goto exit_gating;
8431         } else {
8432                 hba->is_irq_enabled = true;
8433         }
8434
8435         err = scsi_add_host(host, hba->dev);
8436         if (err) {
8437                 dev_err(hba->dev, "scsi_add_host failed\n");
8438                 goto exit_gating;
8439         }
8440
8441         hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
8442         if (IS_ERR(hba->cmd_queue)) {
8443                 err = PTR_ERR(hba->cmd_queue);
8444                 goto out_remove_scsi_host;
8445         }
8446
8447         hba->tmf_tag_set = (struct blk_mq_tag_set) {
8448                 .nr_hw_queues   = 1,
8449                 .queue_depth    = hba->nutmrs,
8450                 .ops            = &ufshcd_tmf_ops,
8451                 .flags          = BLK_MQ_F_NO_SCHED,
8452         };
8453         err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
8454         if (err < 0)
8455                 goto free_cmd_queue;
8456         hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
8457         if (IS_ERR(hba->tmf_queue)) {
8458                 err = PTR_ERR(hba->tmf_queue);
8459                 goto free_tmf_tag_set;
8460         }
8461
8462         /* Reset the attached device */
8463         ufshcd_vops_device_reset(hba);
8464
8465         /* Host controller enable */
8466         err = ufshcd_hba_enable(hba);
8467         if (err) {
8468                 dev_err(hba->dev, "Host controller enable failed\n");
8469                 ufshcd_print_host_regs(hba);
8470                 ufshcd_print_host_state(hba);
8471                 goto free_tmf_queue;
8472         }
8473
8474         /*
8475          * Set the default power management level for runtime and system PM.
8476          * Default power saving mode is to keep UFS link in Hibern8 state
8477          * and UFS device in sleep state.
8478          */
8479         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8480                                                 UFS_SLEEP_PWR_MODE,
8481                                                 UIC_LINK_HIBERN8_STATE);
8482         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8483                                                 UFS_SLEEP_PWR_MODE,
8484                                                 UIC_LINK_HIBERN8_STATE);
8485
8486         /* Set the default auto-hibernate idle timer value to 150 ms */
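        /* (the scale field value 3 selects 1 ms units, so a timer value of 150 = 150 ms) */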
8487         if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
8488                 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8489                             FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8490         }
8491
8492         /* Hold auto suspend until async scan completes */
8493         pm_runtime_get_sync(dev);
8494         atomic_set(&hba->scsi_block_reqs_cnt, 0);
8495         /*
8496          * We are assuming that the device wasn't put into sleep/power-down
8497          * state by the boot stage that ran before the kernel.
8498          * This assumption helps avoid doing link startup twice during
8499          * ufshcd_probe_hba().
8500          */
8501         ufshcd_set_ufs_dev_active(hba);
8502
8503         async_schedule(ufshcd_async_scan, hba);
8504         ufs_sysfs_add_nodes(hba->dev);
8505
8506         return 0;
8507
8508 free_tmf_queue:
8509         blk_cleanup_queue(hba->tmf_queue);
8510 free_tmf_tag_set:
8511         blk_mq_free_tag_set(&hba->tmf_tag_set);
8512 free_cmd_queue:
8513         blk_cleanup_queue(hba->cmd_queue);
8514 out_remove_scsi_host:
8515         scsi_remove_host(hba->host);
8516 exit_gating:
8517         ufshcd_exit_clk_scaling(hba);
8518         ufshcd_exit_clk_gating(hba);
8519 out_disable:
8520         hba->is_irq_enabled = false;
8521         ufshcd_hba_exit(hba);
8522 out_error:
8523         return err;
8524 }
8525 EXPORT_SYMBOL_GPL(ufshcd_init);
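
/*
 * A minimal sketch (not part of this file) of the probe sequence a bus glue
 * driver is expected to follow: allocate the hba, map its resources, then
 * hand control to ufshcd_init(). Error unwinding is abbreviated and all
 * example_* names are hypothetical.
 */
#if 0
static int example_ufs_probe(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int irq, err;

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err)
		return err;

	mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio_base)) {
		err = PTR_ERR(mmio_base);
		goto dealloc;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto dealloc;
	}

	platform_set_drvdata(pdev, hba);
	return ufshcd_init(hba, mmio_base, irq);

dealloc:
	ufshcd_dealloc_host(hba);
	return err;
}
#endif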
8526
8527 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8528 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8529 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8530 MODULE_LICENSE("GPL");
8531 MODULE_VERSION(UFSHCD_DRIVER_VERSION);