// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/reset-controller.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);

static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
	return container_of(rcd, struct ufs_qcom_host, rcdev);
}

static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
				       const char *prefix, void *priv)
{
	ufshcd_dump_regs(hba, offset, len * 4, prefix);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out, bool optional)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (!IS_ERR(clk)) {
		*clk_out = clk;
		return 0;
	}

	err = PTR_ERR(clk);

	if (optional && err == -ENOENT) {
		*clk_out = NULL;
		return 0;
	}

	if (err != -EPROBE_DEFER)
		dev_err(dev, "failed to get %s err %d\n", name, err);

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
		host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
		host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (has_acpi_companion(dev))
		return 0;

	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
					&host->rx_l0_sync_clk, false);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
					&host->tx_l0_sync_clk, false);
	if (err)
		goto out;

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk, false);
		if (err)
			goto out;

		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
			&host->tx_l1_sync_clk, true);
	}
out:
	return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	u32 tx_lanes;

	return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
}

static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * we might have scheduled out for a long time during polling so
	 * check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		   REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	if (is_rate_B)
		phy_set_mode(phy, PHY_MODE_UFS_HS_B);

	/* phy initialization - calibrate the phy */
	ret = phy_init(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* power on phy - start serdes and phy's power and clocks */
	ret = phy_power_on(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
			__func__, ret);
		goto out_disable_phy;
	}

	ufs_qcom_select_unipro_mode(host);

	goto out;

out_disable_phy:
	phy_exit(phy);
out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. The UTP controller CGCs are disabled by default;
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}

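/*
 * Illustrative note (assumption, not verified against every ufs-qcom.h
 * revision): REG_UFS_CFG2_CGC_EN_ALL is taken here to be the OR of the
 * per-sub-module clock gating enable bits, along the lines of:
 *
 *	#define REG_UFS_CFG2_CGC_EN_ALL	(UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
 *					 TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
 *					 DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
 *					 TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
 *
 * so the single read-modify-write above arms hardware clock gating for all
 * UTP sub-modules at once.
 */
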
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/*
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for
	 * Interrupt Aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* this register's 2 fields shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return.
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

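/*
 * Worked example (hypothetical clock rate): with core_clk at 150 MHz,
 * core_clk_cycles_per_us = 150000000 / USEC_PER_SEC = 150, which is the
 * value programmed into REG_UFS_SYS1CLK_1US. For HS-G2 rate B,
 * tx_clk_cycles_per_us comes from hs_fr_table_rB[1][1] = 0x49, and the
 * link startup timer would be (150000000 / 1000) * 100 = 15000000 core
 * clock cycles, i.e. a 100 ms timeout budget.
 */
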
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			err = -EINVAL;
			goto out;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		/*
		 * Some UFS devices (and possibly the host) have issues if
		 * LCC is enabled. So we are setting PA_Local_TX_LCC_Enable
		 * to 0 before link startup, which makes sure that both host
		 * and device TX LCC are disabled once link startup is
		 * completed.
		 */
		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
			err = ufshcd_dme_set(hba,
					UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
					0);

		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

out:
	return err;
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

	} else if (!ufs_qcom_is_link_active(hba)) {
		/* M-PHY RMMI interface clocks can be gated after link hibern8 */
		ufs_qcom_disable_lane_clks(host);
	}

	return 0;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	if (ufs_qcom_is_link_off(hba)) {
		err = phy_power_on(phy);
		if (err) {
			dev_err(hba->dev, "%s: failed PHY power on: %d\n",
				__func__, err);
			return err;
		}

		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;

	} else if (!ufs_qcom_is_link_active(hba)) {
		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;
	}

	hba->is_sys_suspended = false;
	return 0;
}

#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}

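/*
 * Example vote names produced above (assuming BUS_VECTOR_NAME_LEN is large
 * enough): an uninitialized power mode yields "MIN"; HS rate B, gear 3 on
 * two lanes yields "HS_RB_G3_L2"; PWM gear 1 on one lane yields "PWM_G1_L1".
 * These strings are then matched against the "qcom,bus-vector-names" DT
 * property by ufs_qcom_get_bus_vote().
 */
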
static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote) {
		err = msm_bus_scale_client_update_request(
				host->bus_vote.client_handle, vote);
		if (err) {
			dev_err(host->hba->dev,
				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				__func__, host->bus_vote.client_handle,
				vote, err);
			goto out;
		}

		host->bus_vote.curr_vote = vote;
	}
out:
	return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;

	return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}

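/*
 * Usage sketch (the sysfs path is illustrative; the exact device name
 * depends on the platform):
 *
 *	# force the maximum bandwidth vote regardless of the link speed
 *	echo 1 > /sys/devices/platform/soc/1d84000.ufshc/max_bus_bw
 *	# revert to speed-based voting
 *	echo 0 > /sys/devices/platform/soc/1d84000.ufshc/max_bus_bw
 */
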
static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
				__func__, err);
		goto out;
	}

	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
	if (!host->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
				__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#else /* CONFIG_MSM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */

static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active at least 1us after the
		 * hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}

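/*
 * Call-ordering sketch for the 1us guarantees documented above (hypothetical
 * caller shown for illustration only; hibern8 entry/exit is actually driven
 * by the core ufshcd code):
 *
 *	ufshcd_uic_hibern8_enter(hba);
 *	ufs_qcom_dev_ref_clk_ctrl(host, false);	// delays 1us before gating
 *	...
 *	ufs_qcom_dev_ref_clk_ctrl(host, true);	// delays 1us after ungating
 *	ufshcd_uic_hibern8_exit(hba);
 */
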
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params ufs_qcom_cap;
	int ret = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
					       dev_max_params,
					       dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
			ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
				dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
			!ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		goto out;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			    (pa_vs_config_reg1 | (1 << 12)));

out:
	return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	return err;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}

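/*
 * Rationale note (assumption based on the quirk advertised below): the HCI
 * version register on these controllers misreports the interface version
 * (see UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION), so this callback overrides it,
 * reporting UFSHCI 1.1 for the v1 hardware and UFSHCI 2.0 otherwise.
 */
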
/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller may have some non-standard behaviours
 * (quirks) beyond what the UFSHCI specification mandates. Advertise all
 * such quirks to the standard UFS host controller driver so that it takes
 * them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}

/**
 * ufs_qcom_setup_clocks - enable/disable UFS clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;
	int vote = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on && (status == POST_CHANGE)) {
		/* enable the device ref clock for HS mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);

	} else if (!on && (status == PRE_CHANGE)) {
		if (!ufs_qcom_is_link_active(hba)) {
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		}

		vote = host->bus_vote.min_bw_vote;
	}

	err = ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

	return err;
}

static int
ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	/* Currently this code only knows about a single reset. */
	WARN_ON(id);
	ufs_qcom_assert_reset(host->hba);
	/* provide 1ms delay to let the reset pulse propagate. */
	usleep_range(1000, 1100);
	return 0;
}

static int
ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	/* Currently this code only knows about a single reset. */
	WARN_ON(id);
	ufs_qcom_deassert_reset(host->hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	return 0;
}

static const struct reset_control_ops ufs_qcom_reset_ops = {
	.assert = ufs_qcom_reset_assert,
	.deassert = ufs_qcom_reset_deassert,
};

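/*
 * Consumer sketch: with the reset controller registered below in
 * ufs_qcom_init(), the UFS PHY node can reference the controller's single
 * reset line from DT roughly like this (node name and unit address are
 * illustrative):
 *
 *	ufsphy: phy@1d87000 {
 *		...
 *		resets = <&ufshc 0>;
 *		reset-names = "ufsphy";
 *	};
 */
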
#define ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

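/*
 * Example (the device name is illustrative): booting with
 *
 *	androidboot.bootdevice=1d84000.ufshc
 *
 * restricts ufs_qcom_init() below to the platform device whose dev_name()
 * matches; any other UFS host probed by this driver gets -ENODEV.
 */
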
/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/* Fire up the reset controller. Failure here is non-fatal. */
	host->rcdev.of_node = dev->of_node;
	host->rcdev.ops = &ufs_qcom_reset_ops;
	host->rcdev.owner = dev->driver->owner;
	host->rcdev.nr_resets = 1;
	err = devm_reset_controller_register(dev, &host->rcdev);
	if (err) {
		dev_warn(dev, "Failed to register reset controller\n");
		err = 0;
	}

	/*
	 * voting/devoting device ref_clk source is time consuming hence
	 * skip devoting it during aggressive clock gating. This clock
	 * will still be gated off during runtime suspend.
	 */
	host->generic_phy = devm_phy_get(dev, "ufsphy");

	if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
			 __func__, err);
		goto out_variant_clear;
	} else if (IS_ERR(host->generic_phy)) {
		if (has_acpi_companion(dev)) {
			host->generic_phy = NULL;
		} else {
			err = PTR_ERR(host->generic_phy);
			dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
			goto out_variant_clear;
		}
	}

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/*
	 * for newer controllers, device reference clock control bit has
	 * moved inside UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is optional resource */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
				dev_warn(dev,
					"%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
					__func__,
					PTR_ERR(host->dev_ref_clk_ctrl_mmio));
				host->dev_ref_clk_ctrl_mmio = NULL;
			}
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err) {
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
				__func__, err);
		err = 0;
	}

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
	phy_exit(host->generic_phy);
}

static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);
	if (err)
		goto out;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    core_clk_ctrl_reg);
out:
	return err;
}

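/*
 * Note on the argument: clk_cycles is the number of UniPro core clock cycles
 * per microsecond, so the 150/75 values used by the scaling helpers below
 * correspond to an assumed 150 MHz / 75 MHz UniPro core clock respectively.
 */
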
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* nothing to do as of now */
	return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 150 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	u32 core_clk_ctrl_reg;

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);

	/* make sure CORE_CLK_DIV_EN is cleared */
	if (!err &&
	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
		err = ufshcd_dme_set(hba,
				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
				    core_clk_ctrl_reg);
	}

	return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 75 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err || !dev_req_params)
			goto out;

		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
		ufs_qcom_update_bus_bw_vote(host);
	}

out:
	return err;
}

static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
		void *priv, void (*print_fn)(struct ufs_hba *hba,
		int offset, int num_regs, const char *str, void *priv))
{
	u32 reg;
	struct ufs_qcom_host *host;

	if (unlikely(!hba)) {
		pr_err("%s: hba is NULL\n", __func__);
		return;
	}
	if (unlikely(!print_fn)) {
		dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
		return;
	}

	host = ufshcd_get_variant(hba);
	if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
		return;

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UTP_DBG_RAMS_EN;
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}

static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
				UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
	} else {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
	}
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
	if (host->testbus.select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, host->testbus.select_major);
		return false;
	}

	return true;
}

int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 20;
		mask = 0xFFF;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;

	pm_runtime_get_sync(host->hba->dev);
	ufshcd_hold(host->hba, false);
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufs_qcom_enable_test_bus(host);
	/*
	 * Make sure the test bus configuration is
	 * committed before returning.
	 */
	mb();
	ufshcd_release(host->hba);
	pm_runtime_put_sync(host->hba->dev);

	return 0;
}

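/*
 * Usage sketch (hypothetical caller; ufs_qcom_print_unipro_testbus() below
 * is the in-driver example): sample one UniPro test bus word:
 *
 *	host->testbus.select_major = TSTBUS_UNIPRO;
 *	host->testbus.select_minor = 37;
 *	ufs_qcom_testbus_config(host);
 *	val = ufshcd_readl(hba, UFS_TEST_BUS);
 */
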
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
}

static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	u32 *testbus = NULL;
	int i, nminor = 256, testbus_len = nminor * sizeof(u32);

	testbus = kmalloc(testbus_len, GFP_KERNEL);
	if (!testbus)
		return;

	host->testbus.select_major = TSTBUS_UNIPRO;
	for (i = 0; i < nminor; i++) {
		host->testbus.select_minor = i;
		ufs_qcom_testbus_config(host);
		testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
	}
	print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
			16, 4, testbus, testbus_len, false);
	kfree(testbus);
}

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
			 "HCI Vendor Specific Registers ");

	/* sleep a bit intermittently as we are dumping too much data */
	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
	usleep_range(1000, 1100);
	ufs_qcom_testbus_read(hba);
	usleep_range(1000, 1100);
	ufs_qcom_print_unipro_testbus(hba);
	usleep_range(1000, 1100);
}

/*
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
};

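/*
 * Dispatch sketch: the core driver invokes these callbacks through its
 * ufshcd_vops_*() wrappers, roughly like this (simplified, see ufshcd.h):
 *
 *	static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
 *					enum ufs_notify_change_status status)
 *	{
 *		if (hba->vops && hba->vops->setup_clocks)
 *			return hba->vops->setup_clocks(hba, on, status);
 *		return 0;
 *	}
 */
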
/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Returns 0 for success and non-zero for failure.
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct of_device_id ufs_qcom_of_match[] = {
	{ .compatible = "qcom,ufshc"},
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
	{ "QCOM24A5" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif

static const struct dev_pm_ops ufs_qcom_pm_ops = {
	.suspend	= ufshcd_pltfrm_suspend,
	.resume		= ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume	= ufshcd_pltfrm_runtime_resume,
	.runtime_idle	= ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove	= ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
		.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");