// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include <asm/div64.h>
#include <asm/io.h>

#include "mmci.h"
#include "mmci_qcom_dml.h"
#define DRIVER_NAME "mmci-pl18x"

static void mmci_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);
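
/*
 * Default maximum operating frequency in Hz. This is only a fallback;
 * it can be overridden at module load time through the "fmax" module
 * parameter declared at the bottom of this file.
 */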
static unsigned int fmax = 515633;
static struct variant_data variant_arm = {
	.fifohalfsize = 8 * 4,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.pwrreg_powerup = MCI_PWR_UP,
	.reversed_irq_handling = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifohalfsize = 64 * 4,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.pwrreg_powerup = MCI_PWR_UP,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifohalfsize = 64 * 4,
	.clkreg_enable = MCI_ARM_HWFCEN,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.pwrreg_powerup = MCI_PWR_UP,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_ROD,
	.init = mmci_variant_init,
};

static struct variant_data variant_u300 = {
	.fifohalfsize = 8 * 4,
	.clkreg_enable = MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 16,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_nomadik = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_ux500 = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag = MCI_ST_CARDBUSY,
	.busy_detect_mask = MCI_ST_BUSYENDMASK,
	.pwrreg_nopower = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = mmci_variant_init,
};

static struct variant_data variant_ux500v2 = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
	.pwrreg_clkgate = true,
	.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag = MCI_ST_CARDBUSY,
	.busy_detect_mask = MCI_ST_BUSYENDMASK,
	.pwrreg_nopower = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.init = ux500v2_variant_init,
};

static struct variant_data variant_stm32 = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
	.pwrreg_powerup = MCI_PWR_ON,
	.pwrreg_clkgate = true,
	.pwrreg_nopower = true,
	.init = mmci_variant_init,
};

static struct variant_data variant_stm32_sdmmc = {
	.fifohalfsize = 8 * 4,
	.stm32_clkdiv = true,
	.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
	.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
	.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
	.cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
	.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
	.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
	.datactrl_first = true,
	.datacnt_useless = true,
	.datalength_bits = 25,
	.datactrl_blocksz = 14,
	.stm32_idmabsize_mask = GENMASK(12, 5),
	.init = sdmmc_variant_init,
};

static struct variant_data variant_qcom = {
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_QCOM_CLK_FLOWENA |
			 MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
	.cmdreg_srsp = MCI_CPSM_RESPONSE,
	.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
	.datalength_bits = 24,
	.datactrl_blocksz = 11,
	.pwrreg_powerup = MCI_PWR_UP,
	.explicit_mclk_control = true,
	.irq_pio_mask = MCI_IRQ_PIO_MASK,
	.start_err = MCI_STARTBITERR,
	.opendrain = MCI_ROD,
	.init = qcom_variant_init,
};
/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}
static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}
/*
 * This must be called with host->lock held
 */
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}
/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
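			/*
			 * Worked example, assuming mclk = 100 MHz and a
			 * desired rate of 400 kHz: clkdiv = 100e6 / (2 * 4e5)
			 * - 1 = 124, so cclk = 100e6 / (2 * 125) = 400 kHz.
			 */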
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}
void mmci_dma_release(struct mmci_host *host)
{
	if (host->ops && host->ops->dma_release)
		host->ops->dma_release(host);

	host->use_dma = false;
}

void mmci_dma_setup(struct mmci_host *host)
{
	if (!host->ops || !host->ops->dma_setup)
		return;

	if (host->ops->dma_setup(host))
		return;

	/* initialize pre request cookie */
	host->next_cookie = 1;

	host->use_dma = true;
}
/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	if (host->ops && host->ops->validate_data)
		return host->ops->validate_data(host, data);

	return 0;
}
int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
	int err;

	if (!host->ops || !host->ops->prep_data)
		return 0;

	err = host->ops->prep_data(host, data, next);
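
	/*
	 * Cookies handed back in data->host_cookie are always positive;
	 * zero means "not prepared". Wrap the counter back to 1 if it
	 * ever overflows to a negative value.
	 */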
	if (next && !err)
		data->host_cookie = ++host->next_cookie < 0 ?
			1 : host->next_cookie;

	return err;
}
void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
		      int err)
{
	if (host->ops && host->ops->unprep_data)
		host->ops->unprep_data(host, data, err);

	data->host_cookie = 0;
}
void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);

	if (host->ops && host->ops->get_next_data)
		host->ops->get_next_data(host, data);
}
int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
	struct mmc_data *data = host->data;
	int ret;

	if (!host->use_dma)
		return -EINVAL;

	ret = mmci_prep_data(host, data, false);
	if (ret)
		return ret;

	if (!host->ops || !host->ops->dma_start)
		return -EINVAL;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);

	host->ops->dma_start(host, &datactrl);

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);

	return 0;
}
void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_finalize)
		host->ops->dma_finalize(host, data);
}

void mmci_dma_error(struct mmci_host *host)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_error)
		host->ops->dma_error(host);
}
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~variant->irq_pio_mask;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}
static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
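
/*
 * The two helpers below build the DATACTRL value: the standard DPSM
 * encodes the block size as a power-of-two exponent via
 * mmci_dctrl_blksz(), while the ux500v2 block takes the raw block
 * size in the upper half of the register (blksz << 16).
 */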
static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
}

static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
struct mmci_dmae_next {
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
};

struct mmci_dmae_priv {
	struct dma_chan *cur;
	struct dma_chan *rx_channel;
	struct dma_chan *tx_channel;
	struct dma_async_tx_descriptor *desc_current;
	struct mmci_dmae_next next_data;
};
int mmci_dmae_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	struct mmci_dmae_priv *dmae;

	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
	if (!dmae)
		return -ENOMEM;

	host->dma_priv = dmae;

	dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "rx");
	dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "tx");

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (dmae->rx_channel && !dmae->tx_channel)
		dmae->tx_channel = dmae->rx_channel;

	if (dmae->rx_channel)
		rxname = dma_chan_name(dmae->rx_channel);
	else
		rxname = "none";

	if (dmae->tx_channel)
		txname = dma_chan_name(dmae->tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (dmae->tx_channel) {
		struct device *dev = dmae->tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (dmae->rx_channel) {
		struct device *dev = dmae->rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (!dmae->tx_channel || !dmae->rx_channel) {
		mmci_dmae_release(host);
		return -EINVAL;
	}

	return 0;
}
/*
 * This is only used in the probe error path and at release time,
 * so inline it so it can be discarded.
 */
void mmci_dmae_release(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (dmae->rx_channel)
		dma_release_channel(dmae->rx_channel);
	if (dmae->tx_channel)
		dma_release_channel(dmae->tx_channel);
	dmae->rx_channel = dmae->tx_channel = NULL;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct dma_chan *chan;

	if (data->flags & MMC_DATA_READ)
		chan = dmae->rx_channel;
	else
		chan = dmae->tx_channel;

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}
void mmci_dmae_error(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!dma_inprogress(host))
		return;

	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(dmae->cur);
	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
	host->data->host_cookie = 0;

	mmci_dma_unmap(host, host->data);
}
void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	u32 status;
	int i;

	if (!dma_inprogress(host))
		return;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_error(host);
		if (!data->error)
			data->error = -EIO;
	} else if (!data->host_cookie) {
		mmci_dma_unmap(host, data);
	}

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
}
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
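		/*
		 * e.g. fifohalfsize = 8 * 4 bytes gives a maxburst of
		 * 8 words, i.e. half a FIFO per burst.
		 */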
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = dmae->rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = dmae->tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}
int mmci_dmae_prep_data(struct mmci_host *host,
			struct mmc_data *data,
			bool next)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *nd = &dmae->next_data;

	if (!host->use_dma)
		return -EINVAL;

	if (next)
		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
	/* Check if next job is already prepared. */
	if (dmae->cur && dmae->desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return _mmci_dmae_prep_data(host, data, &dmae->cur,
				    &dmae->desc_current);
}
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	host->dma_in_progress = true;
	dmaengine_submit(dmae->desc_current);
	dma_async_issue_pending(dmae->cur);

	*datactrl |= MCI_DPSM_DMAENABLE;

	return 0;
}
void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *next = &dmae->next_data;

	if (!host->use_dma)
		return;

	WARN_ON(!data->host_cookie && (next->desc || next->chan));

	dmae->desc_current = next->desc;
	dmae->cur = next->chan;
	next->desc = NULL;
	next->chan = NULL;
}
void mmci_dmae_unprep_data(struct mmci_host *host,
			   struct mmc_data *data, int err)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!host->use_dma)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_dmae_next *next = &dmae->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = dmae->rx_channel;
		else
			chan = dmae->tx_channel;
		dmaengine_terminate_all(chan);

		if (dmae->desc_current == next->desc)
			dmae->desc_current = NULL;

		if (dmae->cur == next->chan) {
			host->dma_in_progress = false;
			dmae->cur = NULL;
		}

		next->desc = NULL;
		next->chan = NULL;
	}
}
static struct mmci_host_ops mmci_variant_ops = {
	.prep_data = mmci_dmae_prep_data,
	.unprep_data = mmci_dmae_unprep_data,
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
	.get_next_data = mmci_dmae_get_next_data,
	.dma_setup = mmci_dmae_setup,
	.dma_release = mmci_dmae_release,
	.dma_start = mmci_dmae_start,
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};
#else
static struct mmci_host_ops mmci_variant_ops = {
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
};
#endif
void mmci_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
}

void ux500v2_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
	host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	WARN_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	mmci_prep_data(host, data, true);
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_unprep_data(host, data, err);
}
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;
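
	/*
	 * Worked example: a 100 ms timeout_ns at cclk = 26 MHz gives
	 * clks = 0.1 * 26e6 = 2,600,000 card clock cycles on top of
	 * data->timeout_clks.
	 */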
	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	datactrl = host->ops->get_datactrl_cfg(host);
	datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;
	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	if (host->variant->cmdreg_stop &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		c |= host->variant->cmdreg_stop;

	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= host->variant->cmdreg_lrsp_crc;
		else if (cmd->flags & MMC_RSP_CRC)
			c |= host->variant->cmdreg_srsp_crc;
		else
			c |= host->variant->cmdreg_srsp;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
static void mmci_stop_command(struct mmci_host *host)
{
	host->stop_abort.error = 0;
	mmci_start_command(host, &host->stop_abort, 0);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	unsigned int status_err;

	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	status_err = status & (host->variant->start_err |
			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
			       MCI_TXUNDERRUN | MCI_RXOVERRUN);

	if (status_err) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		mmci_dma_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		if (!host->variant->datacnt_useless) {
			remain = readl(host->base + MMCIDATACNT);
			success = data->blksz * data->blocks - remain;
		} else {
			success = 0;
		}

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status_err, success);
		if (status_err & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status_err & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status_err & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status_err & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status_err & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
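		/*
		 * round_down() reports only whole blocks, e.g. 2600 bytes
		 * transferred with 512-byte blocks counts as 2560 bytes.
		 */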
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		mmci_dma_finalize(host, data);

		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			if (host->variant->cmdreg_stop && data->error)
				mmci_stop_command(host);
			else
				mmci_request_end(host, data->mrq);
		} else if (host->mrq->sbc && !data->error) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc, busy_resp;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);
	busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

	/*
	 * At least one of these interrupts must be set for the status to
	 * be considered worth handling. Note that we tag on any latent
	 * IRQs postponed due to waiting for busy status.
	 */
	if (!((status|host->busy_status) &
	      (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/* Handle busy detection on DAT0 if the variant supports it. */
	if (busy_resp && host->variant->busy_detect) {

		/*
		 * Before unmasking for the busy end IRQ, confirm that the
		 * command was sent successfully. To keep track of having a
		 * command in-progress, waiting for busy signaling to end,
		 * store the status in host->busy_status.
		 *
		 * Note that the card may need a couple of clock cycles before
		 * it starts signaling busy on DAT0, hence re-read the
		 * MMCISTATUS register here, to allow the busy bit to be set.
		 * Potentially we may even need to poll the register for a
		 * while, to allow it to be set, but tests indicate that it
		 * isn't needed.
		 */
		if (!host->busy_status &&
		    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {

			writel(readl(base + MMCIMASK0) |
			       host->variant->busy_detect_mask,
			       base + MMCIMASK0);

			host->busy_status =
				status & (MCI_CMDSENT|MCI_CMDRESPEND);
			return;
		}

		/*
		 * If there is a command in-progress that has been successfully
		 * sent, then bail out if busy status is set and wait for the
		 * busy end IRQ.
		 *
		 * Note that the HW triggers an IRQ on both edges while
		 * monitoring DAT0 for busy completion, but there is only one
		 * status bit in MMCISTATUS for the busy state. Therefore
		 * both the start and the end interrupts need to be cleared,
		 * one after the other. So, clear the busy start IRQ here.
		 */
		if (host->busy_status &&
		    (status & host->variant->busy_detect_flag)) {
			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
			return;
		}

		/*
		 * If there is a command in-progress that has been successfully
		 * sent and the busy bit isn't set, it means we have received
		 * the busy end IRQ. Clear and mask the IRQ, then continue to
		 * process the command.
		 */
		if (host->busy_status) {

			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			writel(readl(base + MMCIMASK0) &
			       ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			host->busy_status = 0;
		}
	}

	host->cmd = NULL;
	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			mmci_dma_error(host);

			mmci_stop_data(host);
			if (host->variant->cmdreg_stop && cmd->error) {
				mmci_stop_command(host);
				return;
			}
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!host->variant->datactrl_first &&
		   !(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
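
/*
 * MMCIFIFOCNT counts the 32-bit words still expected from the card, so
 * the number of bytes available to read right now is the remainder of
 * the transfer minus (FIFOCNT << 2).
 */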
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}
static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * On Qcom SDCC4 only 8 words are used in each burst, so only 8
	 * addresses from the FIFO range should be used.
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~host->variant->irq_pio_mask;
		}

		/*
		 * Busy detection is managed by mmci_cmd_irq(), including to
		 * clear the corresponding IRQ.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Busy detection has been handled by mmci_cmd_irq() above.
		 * Clear the status bit to prevent polling in IRQ context.
		 */
		if (host->variant->busy_detect_flag)
			status &= ~host->variant->busy_detect_flag;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data &&
	    (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (variant->opendrain) {
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pwr |= variant->opendrain;
	} else {
		/*
		 * If the variant cannot configure the pads by its own, then we
		 * expect the pinctrl to be able to do that for us
		 */
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	if (host->ops && host->ops->set_clkreg)
		host->ops->set_clkreg(host, ios->clock);
	else
		mmci_set_clkreg(host, ios->clock);

	if (host->ops && host->ops->set_pwrreg)
		host->ops->set_pwrreg(host, pwr);
	else
		mmci_write_pwrreg(host, pwr);

	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
	}

	return ret;
}
static struct mmc_host_ops mmci_ops = {
	.request = mmci_request,
	.pre_req = mmci_pre_request,
	.post_req = mmci_post_request,
	.set_ios = mmci_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;
	if (of_get_property(np, "st,sig-dir", NULL))
		host->pwr_reg_add |= MCI_STM32_DIRPOL;
	if (of_get_property(np, "st,neg-edge", NULL))
		host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
	if (of_get_property(np, "st,use-ckin", NULL))
		host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}
static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/*
	 * Some variant (STM32) doesn't have opendrain bit, nevertheless
	 * pins can be set accordingly using pinctrl
	 */
	if (!variant->opendrain) {
		host->pinctrl = devm_pinctrl_get(&dev->dev);
		if (IS_ERR(host->pinctrl)) {
			dev_err(&dev->dev, "failed to get pinctrl");
			ret = PTR_ERR(host->pinctrl);
			goto host_free;
		}

		host->pins_default = pinctrl_lookup_state(host->pinctrl,
							  PINCTRL_STATE_DEFAULT);
		if (IS_ERR(host->pins_default)) {
			dev_err(mmc_dev(mmc), "Can't select default pins\n");
			ret = PTR_ERR(host->pins_default);
			goto host_free;
		}

		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
							    MMCI_PINCTRL_STATE_OPENDRAIN);
		if (IS_ERR(host->pins_opendrain)) {
			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
			ret = PTR_ERR(host->pins_opendrain);
			goto host_free;
		}
	}
	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	if (variant->init)
		variant->init(host);
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 * On Qualcomm-like controllers, f_min is the nearest minimum
	 * clock to 100 kHz.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->stm32_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
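	/*
	 * e.g. with mclk = 100 MHz the ST divider bottoms out at
	 * 100e6 / 257 = ~389 kHz, while the original PL180 divider
	 * gives 100e6 / 512 = ~195 kHz as the minimum bus clock.
	 */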
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
			     min(variant->f_max, mmc->f_max) :
			     min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
			     fmax : min(host->mclk, fmax);

	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
	host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
	if (IS_ERR(host->rst)) {
		ret = PTR_ERR(host->rst);
		goto clk_disable;
	}

	/* Get regulators and the supported OCR mask */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto clk_disable;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	/*
	 * Enable busy detection.
	 */
	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		/*
		 * Not all variants have a flag to enable busy detection
		 * in the DPSM, but if they do, set it here.
		 */
		if (variant->busy_dpsm_flag)
			mmci_write_datactrlreg(host,
					       host->variant->busy_dpsm_flag);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
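		/*
		 * A max_busy_timeout of 0 tells the MMC core that the host
		 * has no maximum busy timeout and can wait in hardware for
		 * as long as the card needs.
		 */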
		mmc->max_busy_timeout = 0;
	}

	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
	host->stop_abort.arg = 0;
	host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;
	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << variant->datactrl_blocksz;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
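	/*
	 * e.g. datalength_bits = 24 and datactrl_blocksz = 11 give
	 * max_req_size = 2^24 - 1 bytes, max_blk_size = 2048 bytes and
	 * max_blk_count = (2^24 - 1) >> 11 = 8191 blocks.
	 */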
	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);

	if (variant->mmcimask1)
		writel(0, host->base + MMCIMASK1);

	writel(0xfff, host->base + MMCICLEAR);
	/*
	 * If:
	 * - not using DT but using a descriptor table, or
	 * - using a table of descriptors ALONGSIDE DT, or
	 * look up these descriptors named "cd" and "wp" right here, fail
	 * silently if these do not exist
	 */
	if (!np) {
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;
	}
	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			       DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	pm_runtime_put(&dev->dev);
	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		struct variant_data *variant = host->variant;

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);

		if (variant->mmcimask1)
			writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}
#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif
static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static const struct amba_id mmci_ids[] = {
	{
		.data = &variant_arm,
	},
	{
		.data = &variant_arm_extended_fifo,
	},
	{
		.data = &variant_arm_extended_fifo_hwfc,
	},
	{
		.data = &variant_arm,
	},
	/* ST Micro variants */
	{
		.data = &variant_u300,
	},
	{
		.data = &variant_nomadik,
	},
	{
		.data = &variant_nomadik,
	},
	{
		.data = &variant_ux500,
	},
	{
		.data = &variant_ux500v2,
	},
	{
		.data = &variant_stm32,
	},
	{
		.data = &variant_stm32_sdmmc,
	},
	/* Qualcomm variants */
	{
		.data = &variant_qcom,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
	.drv = {
		.name = DRIVER_NAME,
		.pm = &mmci_dev_pm_ops,
	},
	.probe = mmci_probe,
	.remove = mmci_remove,
	.id_table = mmci_ids,
};

module_amba_driver(mmci_driver);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");