Merge tag 'mmc-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 13 Dec 2016 16:34:11 +0000 (08:34 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 13 Dec 2016 16:34:11 +0000 (08:34 -0800)
Pull MMC updates from Ulf Hansson:
 "It's been an busy period for mmc. Quite some changes in the mmc core,
  two new mmc host drivers, some existing drivers being extended to
  support new IP versions and lots of other updates.

  MMC core:
   - Delete eMMC packed command support
   - Introduce mmc_abort_tuning() to enable eMMC tuning to fail
     gracefully
   - Introduce mmc_can_retune() to see if a host can be retuned
   - Re-work and improve the sequence when sending a CMD6 for mmc
   - Enable CMD13 polling when switching to HS and HS DDR mode for mmc
   - Relax checking for CMD6 errors after switch to HS200
   - Re-factor the code dealing with the mmc block queue
   - Recognize whether the eMMC card supports CMDQ
   - Fix 4K native sector check
   - Don't power off the card when starting the host
   - Increase MMC_IOC_MAX_BYTES to support bigger firmware binaries
   - Improve error handling and drop meaningless BUG_ON()s
   - Lots of clean-ups and changes to improve the quality of the code

  MMC host:
   - sdhci: Fix tuning sequence and clean-up the related code
   - sdhci: Add support for overriding broken SDHCI cap register bits
     via DT
   - sdhci-cadence: Add new driver for Cadence SD4HC SDHCI variant
   - sdhci-msm: Update clock management
   - sdhci-msm: Add support for eMMC HS400 mode
   - sdhci-msm: Deploy runtime/system PM support
   - sdhci-iproc: Extend driver support to newer IP versions
   - sdhci-pci: Add support for Intel GLK
   - sdhci-pci: Add support for Intel NI byt sdio
   - sdhci-acpi: Add support for 80860F14 UID 2 SDIO bus
   - sdhci: Lots of various small improvements and clean-ups
   - tmio: Add support for tuning
   - sh_mobile_sdhi: Add support for tuning
   - sh_mobile_sdhi: Extend driver to support SDHI IP on R7S72100 SoC
   - sh_mobile_sdhi: Remove support for sh7372
   - davinci: Use mmc_of_parse() to enable generic mmc DT bindings
   - meson: Add new driver to support GX platforms
   - dw_mmc: Deploy generic runtime/system PM support
   - dw_mmc: Lots of various small improvements

  As a part of the mmc changes this time, I have also pulled in an
  immutable branch/tag (soc-device-match-tag1) hosted by Geert
  Uytterhoeven, to share the implementation of the new
  soc_device_match() interface. This is needed by these mmc related
  changes:

   - mmc: sdhci-of-esdhc: Get correct IP version for T4240-R1.0-R2.0
   - soc: fsl: add GUTS driver for QorIQ platforms"
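
As a rough illustration of the soc_device_match() interface mentioned
above (this is not code from the pull request; the table contents,
function name and quirk below are made up), a caller builds a
zero-terminated array of soc_device_attribute entries and asks whether
the running SoC matches any of them:

#include <linux/types.h>
#include <linux/sys_soc.h>

/* Zero-terminated list of SoCs needing a quirk; glob patterns are allowed. */
static const struct soc_device_attribute example_quirk_socs[] = {
	{ .soc_id = "T4240", .revision = "1.0" },
	{ .soc_id = "T4240", .revision = "2.0" },
	{ /* sentinel */ }
};

static bool example_needs_quirk(void)
{
	/* soc_device_match() returns the first matching entry, or NULL. */
	return soc_device_match(example_quirk_socs) != NULL;
}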

* tag 'mmc-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (136 commits)
  mmc: sdhci-cadence: add Cadence SD4HC support
  mmc: sdhci: export sdhci_execute_tuning()
  mmc: sdhci: Tidy tuning loop
  mmc: sdhci: Simplify tuning block size logic
  mmc: sdhci: Factor out tuning helper functions
  mmc: sdhci: Use mmc_abort_tuning()
  mmc: mmc: Introduce mmc_abort_tuning()
  mmc: sdhci: Always allow tuning to fall back to fixed sampling
  mmc: sdhci: Fix tuning reset after exhausting the maximum number of loops
  mmc: sdhci: Fix recovery from tuning timeout
  Revert "mmc: sdhci: Reset cmd and data circuits after tuning failure"
  mmc: mmc: Relax checking for switch errors after HS200 switch
  mmc: sdhci-acpi: support 80860F14 UID 2 SDIO bus
  mmc: sdhci-of-at91: remove bogus MMC_SDHCI_IO_ACCESSORS select
  mmc: sdhci-pci: Use ACPI to get max frequency for Intel NI byt sdio
  mmc: sdhci-pci: Add PCI ID for Intel NI byt sdio
  mmc: sdhci-s3c: add spin_unlock_irq() before calling clk_round_rate
  mmc: dw_mmc: display the clock message only one time when card is polling
  mmc: dw_mmc: add the debug message for polling and non-removable
  mmc: dw_mmc: check the "present" variable before checking flags
  ...

79 files changed:
Documentation/devicetree/bindings/mmc/amlogic,meson-gx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
Documentation/devicetree/bindings/mmc/sdhci-cadence.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/sdhci-msm.txt
Documentation/devicetree/bindings/mmc/sdhci.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
Documentation/devicetree/bindings/mmc/tmio_mmc.txt
Documentation/devicetree/bindings/soc/fsl/guts.txt [moved from Documentation/devicetree/bindings/powerpc/fsl/guts.txt with 91% similarity]
MAINTAINERS
arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
drivers/base/Kconfig
drivers/base/soc.c
drivers/mmc/card/block.c
drivers/mmc/card/mmc_test.c
drivers/mmc/card/queue.c
drivers/mmc/card/queue.h
drivers/mmc/card/sdio_uart.c
drivers/mmc/core/core.c
drivers/mmc/core/debugfs.c
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/mmc_ops.h
drivers/mmc/core/sd.c
drivers/mmc/core/sd_ops.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_cis.c
drivers/mmc/core/sdio_irq.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/Kconfig
drivers/mmc/host/Makefile
drivers/mmc/host/davinci_mmc.c
drivers/mmc/host/dw_mmc-exynos.c
drivers/mmc/host/dw_mmc-k3.c
drivers/mmc/host/dw_mmc-pci.c
drivers/mmc/host/dw_mmc-pltfm.c
drivers/mmc/host/dw_mmc-rockchip.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/dw_mmc.h
drivers/mmc/host/jz4740_mmc.c
drivers/mmc/host/meson-gx-mmc.c [new file with mode: 0644]
drivers/mmc/host/mmci.c
drivers/mmc/host/mmci.h
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mmc/host/rtsx_usb_sdmmc.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-cadence.c [new file with mode: 0644]
drivers/mmc/host/sdhci-iproc.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-pci.h
drivers/mmc/host/sdhci-pltfm.h
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mmc/host/sunxi-mmc.c
drivers/mmc/host/tmio_mmc.h
drivers/mmc/host/tmio_mmc_pio.c
drivers/mmc/host/wbsd.c
drivers/soc/Kconfig
drivers/soc/fsl/Kconfig [new file with mode: 0644]
drivers/soc/fsl/Makefile
drivers/soc/fsl/guts.c [new file with mode: 0644]
include/linux/fsl/guts.h
include/linux/mfd/tmio.h
include/linux/mmc/card.h
include/linux/mmc/core.h
include/linux/mmc/dw_mmc.h
include/linux/mmc/host.h
include/linux/mmc/mmc.h
include/linux/mmc/slot-gpio.h
include/linux/sys_soc.h
include/uapi/linux/mmc/ioctl.h

diff --git a/Documentation/devicetree/bindings/mmc/amlogic,meson-gx.txt b/Documentation/devicetree/bindings/mmc/amlogic,meson-gx.txt
new file mode 100644 (file)
index 0000000..7f95ec4
--- /dev/null
@@ -0,0 +1,32 @@
+Amlogic SD / eMMC controller for S905/GXBB family SoCs
+
+The MMC 5.1 compliant host controller on Amlogic provides the
+interface for SD, eMMC and SDIO devices.
+
+This file documents the properties in addition to those available in
+the MMC core bindings, documented by mmc.txt.
+
+Required properties:
+- compatible : contains one of:
+  - "amlogic,meson-gx-mmc"
+  - "amlogic,meson-gxbb-mmc"
+  - "amlogic,meson-gxl-mmc"
+  - "amlogic,meson-gxm-mmc"
+- clocks     : A list of phandle + clock-specifier pairs for the clocks listed in clock-names.
+- clock-names: Should contain the following:
+       "core" - Main peripheral bus clock
+       "clkin0" - Parent clock of internal mux
+       "clkin1" - Other parent clock of internal mux
+  The driver has an internal mux clock which switches between clkin0 and clkin1 depending on the
+  clock rate requested by the MMC core.
+
+Example:
+
+       sd_emmc_a: mmc@70000 {
+               compatible = "amlogic,meson-gxbb-mmc";
+               reg = <0x0 0x70000 0x0 0x2000>;
+               interrupts = < GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
+               clocks = <&clkc CLKID_SD_EMMC_A>, <&xtal>, <&clkc CLKID_FCLK_DIV2>;
+               clock-names = "core", "clkin0", "clkin1";
+               pinctrl-0 = <&emmc_pins>;
+       };
index be56d2bd474a0c309729128c97c4a3a91bc7b918..954561d09a8e6acca1cdfcd433c07da29027c1ca 100644 (file)
@@ -7,6 +7,15 @@ Required properties:
 - compatible : Should be one of the following
               "brcm,bcm2835-sdhci"
               "brcm,sdhci-iproc-cygnus"
+              "brcm,sdhci-iproc"
+
+Use brcm2835-sdhci for Raspberry Pi.
+
+Use sdhci-iproc-cygnus for Broadcom SDHCI Controllers
+restricted to 32-bit host accesses to SDHCI registers.
+
+Use sdhci-iproc for Broadcom SDHCI Controllers that allow standard
+8, 16, 32-bit host access to SDHCI registers.
 
 - clocks : The clock feeding the SDHCI controller.
 
index ff611fa66871dec93363ea875df4237df90423f0..e4ba92aa035e2314255037ae5ef844ea34d3a406 100644 (file)
@@ -8,11 +8,14 @@ Required properties:
 
 - compatible: should be "renesas,mmcif-<soctype>", "renesas,sh-mmcif" as a
   fallback. Examples with <soctype> are:
+       - "renesas,mmcif-r8a73a4" for the MMCIF found in r8a73a4 SoCs
        - "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
+       - "renesas,mmcif-r8a7778" for the MMCIF found in r8a7778 SoCs
        - "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
        - "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs
        - "renesas,mmcif-r8a7793" for the MMCIF found in r8a7793 SoCs
        - "renesas,mmcif-r8a7794" for the MMCIF found in r8a7794 SoCs
+       - "renesas,mmcif-sh73a0" for the MMCIF found in sh73a0 SoCs
 
 - clocks: reference to the functional clock
 
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt b/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
new file mode 100644 (file)
index 0000000..750374f
--- /dev/null
@@ -0,0 +1,30 @@
+* Cadence SD/SDIO/eMMC Host Controller
+
+Required properties:
+- compatible: should be "cdns,sd4hc".
+- reg: offset and length of the register set for the device.
+- interrupts: a single interrupt specifier.
+- clocks: phandle to the input clock.
+
+Optional properties:
+For eMMC configuration, supported speed modes are not indicated by the SDHCI
+Capabilities Register.  Instead, the following properties should be specified
+if supported.  See mmc.txt for details.
+- mmc-ddr-1_8v
+- mmc-ddr-1_2v
+- mmc-hs200-1_8v
+- mmc-hs200-1_2v
+- mmc-hs400-1_8v
+- mmc-hs400-1_2v
+
+Example:
+       emmc: sdhci@5a000000 {
+               compatible = "cdns,sd4hc";
+               reg = <0x5a000000 0x400>;
+               interrupts = <0 78 4>;
+               clocks = <&clk 4>;
+               bus-width = <8>;
+               mmc-ddr-1_8v;
+               mmc-hs200-1_8v;
+               mmc-hs400-1_8v;
+       };
index 485483a63d8ce3f44a82d30d56c283c8f1034dff..0576264eab5e9b4cb29e003ff9014ad45bedf014 100644 (file)
@@ -17,6 +17,7 @@ Required properties:
        "iface" - Main peripheral bus clock (PCLK/HCLK - AHB Bus clock) (required)
        "core"  - SDC MMC clock (MCLK) (required)
        "bus"   - SDCC bus voter clock (optional)
+       "xo"    - TCXO clock (optional)
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/mmc/sdhci.txt b/Documentation/devicetree/bindings/mmc/sdhci.txt
new file mode 100644 (file)
index 0000000..1c95a1a
--- /dev/null
@@ -0,0 +1,13 @@
+The properties specific for SD host controllers. For properties shared by MMC
+host controllers refer to the mmc[1] bindings.
+
+  [1] Documentation/devicetree/bindings/mmc/mmc.txt
+
+Optional properties:
+- sdhci-caps-mask: The sdhci capabilities register is incorrect. This 64bit
+  property corresponds to the bits in the sdhci capability register. If the bit
+  is on in the mask then the bit is incorrect in the register and should be
+  turned off, before applying sdhci-caps.
+- sdhci-caps: The sdhci capabilities register is incorrect. This 64bit
+  property corresponds to the bits in the sdhci capability register. If the
+  bit is on in the property then the bit should be turned on.
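
The intended combination of these two properties with the value read from
the hardware capabilities register can be summarised by the following
illustrative fragment (a sketch of the semantics described above, not the
sdhci driver's actual code; the function and variable names are made up):

#include <linux/types.h>

/*
 * Bits set in sdhci-caps-mask are cleared from the hardware value first,
 * then bits set in sdhci-caps are ORed in.
 */
static u64 example_effective_caps(u64 hw_caps, u64 dt_caps_mask, u64 dt_caps)
{
	return (hw_caps & ~dt_caps_mask) | dt_caps;
}
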
index bfa461aaac99b3e3033727572009c308efc3b6bf..7fd17c3da11626a8759f6dcf9895ae1f93c48388 100644 (file)
@@ -59,8 +59,9 @@ Optional properties:
   is specified and the ciu clock is specified then we'll try to set the ciu
   clock to this at probe time.
 
-* clock-freq-min-max: Minimum and Maximum clock frequency for card output
+* clock-freq-min-max (DEPRECATED): Minimum and Maximum clock frequency for card output
   clock(cclk_out). If it's not specified, max is 200MHZ and min is 400KHz by default.
+         (Use the "max-frequency" instead of "clock-freq-min-max".)
 
 * num-slots: specifies the number of slots supported by the controller.
   The number of physical slots actually used could be equal or less than the
@@ -74,11 +75,6 @@ Optional properties:
 * card-detect-delay: Delay in milli-seconds before detecting card after card
   insert event. The default value is 0.
 
-* supports-highspeed (DEPRECATED): Enables support for high speed cards (up to 50MHz)
-                          (use "cap-mmc-highspeed" or "cap-sd-highspeed" instead)
-
-* broken-cd: as documented in mmc core bindings.
-
 * vmmc-supply: The phandle to the regulator to use for vmmc.  If this is
   specified we'll defer probe until we can find this regulator.
 
index 13df9c2399c38988b0797ba05f30b0d16e4937f8..a1650edfd2b706cea003d7e242e43fbceeb23f27 100644 (file)
@@ -11,8 +11,8 @@ optional bindings can be used.
 
 Required properties:
 - compatible:  "renesas,sdhi-shmobile" - a generic sh-mobile SDHI unit
-               "renesas,sdhi-sh7372" - SDHI IP on SH7372 SoC
                "renesas,sdhi-sh73a0" - SDHI IP on SH73A0 SoC
+               "renesas,sdhi-r7s72100" - SDHI IP on R7S72100 SoC
                "renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
                "renesas,sdhi-r8a7740" - SDHI IP on R8A7740 SoC
                "renesas,sdhi-r8a7778" - SDHI IP on R8A7778 SoC
similarity index 91%
rename from Documentation/devicetree/bindings/powerpc/fsl/guts.txt
rename to Documentation/devicetree/bindings/soc/fsl/guts.txt
index b71b2039e112b165aff6be74c3dee3bf77fda1a1..07adca914d3dcaf1560de8a54bc155f4f16db8b2 100644 (file)
@@ -25,6 +25,9 @@ Recommended properties:
  - fsl,liodn-bits : Indicates the number of defined bits in the LIODN
    registers, for those SOCs that have a PAMU device.
 
+ - little-endian : Indicates that the global utilities block is little
+   endian. The default is big endian.
+
 Examples:
        global-utilities@e0000 {        /* global utilities block */
                compatible = "fsl,mpc8548-guts";
index d04335080cdd5f06e641c1f183f7bb7d76ba6df8..ef461e087275e579141d4c532b2ea9b3ba13107d 100644 (file)
@@ -1052,6 +1052,7 @@ F:        arch/arm/mach-meson/
 F:     arch/arm/boot/dts/meson*
 F:     arch/arm64/boot/dts/amlogic/
 F:     drivers/pinctrl/meson/
+F:     drivers/mmc/host/meson*
 N:     meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
@@ -5068,9 +5069,18 @@ S:       Maintained
 F:     drivers/net/ethernet/freescale/fman
 F:     Documentation/devicetree/bindings/powerpc/fsl/fman.txt
 
+FREESCALE SOC DRIVERS
+M:     Scott Wood <oss@buserror.net>
+L:     linuxppc-dev@lists.ozlabs.org
+L:     linux-arm-kernel@lists.infradead.org
+S:     Maintained
+F:     drivers/soc/fsl/
+F:     include/linux/fsl/
+
 FREESCALE QUICC ENGINE LIBRARY
+M:     Qiang Zhao <qiang.zhao@nxp.com>
 L:     linuxppc-dev@lists.ozlabs.org
-S:     Orphan
+S:     Maintained
 F:     drivers/soc/fsl/qe/
 F:     include/soc/fsl/*qe*.h
 F:     include/soc/fsl/*ucc*.h
index 7f0dc13b4087f5a346fcb60e7ab40080d74c5500..d058e56db72d46f8d6b9cd4444c76219ffd0e9ef 100644 (file)
@@ -216,6 +216,12 @@ clockgen: clocking@1300000 {
                        clocks = <&sysclk>;
                };
 
+               dcfg: dcfg@1e00000 {
+                       compatible = "fsl,ls2080a-dcfg", "syscon";
+                       reg = <0x0 0x1e00000 0x0 0x10000>;
+                       little-endian;
+               };
+
                serial0: serial@21c0500 {
                        compatible = "fsl,ns16550", "ns16550a";
                        reg = <0x0 0x21c0500 0x0 0x100>;
index d02e7c0f5bfdff1c6c56372113e230e55be54f5f..2abea876c0a0c2c2ed7b86e88e953462b6fdd096 100644 (file)
@@ -237,6 +237,7 @@ config GENERIC_CPU_AUTOPROBE
 
 config SOC_BUS
        bool
+       select GLOB
 
 source "drivers/base/regmap/Kconfig"
 
index b63f23e6ad61b647ff26eecaeb4685e55fd67ad0..dc26e5949a3202233ddbeecd6921cd3f1488120b 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/sys_soc.h>
 #include <linux/err.h>
+#include <linux/glob.h>
 
 static DEFINE_IDA(soc_ida);
 
@@ -113,6 +114,12 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
        struct soc_device *soc_dev;
        int ret;
 
+       if (!soc_bus_type.p) {
+               ret = bus_register(&soc_bus_type);
+               if (ret)
+                       goto out1;
+       }
+
        soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
        if (!soc_dev) {
                ret = -ENOMEM;
@@ -156,6 +163,78 @@ void soc_device_unregister(struct soc_device *soc_dev)
 
 static int __init soc_bus_register(void)
 {
+       if (soc_bus_type.p)
+               return 0;
+
        return bus_register(&soc_bus_type);
 }
 core_initcall(soc_bus_register);
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+       struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+       const struct soc_device_attribute *match = arg;
+
+       if (match->machine &&
+           (!soc_dev->attr->machine ||
+            !glob_match(match->machine, soc_dev->attr->machine)))
+               return 0;
+
+       if (match->family &&
+           (!soc_dev->attr->family ||
+            !glob_match(match->family, soc_dev->attr->family)))
+               return 0;
+
+       if (match->revision &&
+           (!soc_dev->attr->revision ||
+            !glob_match(match->revision, soc_dev->attr->revision)))
+               return 0;
+
+       if (match->soc_id &&
+           (!soc_dev->attr->soc_id ||
+            !glob_match(match->soc_id, soc_dev->attr->soc_id)))
+               return 0;
+
+       return 1;
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+       const struct soc_device_attribute *matches)
+{
+       int ret = 0;
+
+       if (!matches)
+               return NULL;
+
+       while (!ret) {
+               if (!(matches->machine || matches->family ||
+                     matches->revision || matches->soc_id))
+                       break;
+               ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+                                      soc_device_match_one);
+               if (!ret)
+                       matches++;
+               else
+                       return matches;
+       }
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
index 709a872ed484a9da1ce620238c3222190c612f86..646d1a1fa6ca8d47e2984824f602433385da91a1 100644 (file)
@@ -66,9 +66,6 @@ MODULE_ALIAS("mmc:block");
 
 #define mmc_req_rel_wr(req)    ((req->cmd_flags & REQ_FUA) && \
                                  (rq_data_dir(req) == WRITE))
-#define PACKED_CMD_VER 0x01
-#define PACKED_CMD_WR  0x02
-
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -102,7 +99,6 @@ struct mmc_blk_data {
        unsigned int    flags;
 #define MMC_BLK_CMD23  (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR (1 << 1)        /* MMC Reliable write support */
-#define MMC_BLK_PACKED_CMD     (1 << 2)        /* MMC packed command support */
 
        unsigned int    usage;
        unsigned int    read_only;
@@ -126,12 +122,6 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum {
-       MMC_PACKED_NR_IDX = -1,
-       MMC_PACKED_NR_ZERO,
-       MMC_PACKED_NR_SINGLE,
-};
-
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
@@ -139,17 +129,6 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md);
 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 
-static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
-{
-       struct mmc_packed *packed = mqrq->packed;
-
-       mqrq->cmd_type = MMC_PACKED_NONE;
-       packed->nr_entries = MMC_PACKED_NR_ZERO;
-       packed->idx_failure = MMC_PACKED_NR_IDX;
-       packed->retries = 0;
-       packed->blocks = 0;
-}
-
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
        struct mmc_blk_data *md;
@@ -854,7 +833,7 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries)
 }
 
 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
-               bool hw_busy_detect, struct request *req, int *gen_err)
+               bool hw_busy_detect, struct request *req, bool *gen_err)
 {
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
        int err = 0;
@@ -871,7 +850,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
                if (status & R1_ERROR) {
                        pr_err("%s: %s: error sending status cmd, status %#x\n",
                                req->rq_disk->disk_name, __func__, status);
-                       *gen_err = 1;
+                       *gen_err = true;
                }
 
                /* We may rely on the host hw to handle busy detection.*/
@@ -902,7 +881,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 }
 
 static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
-               struct request *req, int *gen_err, u32 *stop_status)
+               struct request *req, bool *gen_err, u32 *stop_status)
 {
        struct mmc_host *host = card->host;
        struct mmc_command cmd = {0};
@@ -940,7 +919,7 @@ static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
                (*stop_status & R1_ERROR)) {
                pr_err("%s: %s: general error sending stop command, resp %#x\n",
                        req->rq_disk->disk_name, __func__, *stop_status);
-               *gen_err = 1;
+               *gen_err = true;
        }
 
        return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
@@ -1014,7 +993,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
  * Otherwise we don't understand what happened, so abort.
  */
 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
-       struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
+       struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
 {
        bool prev_cmd_status_valid = true;
        u32 status, stop_status = 0;
@@ -1053,7 +1032,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
        if ((status & R1_CARD_ECC_FAILED) ||
            (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
            (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
-               *ecc_err = 1;
+               *ecc_err = true;
 
        /* Flag General errors */
        if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
@@ -1062,7 +1041,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
                        pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
                               req->rq_disk->disk_name, __func__,
                               brq->stop.resp[0], status);
-                       *gen_err = 1;
+                       *gen_err = true;
                }
 
        /*
@@ -1085,7 +1064,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
                }
 
                if (stop_status & R1_CARD_ECC_FAILED)
-                       *ecc_err = 1;
+                       *ecc_err = true;
        }
 
        /* Check for set block count errors */
@@ -1154,7 +1133,7 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
 
 int mmc_access_rpmb(struct mmc_queue *mq)
 {
-       struct mmc_blk_data *md = mq->data;
+       struct mmc_blk_data *md = mq->blkdata;
        /*
         * If this is a RPMB partition access, return ture
         */
@@ -1166,7 +1145,7 @@ int mmc_access_rpmb(struct mmc_queue *mq)
 
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
-       struct mmc_blk_data *md = mq->data;
+       struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_DISCARD;
@@ -1210,7 +1189,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
 {
-       struct mmc_blk_data *md = mq->data;
+       struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_SECDISCARD;
@@ -1276,7 +1255,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 
 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
 {
-       struct mmc_blk_data *md = mq->data;
+       struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
        int ret = 0;
 
@@ -1320,15 +1299,16 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
         R1_CC_ERROR |          /* Card controller error */             \
         R1_ERROR)              /* General/unknown error */
 
-static int mmc_blk_err_check(struct mmc_card *card,
-                            struct mmc_async_req *areq)
+static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
+                                            struct mmc_async_req *areq)
 {
        struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
                                                    mmc_active);
        struct mmc_blk_request *brq = &mq_mrq->brq;
        struct request *req = mq_mrq->req;
        int need_retune = card->host->need_retune;
-       int ecc_err = 0, gen_err = 0;
+       bool ecc_err = false;
+       bool gen_err = false;
 
        /*
         * sbc.error indicates a problem with the set block count
@@ -1378,7 +1358,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
                        pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
                               req->rq_disk->disk_name, __func__,
                               brq->stop.resp[0]);
-                       gen_err = 1;
+                       gen_err = true;
                }
 
                err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
@@ -1419,67 +1399,12 @@ static int mmc_blk_err_check(struct mmc_card *card,
        if (!brq->data.bytes_xfered)
                return MMC_BLK_RETRY;
 
-       if (mmc_packed_cmd(mq_mrq->cmd_type)) {
-               if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
-                       return MMC_BLK_PARTIAL;
-               else
-                       return MMC_BLK_SUCCESS;
-       }
-
        if (blk_rq_bytes(req) != brq->data.bytes_xfered)
                return MMC_BLK_PARTIAL;
 
        return MMC_BLK_SUCCESS;
 }
 
-static int mmc_blk_packed_err_check(struct mmc_card *card,
-                                   struct mmc_async_req *areq)
-{
-       struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
-                       mmc_active);
-       struct request *req = mq_rq->req;
-       struct mmc_packed *packed = mq_rq->packed;
-       int err, check, status;
-       u8 *ext_csd;
-
-       packed->retries--;
-       check = mmc_blk_err_check(card, areq);
-       err = get_card_status(card, &status, 0);
-       if (err) {
-               pr_err("%s: error %d sending status command\n",
-                      req->rq_disk->disk_name, err);
-               return MMC_BLK_ABORT;
-       }
-
-       if (status & R1_EXCEPTION_EVENT) {
-               err = mmc_get_ext_csd(card, &ext_csd);
-               if (err) {
-                       pr_err("%s: error %d sending ext_csd\n",
-                              req->rq_disk->disk_name, err);
-                       return MMC_BLK_ABORT;
-               }
-
-               if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
-                    EXT_CSD_PACKED_FAILURE) &&
-                   (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
-                    EXT_CSD_PACKED_GENERIC_ERROR)) {
-                       if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
-                           EXT_CSD_PACKED_INDEXED_ERROR) {
-                               packed->idx_failure =
-                                 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
-                               check = MMC_BLK_PARTIAL;
-                       }
-                       pr_err("%s: packed cmd failed, nr %u, sectors %u, "
-                              "failure index: %d\n",
-                              req->rq_disk->disk_name, packed->nr_entries,
-                              packed->blocks, packed->idx_failure);
-               }
-               kfree(ext_csd);
-       }
-
-       return check;
-}
-
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                               struct mmc_card *card,
                               int disable_multi,
@@ -1488,7 +1413,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
        u32 readcmd, writecmd;
        struct mmc_blk_request *brq = &mqrq->brq;
        struct request *req = mqrq->req;
-       struct mmc_blk_data *md = mq->data;
+       struct mmc_blk_data *md = mq->blkdata;
        bool do_data_tag;
 
        /*
@@ -1640,224 +1565,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
        mmc_queue_bounce_pre(mqrq);
 }
 
-static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
-                                         struct mmc_card *card)
-{
-       unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
-       unsigned int max_seg_sz = queue_max_segment_size(q);
-       unsigned int len, nr_segs = 0;
-
-       do {
-               len = min(hdr_sz, max_seg_sz);
-               hdr_sz -= len;
-               nr_segs++;
-       } while (hdr_sz);
-
-       return nr_segs;
-}
-
-static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
-{
-       struct request_queue *q = mq->queue;
-       struct mmc_card *card = mq->card;
-       struct request *cur = req, *next = NULL;
-       struct mmc_blk_data *md = mq->data;
-       struct mmc_queue_req *mqrq = mq->mqrq_cur;
-       bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
-       unsigned int req_sectors = 0, phys_segments = 0;
-       unsigned int max_blk_count, max_phys_segs;
-       bool put_back = true;
-       u8 max_packed_rw = 0;
-       u8 reqs = 0;
-
-       /*
-        * We don't need to check packed for any further
-        * operation of packed stuff as we set MMC_PACKED_NONE
-        * and return zero for reqs if geting null packed. Also
-        * we clean the flag of MMC_BLK_PACKED_CMD to avoid doing
-        * it again when removing blk req.
-        */
-       if (!mqrq->packed) {
-               md->flags &= (~MMC_BLK_PACKED_CMD);
-               goto no_packed;
-       }
-
-       if (!(md->flags & MMC_BLK_PACKED_CMD))
-               goto no_packed;
-
-       if ((rq_data_dir(cur) == WRITE) &&
-           mmc_host_packed_wr(card->host))
-               max_packed_rw = card->ext_csd.max_packed_writes;
-
-       if (max_packed_rw == 0)
-               goto no_packed;
-
-       if (mmc_req_rel_wr(cur) &&
-           (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
-               goto no_packed;
-
-       if (mmc_large_sector(card) &&
-           !IS_ALIGNED(blk_rq_sectors(cur), 8))
-               goto no_packed;
-
-       mmc_blk_clear_packed(mqrq);
-
-       max_blk_count = min(card->host->max_blk_count,
-                           card->host->max_req_size >> 9);
-       if (unlikely(max_blk_count > 0xffff))
-               max_blk_count = 0xffff;
-
-       max_phys_segs = queue_max_segments(q);
-       req_sectors += blk_rq_sectors(cur);
-       phys_segments += cur->nr_phys_segments;
-
-       if (rq_data_dir(cur) == WRITE) {
-               req_sectors += mmc_large_sector(card) ? 8 : 1;
-               phys_segments += mmc_calc_packed_hdr_segs(q, card);
-       }
-
-       do {
-               if (reqs >= max_packed_rw - 1) {
-                       put_back = false;
-                       break;
-               }
-
-               spin_lock_irq(q->queue_lock);
-               next = blk_fetch_request(q);
-               spin_unlock_irq(q->queue_lock);
-               if (!next) {
-                       put_back = false;
-                       break;
-               }
-
-               if (mmc_large_sector(card) &&
-                   !IS_ALIGNED(blk_rq_sectors(next), 8))
-                       break;
-
-               if (req_op(next) == REQ_OP_DISCARD ||
-                   req_op(next) == REQ_OP_SECURE_ERASE ||
-                   req_op(next) == REQ_OP_FLUSH)
-                       break;
-
-               if (rq_data_dir(cur) != rq_data_dir(next))
-                       break;
-
-               if (mmc_req_rel_wr(next) &&
-                   (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
-                       break;
-
-               req_sectors += blk_rq_sectors(next);
-               if (req_sectors > max_blk_count)
-                       break;
-
-               phys_segments +=  next->nr_phys_segments;
-               if (phys_segments > max_phys_segs)
-                       break;
-
-               list_add_tail(&next->queuelist, &mqrq->packed->list);
-               cur = next;
-               reqs++;
-       } while (1);
-
-       if (put_back) {
-               spin_lock_irq(q->queue_lock);
-               blk_requeue_request(q, next);
-               spin_unlock_irq(q->queue_lock);
-       }
-
-       if (reqs > 0) {
-               list_add(&req->queuelist, &mqrq->packed->list);
-               mqrq->packed->nr_entries = ++reqs;
-               mqrq->packed->retries = reqs;
-               return reqs;
-       }
-
-no_packed:
-       mqrq->cmd_type = MMC_PACKED_NONE;
-       return 0;
-}
-
-static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
-                                       struct mmc_card *card,
-                                       struct mmc_queue *mq)
-{
-       struct mmc_blk_request *brq = &mqrq->brq;
-       struct request *req = mqrq->req;
-       struct request *prq;
-       struct mmc_blk_data *md = mq->data;
-       struct mmc_packed *packed = mqrq->packed;
-       bool do_rel_wr, do_data_tag;
-       __le32 *packed_cmd_hdr;
-       u8 hdr_blocks;
-       u8 i = 1;
-
-       mqrq->cmd_type = MMC_PACKED_WRITE;
-       packed->blocks = 0;
-       packed->idx_failure = MMC_PACKED_NR_IDX;
-
-       packed_cmd_hdr = packed->cmd_hdr;
-       memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
-       packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
-               (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
-       hdr_blocks = mmc_large_sector(card) ? 8 : 1;
-
-       /*
-        * Argument for each entry of packed group
-        */
-       list_for_each_entry(prq, &packed->list, queuelist) {
-               do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
-               do_data_tag = (card->ext_csd.data_tag_unit_size) &&
-                       (prq->cmd_flags & REQ_META) &&
-                       (rq_data_dir(prq) == WRITE) &&
-                       blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
-               /* Argument of CMD23 */
-               packed_cmd_hdr[(i * 2)] = cpu_to_le32(
-                       (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
-                       (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
-                       blk_rq_sectors(prq));
-               /* Argument of CMD18 or CMD25 */
-               packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
-                       mmc_card_blockaddr(card) ?
-                       blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
-               packed->blocks += blk_rq_sectors(prq);
-               i++;
-       }
-
-       memset(brq, 0, sizeof(struct mmc_blk_request));
-       brq->mrq.cmd = &brq->cmd;
-       brq->mrq.data = &brq->data;
-       brq->mrq.sbc = &brq->sbc;
-       brq->mrq.stop = &brq->stop;
-
-       brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
-       brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
-       brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-
-       brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
-       brq->cmd.arg = blk_rq_pos(req);
-       if (!mmc_card_blockaddr(card))
-               brq->cmd.arg <<= 9;
-       brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-
-       brq->data.blksz = 512;
-       brq->data.blocks = packed->blocks + hdr_blocks;
-       brq->data.flags = MMC_DATA_WRITE;
-
-       brq->stop.opcode = MMC_STOP_TRANSMISSION;
-       brq->stop.arg = 0;
-       brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-
-       mmc_set_data_timeout(&brq->data, card);
-
-       brq->data.sg = mqrq->sg;
-       brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
-
-       mqrq->mmc_active.mrq = &brq->mrq;
-       mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
-
-       mmc_queue_bounce_pre(mqrq);
-}
-
 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
                           struct mmc_blk_request *brq, struct request *req,
                           int ret)
@@ -1881,97 +1588,25 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
                        ret = blk_end_request(req, 0, blocks << 9);
                }
        } else {
-               if (!mmc_packed_cmd(mq_rq->cmd_type))
-                       ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+               ret = blk_end_request(req, 0, brq->data.bytes_xfered);
        }
        return ret;
 }
 
-static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
-{
-       struct request *prq;
-       struct mmc_packed *packed = mq_rq->packed;
-       int idx = packed->idx_failure, i = 0;
-       int ret = 0;
-
-       while (!list_empty(&packed->list)) {
-               prq = list_entry_rq(packed->list.next);
-               if (idx == i) {
-                       /* retry from error index */
-                       packed->nr_entries -= idx;
-                       mq_rq->req = prq;
-                       ret = 1;
-
-                       if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
-                               list_del_init(&prq->queuelist);
-                               mmc_blk_clear_packed(mq_rq);
-                       }
-                       return ret;
-               }
-               list_del_init(&prq->queuelist);
-               blk_end_request(prq, 0, blk_rq_bytes(prq));
-               i++;
-       }
-
-       mmc_blk_clear_packed(mq_rq);
-       return ret;
-}
-
-static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
-{
-       struct request *prq;
-       struct mmc_packed *packed = mq_rq->packed;
-
-       while (!list_empty(&packed->list)) {
-               prq = list_entry_rq(packed->list.next);
-               list_del_init(&prq->queuelist);
-               blk_end_request(prq, -EIO, blk_rq_bytes(prq));
-       }
-
-       mmc_blk_clear_packed(mq_rq);
-}
-
-static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
-                                     struct mmc_queue_req *mq_rq)
-{
-       struct request *prq;
-       struct request_queue *q = mq->queue;
-       struct mmc_packed *packed = mq_rq->packed;
-
-       while (!list_empty(&packed->list)) {
-               prq = list_entry_rq(packed->list.prev);
-               if (prq->queuelist.prev != &packed->list) {
-                       list_del_init(&prq->queuelist);
-                       spin_lock_irq(q->queue_lock);
-                       blk_requeue_request(mq->queue, prq);
-                       spin_unlock_irq(q->queue_lock);
-               } else {
-                       list_del_init(&prq->queuelist);
-               }
-       }
-
-       mmc_blk_clear_packed(mq_rq);
-}
-
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
-       struct mmc_blk_data *md = mq->data;
+       struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
-       struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+       struct mmc_blk_request *brq;
        int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
        enum mmc_blk_status status;
        struct mmc_queue_req *mq_rq;
-       struct request *req = rqc;
+       struct request *req;
        struct mmc_async_req *areq;
-       const u8 packed_nr = 2;
-       u8 reqs = 0;
 
        if (!rqc && !mq->mqrq_prev->req)
                return 0;
 
-       if (rqc)
-               reqs = mmc_blk_prep_packed_list(mq, rqc);
-
        do {
                if (rqc) {
                        /*
@@ -1981,20 +1616,18 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                        if (mmc_large_sector(card) &&
                                !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
                                pr_err("%s: Transfer size is not 4KB sector size aligned\n",
-                                       req->rq_disk->disk_name);
+                                       rqc->rq_disk->disk_name);
                                mq_rq = mq->mqrq_cur;
+                               req = rqc;
+                               rqc = NULL;
                                goto cmd_abort;
                        }
 
-                       if (reqs >= packed_nr)
-                               mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
-                                                           card, mq);
-                       else
-                               mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+                       mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
                        areq = &mq->mqrq_cur->mmc_active;
                } else
                        areq = NULL;
-               areq = mmc_start_req(card->host, areq, (int *) &status);
+               areq = mmc_start_req(card->host, areq, &status);
                if (!areq) {
                        if (status == MMC_BLK_NEW_REQUEST)
                                mq->flags |= MMC_QUEUE_NEW_REQUEST;
@@ -2015,13 +1648,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                         */
                        mmc_blk_reset_success(md, type);
 
-                       if (mmc_packed_cmd(mq_rq->cmd_type)) {
-                               ret = mmc_blk_end_packed_req(mq_rq);
-                               break;
-                       } else {
-                               ret = blk_end_request(req, 0,
-                                               brq->data.bytes_xfered);
-                       }
+                       ret = blk_end_request(req, 0,
+                                       brq->data.bytes_xfered);
 
                        /*
                         * If the blk_end_request function returns non-zero even
@@ -2058,8 +1686,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                        err = mmc_blk_reset(md, card->host, type);
                        if (!err)
                                break;
-                       if (err == -ENODEV ||
-                               mmc_packed_cmd(mq_rq->cmd_type))
+                       if (err == -ENODEV)
                                goto cmd_abort;
                        /* Fall through */
                }
@@ -2090,23 +1717,14 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                }
 
                if (ret) {
-                       if (mmc_packed_cmd(mq_rq->cmd_type)) {
-                               if (!mq_rq->packed->retries)
-                                       goto cmd_abort;
-                               mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
-                               mmc_start_req(card->host,
-                                             &mq_rq->mmc_active, NULL);
-                       } else {
-
-                               /*
-                                * In case of a incomplete request
-                                * prepare it again and resend.
-                                */
-                               mmc_blk_rw_rq_prep(mq_rq, card,
-                                               disable_multi, mq);
-                               mmc_start_req(card->host,
-                                               &mq_rq->mmc_active, NULL);
-                       }
+                       /*
+                        * In case of a incomplete request
+                        * prepare it again and resend.
+                        */
+                       mmc_blk_rw_rq_prep(mq_rq, card,
+                                       disable_multi, mq);
+                       mmc_start_req(card->host,
+                                       &mq_rq->mmc_active, NULL);
                        mq_rq->brq.retune_retry_done = retune_retry_done;
                }
        } while (ret);
@@ -2114,15 +1732,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
        return 1;
 
  cmd_abort:
-       if (mmc_packed_cmd(mq_rq->cmd_type)) {
-               mmc_blk_abort_packed_req(mq_rq);
-       } else {
-               if (mmc_card_removed(card))
-                       req->cmd_flags |= REQ_QUIET;
-               while (ret)
-                       ret = blk_end_request(req, -EIO,
-                                       blk_rq_cur_bytes(req));
-       }
+       if (mmc_card_removed(card))
+               req->cmd_flags |= REQ_QUIET;
+       while (ret)
+               ret = blk_end_request(req, -EIO,
+                               blk_rq_cur_bytes(req));
 
  start_new_req:
        if (rqc) {
@@ -2130,12 +1744,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                        rqc->cmd_flags |= REQ_QUIET;
                        blk_end_request_all(rqc, -EIO);
                } else {
-                       /*
-                        * If current request is packed, it needs to put back.
-                        */
-                       if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
-                               mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
-
                        mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
                        mmc_start_req(card->host,
                                      &mq->mqrq_cur->mmc_active, NULL);
@@ -2148,10 +1756,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
        int ret;
-       struct mmc_blk_data *md = mq->data;
+       struct mmc_blk_data *md = mq->blkdata;
        struct mmc_card *card = md->queue.card;
-       struct mmc_host *host = card->host;
-       unsigned long flags;
        bool req_is_special = mmc_req_is_special(req);
 
        if (req && !mq->mqrq_prev->req)
@@ -2184,11 +1790,6 @@ int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                        mmc_blk_issue_rw_rq(mq, NULL);
                ret = mmc_blk_issue_flush(mq, req);
        } else {
-               if (!req && host->areq) {
-                       spin_lock_irqsave(&host->context_info.lock, flags);
-                       host->context_info.is_waiting_last_req = true;
-                       spin_unlock_irqrestore(&host->context_info.lock, flags);
-               }
                ret = mmc_blk_issue_rw_rq(mq, req);
        }
 
@@ -2266,7 +1867,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        if (ret)
                goto err_putdisk;
 
-       md->queue.data = md;
+       md->queue.blkdata = md;
 
        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->first_minor = devidx * perdev_minors;
@@ -2318,14 +1919,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                blk_queue_write_cache(md->queue.queue, true, true);
        }
 
-       if (mmc_card_mmc(card) &&
-           (area_type == MMC_BLK_DATA_AREA_MAIN) &&
-           (md->flags & MMC_BLK_CMD23) &&
-           card->ext_csd.packed_event_en) {
-               if (!mmc_packed_init(&md->queue, card))
-                       md->flags |= MMC_BLK_PACKED_CMD;
-       }
-
        return md;
 
  err_putdisk:
@@ -2429,8 +2022,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
                 */
                card = md->queue.card;
                mmc_cleanup_queue(&md->queue);
-               if (md->flags & MMC_BLK_PACKED_CMD)
-                       mmc_packed_clean(&md->queue);
                if (md->disk->flags & GENHD_FL_UP) {
                        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
                        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
index 3678220964fe62948a9a4d1aa2fed06b9c1a3a66..ec1d1c46eb90069fe4f313e8bb4c877d973a7b7b 100644 (file)
@@ -214,7 +214,8 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
        struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
        unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
 {
-       BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
+       if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
+               return;
 
        if (blocks > 1) {
                mrq->cmd->opcode = write ?
@@ -694,7 +695,8 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
        struct mmc_request *mrq, int write)
 {
-       BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+       if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+               return;
 
        if (mrq->data->blocks > 1) {
                mrq->cmd->opcode = write ?
@@ -714,7 +716,8 @@ static int mmc_test_check_result(struct mmc_test_card *test,
 {
        int ret;
 
-       BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+       if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+               return -EINVAL;
 
        ret = 0;
 
@@ -736,15 +739,28 @@ static int mmc_test_check_result(struct mmc_test_card *test,
        return ret;
 }
 
-static int mmc_test_check_result_async(struct mmc_card *card,
+static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
                                       struct mmc_async_req *areq)
 {
        struct mmc_test_async_req *test_async =
                container_of(areq, struct mmc_test_async_req, areq);
+       int ret;
 
        mmc_test_wait_busy(test_async->test);
 
-       return mmc_test_check_result(test_async->test, areq->mrq);
+       /*
+        * FIXME: this would earlier just cast a regular error code,
+        * either of the kernel type -ERRORCODE or the local test framework
+        * RESULT_* errorcode, into an enum mmc_blk_status and return as
+        * result check. Instead, convert it to some reasonable type by just
+        * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
+        * If possible, a reasonable error code should be returned.
+        */
+       ret = mmc_test_check_result(test_async->test, areq->mrq);
+       if (ret)
+               return MMC_BLK_CMD_ERR;
+
+       return MMC_BLK_SUCCESS;
 }
 
 /*
@@ -755,7 +771,8 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
 {
        int ret;
 
-       BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+       if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+               return -EINVAL;
 
        ret = 0;
 
@@ -817,8 +834,9 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
        struct mmc_async_req *done_areq;
        struct mmc_async_req *cur_areq = &test_areq[0].areq;
        struct mmc_async_req *other_areq = &test_areq[1].areq;
+       enum mmc_blk_status status;
        int i;
-       int ret;
+       int ret = RESULT_OK;
 
        test_areq[0].test = test;
        test_areq[1].test = test;
@@ -834,10 +852,12 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
        for (i = 0; i < count; i++) {
                mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
                                     blocks, blksz, write);
-               done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
+               done_areq = mmc_start_req(test->card->host, cur_areq, &status);
 
-               if (ret || (!done_areq && i > 0))
+               if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
+                       ret = RESULT_FAIL;
                        goto err;
+               }
 
                if (done_areq) {
                        if (done_areq->mrq == &mrq2)
@@ -851,7 +871,9 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
                dev_addr += blocks;
        }
 
-       done_areq = mmc_start_req(test->card->host, NULL, &ret);
+       done_areq = mmc_start_req(test->card->host, NULL, &status);
+       if (status != MMC_BLK_SUCCESS)
+               ret = RESULT_FAIL;
 
        return ret;
 err:
@@ -2351,6 +2373,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
        struct mmc_request *mrq;
        unsigned long timeout;
        bool expired = false;
+       enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
        int ret = 0, cmd_ret;
        u32 status = 0;
        int count = 0;
@@ -2378,9 +2401,11 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
 
        /* Start ongoing data request */
        if (use_areq) {
-               mmc_start_req(host, &test_areq.areq, &ret);
-               if (ret)
+               mmc_start_req(host, &test_areq.areq, &blkstat);
+               if (blkstat != MMC_BLK_SUCCESS) {
+                       ret = RESULT_FAIL;
                        goto out_free;
+               }
        } else {
                mmc_wait_for_req(host, mrq);
        }
@@ -2413,10 +2438,13 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
        } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
 
        /* Wait for data request to complete */
-       if (use_areq)
-               mmc_start_req(host, NULL, &ret);
-       else
+       if (use_areq) {
+               mmc_start_req(host, NULL, &blkstat);
+               if (blkstat != MMC_BLK_SUCCESS)
+                       ret = RESULT_FAIL;
+       } else {
                mmc_wait_for_req_done(test->card->host, mrq);
+       }
 
        /*
         * For cap_cmd_during_tfr request, upper layer must send stop if
index 8037f73a109a14a4b9593e4034705ebc29ec8752..cf29809f69e4e2cc462ff0e6e2f71d620a551913 100644 (file)
@@ -53,6 +53,7 @@ static int mmc_queue_thread(void *d)
 {
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;
+       struct mmc_context_info *cntx = &mq->card->host->context_info;
 
        current->flags |= PF_MEMALLOC;
 
@@ -63,6 +64,19 @@ static int mmc_queue_thread(void *d)
                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
+               mq->asleep = false;
+               cntx->is_waiting_last_req = false;
+               cntx->is_new_req = false;
+               if (!req) {
+                       /*
+                        * Dispatch queue is empty so set flags for
+                        * mmc_request_fn() to wake us up.
+                        */
+                       if (mq->mqrq_prev->req)
+                               cntx->is_waiting_last_req = true;
+                       else
+                               mq->asleep = true;
+               }
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);
 
@@ -115,7 +129,6 @@ static void mmc_request_fn(struct request_queue *q)
 {
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
-       unsigned long flags;
        struct mmc_context_info *cntx;
 
        if (!mq) {
@@ -127,19 +140,13 @@ static void mmc_request_fn(struct request_queue *q)
        }
 
        cntx = &mq->card->host->context_info;
-       if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
-               /*
-                * New MMC request arrived when MMC thread may be
-                * blocked on the previous request to be complete
-                * with no current request fetched
-                */
-               spin_lock_irqsave(&cntx->lock, flags);
-               if (cntx->is_waiting_last_req) {
-                       cntx->is_new_req = true;
-                       wake_up_interruptible(&cntx->wait);
-               }
-               spin_unlock_irqrestore(&cntx->lock, flags);
-       } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+
+       if (cntx->is_waiting_last_req) {
+               cntx->is_new_req = true;
+               wake_up_interruptible(&cntx->wait);
+       }
+
+       if (mq->asleep)
                wake_up_process(mq->thread);
 }
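Read together, the two hunks above form a small wake-up handshake between the queue thread and the request function. A sketch of it, not part of the commit, assuming (as in the legacy block layer) that ->request_fn is invoked with q->queue_lock already held:

/*
 * Sketch only, not part of the commit.
 *
 *   mmc_queue_thread()                      mmc_request_fn()
 *     spin_lock_irq(q->queue_lock)            (queue_lock held by block layer)
 *     req = blk_fetch_request(q)              if (cntx->is_waiting_last_req)
 *     if (!req)                                   cntx->is_new_req = true
 *       is_waiting_last_req = true                wake_up_interruptible(&cntx->wait)
 *       ... or mq->asleep = true              if (mq->asleep)
 *     spin_unlock_irq(q->queue_lock)              wake_up_process(mq->thread)
 */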
 
@@ -179,6 +186,82 @@ static void mmc_queue_setup_discard(struct request_queue *q,
                queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
+                                       unsigned int bouncesz)
+{
+       int i;
+
+       for (i = 0; i < mq->qdepth; i++) {
+               mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+               if (!mq->mqrq[i].bounce_buf)
+                       goto out_err;
+       }
+
+       return true;
+
+out_err:
+       while (--i >= 0) {
+               kfree(mq->mqrq[i].bounce_buf);
+               mq->mqrq[i].bounce_buf = NULL;
+       }
+       pr_warn("%s: unable to allocate bounce buffers\n",
+               mmc_card_name(mq->card));
+       return false;
+}
+
+static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
+                                     unsigned int bouncesz)
+{
+       int i, ret;
+
+       for (i = 0; i < mq->qdepth; i++) {
+               mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
+               if (ret)
+                       return ret;
+
+               mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+#endif
+
+static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
+{
+       int i, ret;
+
+       for (i = 0; i < mq->qdepth; i++) {
+               mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+       kfree(mqrq->bounce_sg);
+       mqrq->bounce_sg = NULL;
+
+       kfree(mqrq->sg);
+       mqrq->sg = NULL;
+
+       kfree(mqrq->bounce_buf);
+       mqrq->bounce_buf = NULL;
+}
+
+static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+{
+       int i;
+
+       for (i = 0; i < mq->qdepth; i++)
+               mmc_queue_req_free_bufs(&mq->mqrq[i]);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -193,9 +276,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 {
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
-       int ret;
-       struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
-       struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+       bool bounce = false;
+       int ret = -ENOMEM;
 
        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -205,8 +287,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
        if (!mq->queue)
                return -ENOMEM;
 
-       mq->mqrq_cur = mqrq_cur;
-       mq->mqrq_prev = mqrq_prev;
+       mq->qdepth = 2;
+       mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
+                          GFP_KERNEL);
+       if (!mq->mqrq)
+               goto blk_cleanup;
+       mq->mqrq_cur = &mq->mqrq[0];
+       mq->mqrq_prev = &mq->mqrq[1];
        mq->queue->queuedata = mq;
 
        blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -228,63 +315,29 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;
 
-               if (bouncesz > 512) {
-                       mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-                       if (!mqrq_cur->bounce_buf) {
-                               pr_warn("%s: unable to allocate bounce cur buffer\n",
-                                       mmc_card_name(card));
-                       } else {
-                               mqrq_prev->bounce_buf =
-                                               kmalloc(bouncesz, GFP_KERNEL);
-                               if (!mqrq_prev->bounce_buf) {
-                                       pr_warn("%s: unable to allocate bounce prev buffer\n",
-                                               mmc_card_name(card));
-                                       kfree(mqrq_cur->bounce_buf);
-                                       mqrq_cur->bounce_buf = NULL;
-                               }
-                       }
-               }
-
-               if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
+               if (bouncesz > 512 &&
+                   mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);
 
-                       mqrq_cur->sg = mmc_alloc_sg(1, &ret);
-                       if (ret)
-                               goto cleanup_queue;
-
-                       mqrq_cur->bounce_sg =
-                               mmc_alloc_sg(bouncesz / 512, &ret);
-                       if (ret)
-                               goto cleanup_queue;
-
-                       mqrq_prev->sg = mmc_alloc_sg(1, &ret);
-                       if (ret)
-                               goto cleanup_queue;
-
-                       mqrq_prev->bounce_sg =
-                               mmc_alloc_sg(bouncesz / 512, &ret);
+                       ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
                        if (ret)
                                goto cleanup_queue;
+                       bounce = true;
                }
        }
 #endif
 
-       if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+       if (!bounce) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
-               mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
-               if (ret)
-                       goto cleanup_queue;
-
-
-               mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+               ret = mmc_queue_alloc_sgs(mq, host->max_segs);
                if (ret)
                        goto cleanup_queue;
        }
@@ -296,27 +349,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
-               goto free_bounce_sg;
+               goto cleanup_queue;
        }
 
        return 0;
- free_bounce_sg:
-       kfree(mqrq_cur->bounce_sg);
-       mqrq_cur->bounce_sg = NULL;
-       kfree(mqrq_prev->bounce_sg);
-       mqrq_prev->bounce_sg = NULL;
 
  cleanup_queue:
-       kfree(mqrq_cur->sg);
-       mqrq_cur->sg = NULL;
-       kfree(mqrq_cur->bounce_buf);
-       mqrq_cur->bounce_buf = NULL;
-
-       kfree(mqrq_prev->sg);
-       mqrq_prev->sg = NULL;
-       kfree(mqrq_prev->bounce_buf);
-       mqrq_prev->bounce_buf = NULL;
-
+       mmc_queue_reqs_free_bufs(mq);
+       kfree(mq->mqrq);
+       mq->mqrq = NULL;
+blk_cleanup:
        blk_cleanup_queue(mq->queue);
        return ret;
 }
@@ -325,8 +367,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 {
        struct request_queue *q = mq->queue;
        unsigned long flags;
-       struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-       struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 
        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);
@@ -340,71 +380,14 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
-       kfree(mqrq_cur->bounce_sg);
-       mqrq_cur->bounce_sg = NULL;
-
-       kfree(mqrq_cur->sg);
-       mqrq_cur->sg = NULL;
-
-       kfree(mqrq_cur->bounce_buf);
-       mqrq_cur->bounce_buf = NULL;
-
-       kfree(mqrq_prev->bounce_sg);
-       mqrq_prev->bounce_sg = NULL;
-
-       kfree(mqrq_prev->sg);
-       mqrq_prev->sg = NULL;
-
-       kfree(mqrq_prev->bounce_buf);
-       mqrq_prev->bounce_buf = NULL;
+       mmc_queue_reqs_free_bufs(mq);
+       kfree(mq->mqrq);
+       mq->mqrq = NULL;
 
        mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
 
-int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
-{
-       struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
-       struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
-       int ret = 0;
-
-
-       mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
-       if (!mqrq_cur->packed) {
-               pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
-                       mmc_card_name(card));
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
-       if (!mqrq_prev->packed) {
-               pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
-                       mmc_card_name(card));
-               kfree(mqrq_cur->packed);
-               mqrq_cur->packed = NULL;
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       INIT_LIST_HEAD(&mqrq_cur->packed->list);
-       INIT_LIST_HEAD(&mqrq_prev->packed->list);
-
-out:
-       return ret;
-}
-
-void mmc_packed_clean(struct mmc_queue *mq)
-{
-       struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
-       struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
-
-       kfree(mqrq_cur->packed);
-       mqrq_cur->packed = NULL;
-       kfree(mqrq_prev->packed);
-       mqrq_prev->packed = NULL;
-}
-
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend
@@ -449,41 +432,6 @@ void mmc_queue_resume(struct mmc_queue *mq)
        }
 }
 
-static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
-                                           struct mmc_packed *packed,
-                                           struct scatterlist *sg,
-                                           enum mmc_packed_type cmd_type)
-{
-       struct scatterlist *__sg = sg;
-       unsigned int sg_len = 0;
-       struct request *req;
-
-       if (mmc_packed_wr(cmd_type)) {
-               unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
-               unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
-               unsigned int len, remain, offset = 0;
-               u8 *buf = (u8 *)packed->cmd_hdr;
-
-               remain = hdr_sz;
-               do {
-                       len = min(remain, max_seg_sz);
-                       sg_set_buf(__sg, buf + offset, len);
-                       offset += len;
-                       remain -= len;
-                       sg_unmark_end(__sg++);
-                       sg_len++;
-               } while (remain);
-       }
-
-       list_for_each_entry(req, &packed->list, queuelist) {
-               sg_len += blk_rq_map_sg(mq->queue, req, __sg);
-               __sg = sg + (sg_len - 1);
-               sg_unmark_end(__sg++);
-       }
-       sg_mark_end(sg + (sg_len - 1));
-       return sg_len;
-}
-
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
@@ -492,26 +440,12 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
-       enum mmc_packed_type cmd_type;
        int i;
 
-       cmd_type = mqrq->cmd_type;
-
-       if (!mqrq->bounce_buf) {
-               if (mmc_packed_cmd(cmd_type))
-                       return mmc_queue_packed_map_sg(mq, mqrq->packed,
-                                                      mqrq->sg, cmd_type);
-               else
-                       return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
-       }
-
-       BUG_ON(!mqrq->bounce_sg);
+       if (!mqrq->bounce_buf)
+               return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
 
-       if (mmc_packed_cmd(cmd_type))
-               sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
-                                                mqrq->bounce_sg, cmd_type);
-       else
-               sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+       sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
        mqrq->bounce_sg_len = sg_len;
 
index 342f1e3f301e9e6e7d0cc918f3f38aae32a88480..dac8c3d010dd9615a576256ec45b2dd7cef5a6f2 100644 (file)
@@ -11,6 +11,7 @@ static inline bool mmc_req_is_special(struct request *req)
 
 struct request;
 struct task_struct;
+struct mmc_blk_data;
 
 struct mmc_blk_request {
        struct mmc_request      mrq;
@@ -21,23 +22,6 @@ struct mmc_blk_request {
        int                     retune_retry_done;
 };
 
-enum mmc_packed_type {
-       MMC_PACKED_NONE = 0,
-       MMC_PACKED_WRITE,
-};
-
-#define mmc_packed_cmd(type)   ((type) != MMC_PACKED_NONE)
-#define mmc_packed_wr(type)    ((type) == MMC_PACKED_WRITE)
-
-struct mmc_packed {
-       struct list_head        list;
-       __le32                  cmd_hdr[1024];
-       unsigned int            blocks;
-       u8                      nr_entries;
-       u8                      retries;
-       s16                     idx_failure;
-};
-
 struct mmc_queue_req {
        struct request          *req;
        struct mmc_blk_request  brq;
@@ -46,8 +30,6 @@ struct mmc_queue_req {
        struct scatterlist      *bounce_sg;
        unsigned int            bounce_sg_len;
        struct mmc_async_req    mmc_active;
-       enum mmc_packed_type    cmd_type;
-       struct mmc_packed       *packed;
 };
 
 struct mmc_queue {
@@ -57,11 +39,13 @@ struct mmc_queue {
        unsigned int            flags;
 #define MMC_QUEUE_SUSPENDED    (1 << 0)
 #define MMC_QUEUE_NEW_REQUEST  (1 << 1)
-       void                    *data;
+       bool                    asleep;
+       struct mmc_blk_data     *blkdata;
        struct request_queue    *queue;
-       struct mmc_queue_req    mqrq[2];
+       struct mmc_queue_req    *mqrq;
        struct mmc_queue_req    *mqrq_cur;
        struct mmc_queue_req    *mqrq_prev;
+       int                     qdepth;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -75,9 +59,6 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
 extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
 extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
-extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
-extern void mmc_packed_clean(struct mmc_queue *);
-
 extern int mmc_access_rpmb(struct mmc_queue *);
 
 #endif
index 5af6fb9a9ce2e56cf78ce3f3629aee6a63f0f6a1..491c187744f58ab4b808dd8b780af3b3526834aa 100644 (file)
@@ -135,8 +135,6 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port)
 {
        struct sdio_func *func;
 
-       BUG_ON(sdio_uart_table[port->index] != port);
-
        spin_lock(&sdio_uart_table_lock);
        sdio_uart_table[port->index] = NULL;
        spin_unlock(&sdio_uart_table_lock);
index 2553d903a82b9eaa8577c1269bc9f63e18b00085..543eadd230e55c8918c45c3ef3b6428cbb73ed97 100644 (file)
@@ -306,16 +306,16 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
                mrq->sbc->mrq = mrq;
        }
        if (mrq->data) {
-               BUG_ON(mrq->data->blksz > host->max_blk_size);
-               BUG_ON(mrq->data->blocks > host->max_blk_count);
-               BUG_ON(mrq->data->blocks * mrq->data->blksz >
-                       host->max_req_size);
-
+               if (mrq->data->blksz > host->max_blk_size ||
+                   mrq->data->blocks > host->max_blk_count ||
+                   mrq->data->blocks * mrq->data->blksz > host->max_req_size)
+                       return -EINVAL;
 #ifdef CONFIG_MMC_DEBUG
                sz = 0;
                for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
                        sz += sg->length;
-               BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
+               if (sz != mrq->data->blocks * mrq->data->blksz)
+                       return -EINVAL;
 #endif
 
                mrq->cmd->data = mrq->data;
@@ -349,8 +349,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
        int timeout;
        bool use_busy_signal;
 
-       BUG_ON(!card);
-
        if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
                return;
 
@@ -380,7 +378,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
        mmc_retune_hold(card->host);
 
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                       EXT_CSD_BKOPS_START, 1, timeout,
+                       EXT_CSD_BKOPS_START, 1, timeout, 0,
                        use_busy_signal, true, false);
        if (err) {
                pr_warn("%s: Error %d starting bkops\n",
@@ -497,32 +495,28 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
  *
  * Returns enum mmc_blk_status after checking errors.
  */
-static int mmc_wait_for_data_req_done(struct mmc_host *host,
+static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
                                      struct mmc_request *mrq,
                                      struct mmc_async_req *next_req)
 {
        struct mmc_command *cmd;
        struct mmc_context_info *context_info = &host->context_info;
-       int err;
-       unsigned long flags;
+       enum mmc_blk_status status;
 
        while (1) {
                wait_event_interruptible(context_info->wait,
                                (context_info->is_done_rcv ||
                                 context_info->is_new_req));
-               spin_lock_irqsave(&context_info->lock, flags);
                context_info->is_waiting_last_req = false;
-               spin_unlock_irqrestore(&context_info->lock, flags);
                if (context_info->is_done_rcv) {
                        context_info->is_done_rcv = false;
-                       context_info->is_new_req = false;
                        cmd = mrq->cmd;
 
                        if (!cmd->error || !cmd->retries ||
                            mmc_card_removed(host->card)) {
-                               err = host->areq->err_check(host->card,
-                                                           host->areq);
-                               break; /* return err */
+                               status = host->areq->err_check(host->card,
+                                                              host->areq);
+                               break; /* return status */
                        } else {
                                mmc_retune_recheck(host);
                                pr_info("%s: req failed (CMD%u): %d, retrying...\n",
@@ -534,13 +528,12 @@ static int mmc_wait_for_data_req_done(struct mmc_host *host,
                                continue; /* wait for done/new event again */
                        }
                } else if (context_info->is_new_req) {
-                       context_info->is_new_req = false;
                        if (!next_req)
                                return MMC_BLK_NEW_REQUEST;
                }
        }
        mmc_retune_release(host);
-       return err;
+       return status;
 }
 
 void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
@@ -611,18 +604,15 @@ EXPORT_SYMBOL(mmc_is_req_done);
  *     mmc_pre_req - Prepare for a new request
  *     @host: MMC host to prepare command
  *     @mrq: MMC request to prepare for
- *     @is_first_req: true if there is no previous started request
- *                     that may run in parellel to this call, otherwise false
  *
  *     mmc_pre_req() is called in prior to mmc_start_req() to let
  *     host prepare for the new request. Preparation of a request may be
  *     performed while another request is running on the host.
  */
-static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
-                bool is_first_req)
+static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
 {
        if (host->ops->pre_req)
-               host->ops->pre_req(host, mrq, is_first_req);
+               host->ops->pre_req(host, mrq);
 }
 
 /**
@@ -658,21 +648,22 @@ static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
  *     is returned without waiting. NULL is not an error condition.
  */
 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
-                                   struct mmc_async_req *areq, int *error)
+                                   struct mmc_async_req *areq,
+                                   enum mmc_blk_status *ret_stat)
 {
-       int err = 0;
+       enum mmc_blk_status status = MMC_BLK_SUCCESS;
        int start_err = 0;
        struct mmc_async_req *data = host->areq;
 
        /* Prepare a new request */
        if (areq)
-               mmc_pre_req(host, areq->mrq, !host->areq);
+               mmc_pre_req(host, areq->mrq);
 
        if (host->areq) {
-               err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
-               if (err == MMC_BLK_NEW_REQUEST) {
-                       if (error)
-                               *error = err;
+               status = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
+               if (status == MMC_BLK_NEW_REQUEST) {
+                       if (ret_stat)
+                               *ret_stat = status;
                        /*
                         * The previous request was not completed,
                         * nothing to return
@@ -695,27 +686,27 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
 
                        /* prepare the request again */
                        if (areq)
-                               mmc_pre_req(host, areq->mrq, !host->areq);
+                               mmc_pre_req(host, areq->mrq);
                }
        }
 
-       if (!err && areq)
+       if (status == MMC_BLK_SUCCESS && areq)
                start_err = __mmc_start_data_req(host, areq->mrq);
 
        if (host->areq)
                mmc_post_req(host, host->areq->mrq, 0);
 
         /* Cancel a prepared request if it was not started. */
-       if ((err || start_err) && areq)
+       if ((status != MMC_BLK_SUCCESS || start_err) && areq)
                mmc_post_req(host, areq->mrq, -EINVAL);
 
-       if (err)
+       if (status != MMC_BLK_SUCCESS)
                host->areq = NULL;
        else
                host->areq = areq;
 
-       if (error)
-               *error = err;
+       if (ret_stat)
+               *ret_stat = status;
        return data;
 }
 EXPORT_SYMBOL(mmc_start_req);
@@ -754,8 +745,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
        u32 status;
        unsigned long prg_wait;
 
-       BUG_ON(!card);
-
        if (!card->ext_csd.hpi_en) {
                pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
                return 1;
@@ -850,7 +839,6 @@ int mmc_stop_bkops(struct mmc_card *card)
 {
        int err = 0;
 
-       BUG_ON(!card);
        err = mmc_interrupt_hpi(card);
 
        /*
@@ -1666,8 +1654,6 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
        int err = 0;
        u32 clock;
 
-       BUG_ON(!host);
-
        /*
         * Send CMD11 only if the request is to switch the card to
         * 1.8V signalling.
@@ -1884,9 +1870,7 @@ void mmc_power_cycle(struct mmc_host *host, u32 ocr)
  */
 static void __mmc_release_bus(struct mmc_host *host)
 {
-       BUG_ON(!host);
-       BUG_ON(host->bus_refs);
-       BUG_ON(!host->bus_dead);
+       WARN_ON(!host->bus_dead);
 
        host->bus_ops = NULL;
 }
@@ -1926,15 +1910,12 @@ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
 {
        unsigned long flags;
 
-       BUG_ON(!host);
-       BUG_ON(!ops);
-
        WARN_ON(!host->claimed);
 
        spin_lock_irqsave(&host->lock, flags);
 
-       BUG_ON(host->bus_ops);
-       BUG_ON(host->bus_refs);
+       WARN_ON(host->bus_ops);
+       WARN_ON(host->bus_refs);
 
        host->bus_ops = ops;
        host->bus_refs = 1;
@@ -1950,8 +1931,6 @@ void mmc_detach_bus(struct mmc_host *host)
 {
        unsigned long flags;
 
-       BUG_ON(!host);
-
        WARN_ON(!host->claimed);
        WARN_ON(!host->bus_ops);
 
@@ -2824,12 +2803,11 @@ void mmc_start_host(struct mmc_host *host)
        host->rescan_disable = 0;
        host->ios.power_mode = MMC_POWER_UNDEFINED;
 
-       mmc_claim_host(host);
-       if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
-               mmc_power_off(host);
-       else
+       if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
+               mmc_claim_host(host);
                mmc_power_up(host, host->ocr_avail);
-       mmc_release_host(host);
+               mmc_release_host(host);
+       }
 
        mmc_gpiod_request_cd_irq(host);
        _mmc_detect_change(host, 0, false);
@@ -2865,8 +2843,6 @@ void mmc_stop_host(struct mmc_host *host)
        }
        mmc_bus_put(host);
 
-       BUG_ON(host->card);
-
        mmc_claim_host(host);
        mmc_power_off(host);
        mmc_release_host(host);
@@ -3019,7 +2995,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host)
  */
 void mmc_init_context_info(struct mmc_host *host)
 {
-       spin_lock_init(&host->context_info.lock);
        host->context_info.is_new_req = false;
        host->context_info.is_done_rcv = false;
        host->context_info.is_waiting_last_req = false;
index c8451ce557ae69da2d01aed9a06c9ca6efbc556f..30623b8b86a42589ed462b5625fd99d546a43b7d 100644 (file)
@@ -321,7 +321,11 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
        for (i = 0; i < 512; i++)
                n += sprintf(buf + n, "%02x", ext_csd[i]);
        n += sprintf(buf + n, "\n");
-       BUG_ON(n != EXT_CSD_STR_LEN);
+
+       if (n != EXT_CSD_STR_LEN) {
+               err = -EINVAL;
+               goto out_free;
+       }
 
        filp->private_data = buf;
        kfree(ext_csd);
index df19777068a6237f388bb4bb9c1a8ee6917ea64b..b61b52f9da3d88bc1903e14ffc56dd276700c814 100644 (file)
@@ -618,6 +618,24 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
                        (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
                        !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
        }
+
+       /* eMMC v5.1 or later */
+       if (card->ext_csd.rev >= 8) {
+               card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
+                                            EXT_CSD_CMDQ_SUPPORTED;
+               card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
+                                           EXT_CSD_CMDQ_DEPTH_MASK) + 1;
+               /* Exclude inefficiently small queue depths */
+               if (card->ext_csd.cmdq_depth <= 2) {
+                       card->ext_csd.cmdq_support = false;
+                       card->ext_csd.cmdq_depth = 0;
+               }
+               if (card->ext_csd.cmdq_support) {
+                       pr_debug("%s: Command Queue supported depth %u\n",
+                                mmc_hostname(card->host),
+                                card->ext_csd.cmdq_depth);
+               }
+       }
 out:
        return err;
 }
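This hunk only records the card's capability; nothing in the commit acts on it yet. A sketch, an assumption rather than part of the commit, of how a later consumer might check the new ext_csd fields:

/* Sketch only, not part of the commit: a later consumer of the new fields. */
static bool example_card_can_cmdq(struct mmc_card *card)
{
	/* mmc_decode_ext_csd() above sets cmdq_support only for eMMC v5.1+
	 * cards reporting a queue depth worth using; the depth itself is
	 * available in card->ext_csd.cmdq_depth. */
	return card->ext_csd.cmdq_support;
}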
@@ -1003,19 +1021,6 @@ static int mmc_select_bus_width(struct mmc_card *card)
        return err;
 }
 
-/* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
-{
-       u32 status;
-       int err;
-
-       err = mmc_send_status(card, &status);
-       if (err)
-               return err;
-
-       return mmc_switch_status_error(card->host, status);
-}
-
 /*
  * Switch to the high-speed mode
  */
@@ -1025,13 +1030,8 @@ static int mmc_select_hs(struct mmc_card *card)
 
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                           EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
-                          card->ext_csd.generic_cmd6_time,
-                          true, false, true);
-       if (!err) {
-               mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
-               err = mmc_switch_status(card);
-       }
-
+                          card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
+                          true, true, true);
        if (err)
                pr_warn("%s: switch to high-speed failed, err:%d\n",
                        mmc_hostname(card->host), err);
@@ -1058,10 +1058,12 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
        ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
                EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
 
-       err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                       EXT_CSD_BUS_WIDTH,
-                       ext_csd_bits,
-                       card->ext_csd.generic_cmd6_time);
+       err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                          EXT_CSD_BUS_WIDTH,
+                          ext_csd_bits,
+                          card->ext_csd.generic_cmd6_time,
+                          MMC_TIMING_MMC_DDR52,
+                          true, true, true);
        if (err) {
                pr_err("%s: switch to bus width %d ddr failed\n",
                        mmc_hostname(host), 1 << bus_width);
@@ -1104,9 +1106,6 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
        if (err)
                err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
 
-       if (!err)
-               mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
-
        return err;
 }
 
@@ -1128,7 +1127,7 @@ static int mmc_select_hs400(struct mmc_card *card)
        val = EXT_CSD_TIMING_HS;
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                           EXT_CSD_HS_TIMING, val,
-                          card->ext_csd.generic_cmd6_time,
+                          card->ext_csd.generic_cmd6_time, 0,
                           true, false, true);
        if (err) {
                pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
@@ -1163,7 +1162,7 @@ static int mmc_select_hs400(struct mmc_card *card)
              card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                           EXT_CSD_HS_TIMING, val,
-                          card->ext_csd.generic_cmd6_time,
+                          card->ext_csd.generic_cmd6_time, 0,
                           true, false, true);
        if (err) {
                pr_err("%s: switch to hs400 failed, err:%d\n",
@@ -1206,7 +1205,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
        /* Switch HS400 to HS DDR */
        val = EXT_CSD_TIMING_HS;
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
-                          val, card->ext_csd.generic_cmd6_time,
+                          val, card->ext_csd.generic_cmd6_time, 0,
                           true, false, true);
        if (err)
                goto out_err;
@@ -1220,7 +1219,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
        /* Switch HS DDR to HS */
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
                           EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
-                          true, false, true);
+                          0, true, false, true);
        if (err)
                goto out_err;
 
@@ -1234,14 +1233,19 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
        val = EXT_CSD_TIMING_HS200 |
              card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
-                          val, card->ext_csd.generic_cmd6_time,
+                          val, card->ext_csd.generic_cmd6_time, 0,
                           true, false, true);
        if (err)
                goto out_err;
 
        mmc_set_timing(host, MMC_TIMING_MMC_HS200);
 
-       err = mmc_switch_status(card);
+       /*
+        * For HS200, CRC errors are not a reliable way to know the switch
+        * failed. If there really is a problem, we would expect tuning to
+        * fail and the result to end up the same.
+        */
+       err = __mmc_switch_status(card, false);
        if (err)
                goto out_err;
 
@@ -1281,16 +1285,23 @@ static int mmc_select_hs400es(struct mmc_card *card)
                goto out_err;
 
        /* Switch card to HS mode */
-       err = mmc_select_hs(card);
-       if (err)
+       err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                          EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+                          card->ext_csd.generic_cmd6_time, 0,
+                          true, false, true);
+       if (err) {
+               pr_err("%s: switch to hs for hs400es failed, err:%d\n",
+                       mmc_hostname(host), err);
                goto out_err;
+       }
 
-       mmc_set_clock(host, card->ext_csd.hs_max_dtr);
-
+       mmc_set_timing(host, MMC_TIMING_MMC_HS);
        err = mmc_switch_status(card);
        if (err)
                goto out_err;
 
+       mmc_set_clock(host, card->ext_csd.hs_max_dtr);
+
        /* Switch card to DDR with strobe bit */
        val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1308,7 +1319,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
              card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                           EXT_CSD_HS_TIMING, val,
-                          card->ext_csd.generic_cmd6_time,
+                          card->ext_csd.generic_cmd6_time, 0,
                           true, false, true);
        if (err) {
                pr_err("%s: switch to hs400es failed, err:%d\n",
@@ -1390,14 +1401,20 @@ static int mmc_select_hs200(struct mmc_card *card)
                      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
                err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                   EXT_CSD_HS_TIMING, val,
-                                  card->ext_csd.generic_cmd6_time,
+                                  card->ext_csd.generic_cmd6_time, 0,
                                   true, false, true);
                if (err)
                        goto err;
                old_timing = host->ios.timing;
                mmc_set_timing(host, MMC_TIMING_MMC_HS200);
 
-               err = mmc_switch_status(card);
+               /*
+                * For HS200, CRC errors are not a reliable way to know the
+                * switch failed. If there really is a problem, we would expect
+                * tuning to fail and the result to end up the same.
+                */
+               err = __mmc_switch_status(card, false);
+
                /*
                 * mmc_select_timing() assumes timing has not changed if
                 * it is a switch error.
@@ -1480,7 +1497,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
        u32 cid[4];
        u32 rocr;
 
-       BUG_ON(!host);
        WARN_ON(!host->claimed);
 
        /* Set correct bus mode for MMC before attempting init */
@@ -1854,7 +1870,7 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
 
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                        EXT_CSD_POWER_OFF_NOTIFICATION,
-                       notify_type, timeout, true, false, false);
+                       notify_type, timeout, 0, true, false, false);
        if (err)
                pr_err("%s: Power Off Notification timed out, %u\n",
                       mmc_hostname(card->host), timeout);
@@ -1870,9 +1886,6 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
  */
 static void mmc_remove(struct mmc_host *host)
 {
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        mmc_remove_card(host->card);
        host->card = NULL;
 }
@@ -1892,9 +1905,6 @@ static void mmc_detect(struct mmc_host *host)
 {
        int err;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        mmc_get_card(host->card);
 
        /*
@@ -1920,9 +1930,6 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
        unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
                                        EXT_CSD_POWER_OFF_LONG;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        mmc_claim_host(host);
 
        if (mmc_card_suspended(host->card))
@@ -1979,9 +1986,6 @@ static int _mmc_resume(struct mmc_host *host)
 {
        int err = 0;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        mmc_claim_host(host);
 
        if (!mmc_card_suspended(host->card))
@@ -2114,7 +2118,6 @@ int mmc_attach_mmc(struct mmc_host *host)
        int err;
        u32 ocr, rocr;
 
-       BUG_ON(!host);
        WARN_ON(!host->claimed);
 
        /* Set correct bus mode for MMC before attempting attach */
index ad6e9798e9491f32ef6b7b9957c9853d2a251a0f..b11c3455b040c5b03e8083e9b8e4f234b0752537 100644 (file)
@@ -54,21 +54,15 @@ static const u8 tuning_blk_pattern_8bit[] = {
        0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
 };
 
-static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
-                                   bool ignore_crc)
+int mmc_send_status(struct mmc_card *card, u32 *status)
 {
        int err;
        struct mmc_command cmd = {0};
 
-       BUG_ON(!card);
-       BUG_ON(!card->host);
-
        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-       if (ignore_crc)
-               cmd.flags &= ~MMC_RSP_CRC;
 
        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
@@ -83,17 +77,10 @@ static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
        return 0;
 }
 
-int mmc_send_status(struct mmc_card *card, u32 *status)
-{
-       return __mmc_send_status(card, status, false);
-}
-
 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 {
        struct mmc_command cmd = {0};
 
-       BUG_ON(!host);
-
        cmd.opcode = MMC_SELECT_CARD;
 
        if (card) {
@@ -109,7 +96,6 @@ static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 
 int mmc_select_card(struct mmc_card *card)
 {
-       BUG_ON(!card);
 
        return _mmc_select_card(card->host, card);
 }
@@ -181,8 +167,6 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
        struct mmc_command cmd = {0};
        int i, err = 0;
 
-       BUG_ON(!host);
-
        cmd.opcode = MMC_SEND_OP_COND;
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
@@ -221,9 +205,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
        int err;
        struct mmc_command cmd = {0};
 
-       BUG_ON(!host);
-       BUG_ON(!cid);
-
        cmd.opcode = MMC_ALL_SEND_CID;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
@@ -241,9 +222,6 @@ int mmc_set_relative_addr(struct mmc_card *card)
 {
        struct mmc_command cmd = {0};
 
-       BUG_ON(!card);
-       BUG_ON(!card->host);
-
        cmd.opcode = MMC_SET_RELATIVE_ADDR;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -257,9 +235,6 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
        int err;
        struct mmc_command cmd = {0};
 
-       BUG_ON(!host);
-       BUG_ON(!cxd);
-
        cmd.opcode = opcode;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
@@ -440,7 +415,7 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
        return err;
 }
 
-int mmc_switch_status_error(struct mmc_host *host, u32 status)
+static int mmc_switch_status_error(struct mmc_host *host, u32 status)
 {
        if (mmc_host_is_spi(host)) {
                if (status & R1_SPI_ILLEGAL_COMMAND)
@@ -455,6 +430,88 @@ int mmc_switch_status_error(struct mmc_host *host, u32 status)
        return 0;
 }
 
+/* Caller must hold re-tuning */
+int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
+{
+       u32 status;
+       int err;
+
+       err = mmc_send_status(card, &status);
+       if (!crc_err_fatal && err == -EILSEQ)
+               return 0;
+       if (err)
+               return err;
+
+       return mmc_switch_status_error(card->host, status);
+}
+
+int mmc_switch_status(struct mmc_card *card)
+{
+       return __mmc_switch_status(card, true);
+}
+
+static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+                       bool send_status, bool retry_crc_err)
+{
+       struct mmc_host *host = card->host;
+       int err;
+       unsigned long timeout;
+       u32 status = 0;
+       bool expired = false;
+       bool busy = false;
+
+       /* We have an unspecified cmd timeout, use the fallback value. */
+       if (!timeout_ms)
+               timeout_ms = MMC_OPS_TIMEOUT_MS;
+
+       /*
+        * If we are not allowed to poll by using CMD13 and the host isn't
+        * capable of polling by using ->card_busy(), rely on waiting the
+        * stated timeout to be sufficient.
+        */
+       if (!send_status && !host->ops->card_busy) {
+               mmc_delay(timeout_ms);
+               return 0;
+       }
+
+       timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
+       do {
+               /*
+                * Due to the possibility of being preempted while polling,
+                * check the expiration time first.
+                */
+               expired = time_after(jiffies, timeout);
+
+               if (host->ops->card_busy) {
+                       busy = host->ops->card_busy(host);
+               } else {
+                       err = mmc_send_status(card, &status);
+                       if (retry_crc_err && err == -EILSEQ) {
+                               busy = true;
+                       } else if (err) {
+                               return err;
+                       } else {
+                               err = mmc_switch_status_error(host, status);
+                               if (err)
+                                       return err;
+                               busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
+                       }
+               }
+
+               /* Timeout if the device still remains busy. */
+               if (expired && busy) {
+                       pr_err("%s: Card stuck being busy! %s\n",
+                               mmc_hostname(host), __func__);
+                       return -ETIMEDOUT;
+               }
+       } while (busy);
+
+       if (host->ops->card_busy && send_status)
+               return mmc_switch_status(card);
+
+       return 0;
+}
+
 /**
  *     __mmc_switch - modify EXT_CSD register
  *     @card: the MMC card associated with the data transfer
@@ -463,24 +520,22 @@ int mmc_switch_status_error(struct mmc_host *host, u32 status)
  *     @value: value to program into EXT_CSD register
  *     @timeout_ms: timeout (ms) for operation performed by register write,
  *                   timeout of zero implies maximum possible timeout
+ *     @timing: new timing to change to
  *     @use_busy_signal: use the busy signal as response type
  *     @send_status: send status cmd to poll for busy
- *     @ignore_crc: ignore CRC errors when sending status cmd to poll for busy
+ *     @retry_crc_err: retry if a CRC error occurs while polling with CMD13 for busy
  *
  *     Modifies the EXT_CSD register for selected card.
  */
 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
-               unsigned int timeout_ms, bool use_busy_signal, bool send_status,
-               bool ignore_crc)
+               unsigned int timeout_ms, unsigned char timing,
+               bool use_busy_signal, bool send_status, bool retry_crc_err)
 {
        struct mmc_host *host = card->host;
        int err;
        struct mmc_command cmd = {0};
-       unsigned long timeout;
-       u32 status = 0;
        bool use_r1b_resp = use_busy_signal;
-       bool expired = false;
-       bool busy = false;
+       unsigned char old_timing = host->ios.timing;
 
        mmc_retune_hold(host);
 
@@ -522,62 +577,24 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
        if (!use_busy_signal)
                goto out;
 
-       /*
-        * CRC errors shall only be ignored in cases were CMD13 is used to poll
-        * to detect busy completion.
-        */
-       if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
-               ignore_crc = false;
-
-       /* We have an unspecified cmd timeout, use the fallback value. */
-       if (!timeout_ms)
-               timeout_ms = MMC_OPS_TIMEOUT_MS;
-
-       /* Must check status to be sure of no errors. */
-       timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
-       do {
-               /*
-                * Due to the possibility of being preempted after
-                * sending the status command, check the expiration
-                * time first.
-                */
-               expired = time_after(jiffies, timeout);
-               if (send_status) {
-                       err = __mmc_send_status(card, &status, ignore_crc);
-                       if (err)
-                               goto out;
-               }
-               if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
-                       break;
-               if (host->ops->card_busy) {
-                       if (!host->ops->card_busy(host))
-                               break;
-                       busy = true;
-               }
-               if (mmc_host_is_spi(host))
-                       break;
+       /* Switch to new timing before poll and check switch status. */
+       if (timing)
+               mmc_set_timing(host, timing);
 
-               /*
-                * We are not allowed to issue a status command and the host
-                * does'nt support MMC_CAP_WAIT_WHILE_BUSY, then we can only
-                * rely on waiting for the stated timeout to be sufficient.
-                */
-               if (!send_status && !host->ops->card_busy) {
-                       mmc_delay(timeout_ms);
-                       goto out;
-               }
+       /* If SPI, or if HW busy detection was used above, we don't need to poll. */
+       if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
+               mmc_host_is_spi(host)) {
+               if (send_status)
+                       err = mmc_switch_status(card);
+               goto out_tim;
+       }
 
-               /* Timeout if the device never leaves the program state. */
-               if (expired &&
-                   (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy)) {
-                       pr_err("%s: Card stuck in programming state! %s\n",
-                               mmc_hostname(host), __func__);
-                       err = -ETIMEDOUT;
-                       goto out;
-               }
-       } while (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy);
+       /* Let's try to poll to find out when the command is completed. */
+       err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
 
-       err = mmc_switch_status_error(host, status);
+out_tim:
+       if (err && timing)
+               mmc_set_timing(host, old_timing);
 out:
        mmc_retune_release(host);
 
@@ -587,8 +604,8 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms)
 {
-       return __mmc_switch(card, set, index, value, timeout_ms, true, true,
-                               false);
+       return __mmc_switch(card, set, index, value, timeout_ms, 0,
+                       true, true, false);
 }
 EXPORT_SYMBOL_GPL(mmc_switch);
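The plain mmc_switch() wrapper above passes timing = 0, meaning no timing change. Callers that do switch timing pass the target mode so __mmc_switch() can apply it before polling for busy and restore the old timing on error. A condensed sketch of that pattern, not part of the commit, mirroring the mmc_select_hs() hunk earlier in this series:

/* Sketch only, not part of the commit. */
static int example_switch_to_hs(struct mmc_card *card)
{
	/* The target timing is passed to __mmc_switch(), which applies it
	 * before polling for busy and rolls it back if the switch fails. */
	return __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			    EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			    card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
			    true, true, true);
}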
 
@@ -661,6 +678,31 @@ int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
 }
 EXPORT_SYMBOL_GPL(mmc_send_tuning);
 
+int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
+{
+       struct mmc_command cmd = {0};
+
+       /*
+        * The eMMC specification allows CMD12 to be used to stop a tuning
+        * command, but the SD specification does not, so do nothing unless it
+        * is eMMC.
+        */
+       if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
+               return 0;
+
+       cmd.opcode = MMC_STOP_TRANSMISSION;
+       cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+       /*
+        * For drivers that override R1 to R1b, set an arbitrary timeout based
+        * on the tuning timeout i.e. 150ms.
+        */
+       cmd.busy_timeout = 150;
+
+       return mmc_wait_for_cmd(host, &cmd, 0);
+}
+EXPORT_SYMBOL_GPL(mmc_abort_tuning);
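mmc_abort_tuning() is intended for host drivers whose eMMC tuning attempt goes wrong and who need the card to stop sending tuning blocks before falling back. A hedged sketch of such a call site, not part of the commit; only mmc_send_tuning() and mmc_abort_tuning() are real, the surrounding function is illustrative:

/* Sketch only, not part of the commit. */
static int example_host_retune(struct mmc_host *host, u32 opcode)
{
	int err = mmc_send_tuning(host, opcode, NULL);

	if (err)
		/* Ask an eMMC card to stop the tuning sequence; this is a
		 * no-op for SD tuning opcodes. */
		mmc_abort_tuning(host, opcode);

	return err;
}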
+
 static int
 mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
                  u8 len)
index f1b8e81aaa284d1310c4da481a915cf5560e7700..abd525ed74be37b4fb3d1a4bdfeb770f835061f4 100644 (file)
@@ -27,10 +27,11 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
 int mmc_bus_test(struct mmc_card *card, u8 bus_width);
 int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
 int mmc_can_ext_csd(struct mmc_card *card);
-int mmc_switch_status_error(struct mmc_host *host, u32 status);
+int mmc_switch_status(struct mmc_card *card);
+int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
-               unsigned int timeout_ms, bool use_busy_signal, bool send_status,
-               bool ignore_crc);
+               unsigned int timeout_ms, unsigned char timing,
+               bool use_busy_signal, bool send_status, bool retry_crc_err);
 
 #endif
 
index 73c762a28dfed886bc6a2dd2e91cfe43343b361f..deb90c2ff6b423e63352283c8dfdbb935bfb4e27 100644 (file)
@@ -927,7 +927,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
        u32 cid[4];
        u32 rocr = 0;
 
-       BUG_ON(!host);
        WARN_ON(!host->claimed);
 
        err = mmc_sd_get_cid(host, ocr, cid, &rocr);
@@ -1043,9 +1042,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
  */
 static void mmc_sd_remove(struct mmc_host *host)
 {
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        mmc_remove_card(host->card);
        host->card = NULL;
 }
@@ -1065,9 +1061,6 @@ static void mmc_sd_detect(struct mmc_host *host)
 {
        int err;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        mmc_get_card(host->card);
 
        /*
@@ -1091,9 +1084,6 @@ static int _mmc_sd_suspend(struct mmc_host *host)
 {
        int err = 0;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        mmc_claim_host(host);
 
        if (mmc_card_suspended(host->card))
@@ -1136,9 +1126,6 @@ static int _mmc_sd_resume(struct mmc_host *host)
 {
        int err = 0;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        mmc_claim_host(host);
 
        if (!mmc_card_suspended(host->card))
@@ -1221,7 +1208,6 @@ int mmc_attach_sd(struct mmc_host *host)
        int err;
        u32 ocr, rocr;
 
-       BUG_ON(!host);
        WARN_ON(!host->claimed);
 
        err = mmc_send_app_op_cond(host, 0, &ocr);
index 16b774c18e75fbc9b37c982bff30e38c18944304..de125a41aa7ae31960fab8f5c8d8b5651bccdc31 100644 (file)
@@ -27,8 +27,8 @@ int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
        int err;
        struct mmc_command cmd = {0};
 
-       BUG_ON(!host);
-       BUG_ON(card && (card->host != host));
+       if (WARN_ON(card && card->host != host))
+               return -EINVAL;
 
        cmd.opcode = MMC_APP_CMD;
 
@@ -72,8 +72,8 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
 
        int i, err;
 
-       BUG_ON(!cmd);
-       BUG_ON(retries < 0);
+       if (retries < 0)
+               retries = MMC_CMD_RETRIES;
 
        err = -EIO;
 
@@ -122,9 +122,6 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
 {
        struct mmc_command cmd = {0};
 
-       BUG_ON(!card);
-       BUG_ON(!card->host);
-
        cmd.opcode = SD_APP_SET_BUS_WIDTH;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 
@@ -147,8 +144,6 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
        struct mmc_command cmd = {0};
        int i, err = 0;
 
-       BUG_ON(!host);
-
        cmd.opcode = SD_APP_OP_COND;
        if (mmc_host_is_spi(host))
                cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
@@ -224,9 +219,6 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
        int err;
        struct mmc_command cmd = {0};
 
-       BUG_ON(!host);
-       BUG_ON(!rca);
-
        cmd.opcode = SD_SEND_RELATIVE_ADDR;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
@@ -249,10 +241,6 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
        struct scatterlist sg;
        void *data_buf;
 
-       BUG_ON(!card);
-       BUG_ON(!card->host);
-       BUG_ON(!scr);
-
        /* NOTE: caller guarantees scr is heap-allocated */
 
        err = mmc_app_cmd(card->host, card);
@@ -307,9 +295,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
        struct mmc_data data = {0};
        struct scatterlist sg;
 
-       BUG_ON(!card);
-       BUG_ON(!card->host);
-
        /* NOTE: caller guarantees resp is heap-allocated */
 
        mode = !!mode;
@@ -352,10 +337,6 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr)
        struct mmc_data data = {0};
        struct scatterlist sg;
 
-       BUG_ON(!card);
-       BUG_ON(!card->host);
-       BUG_ON(!ssr);
-
        /* NOTE: caller guarantees ssr is heap-allocated */
 
        err = mmc_app_cmd(card->host, card);
index bd44ba8116d1584767e15ace328c72efb133f272..ecbc52981ba5c810463c5fb046bd3ab4b517e442 100644 (file)
@@ -63,7 +63,8 @@ static int sdio_init_func(struct mmc_card *card, unsigned int fn)
        int ret;
        struct sdio_func *func;
 
-       BUG_ON(fn > SDIO_MAX_FUNCS);
+       if (WARN_ON(fn > SDIO_MAX_FUNCS))
+               return -EINVAL;
 
        func = sdio_alloc_func(card);
        if (IS_ERR(func))
@@ -555,7 +556,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
        u32 rocr = 0;
        u32 ocr_card = ocr;
 
-       BUG_ON(!host);
        WARN_ON(!host->claimed);
 
        /* to query card if 1.8V signalling is supported */
@@ -791,9 +791,6 @@ static void mmc_sdio_remove(struct mmc_host *host)
 {
        int i;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        for (i = 0;i < host->card->sdio_funcs;i++) {
                if (host->card->sdio_func[i]) {
                        sdio_remove_func(host->card->sdio_func[i]);
@@ -820,9 +817,6 @@ static void mmc_sdio_detect(struct mmc_host *host)
 {
        int err;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        /* Make sure card is powered before detecting it */
        if (host->caps & MMC_CAP_POWER_OFF_CARD) {
                err = pm_runtime_get_sync(&host->card->dev);
@@ -916,9 +910,6 @@ static int mmc_sdio_resume(struct mmc_host *host)
 {
        int err = 0;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        /* Basic card reinitialization. */
        mmc_claim_host(host);
 
@@ -970,9 +961,6 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
 {
        int ret;
 
-       BUG_ON(!host);
-       BUG_ON(!host->card);
-
        mmc_claim_host(host);
 
        /*
@@ -1063,7 +1051,6 @@ int mmc_attach_sdio(struct mmc_host *host)
        u32 ocr, rocr;
        struct mmc_card *card;
 
-       BUG_ON(!host);
        WARN_ON(!host->claimed);
 
        err = mmc_send_io_op_cond(host, 0, &ocr);
index dcb3dee59fa5f2eaa7d273f24b31c9a346567809..f8c372839d24491f607c8a919480e19112e26ff1 100644 (file)
@@ -262,7 +262,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
        else
                prev = &card->tuples;
 
-       BUG_ON(*prev);
+       if (*prev)
+               return -EINVAL;
 
        do {
                unsigned char tpl_code, tpl_link;
index 91bbbfb29f3f9dd1eeed0976f2757589c032ab99..f1faf9acc007d5b3bbf9423ce306203523acc5fa 100644 (file)
@@ -214,7 +214,9 @@ static int sdio_card_irq_put(struct mmc_card *card)
        struct mmc_host *host = card->host;
 
        WARN_ON(!host->claimed);
-       BUG_ON(host->sdio_irqs < 1);
+
+       if (host->sdio_irqs < 1)
+               return -EINVAL;
 
        if (!--host->sdio_irqs) {
                if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
@@ -261,8 +263,8 @@ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
        int ret;
        unsigned char reg;
 
-       BUG_ON(!func);
-       BUG_ON(!func->card);
+       if (!func)
+               return -EINVAL;
 
        pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));
 
@@ -304,8 +306,8 @@ int sdio_release_irq(struct sdio_func *func)
        int ret;
        unsigned char reg;
 
-       BUG_ON(!func);
-       BUG_ON(!func->card);
+       if (!func)
+               return -EINVAL;
 
        pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));
 
index 27117ba4707312dad5f06bbf142ffe50c34efa22..babe591aea969204bb9500e6c30ff1e64bdafd81 100644 (file)
@@ -258,6 +258,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
 }
 EXPORT_SYMBOL(mmc_gpiod_request_cd);
 
+bool mmc_can_gpio_cd(struct mmc_host *host)
+{
+       struct mmc_gpio *ctx = host->slot.handler_priv;
+
+       return ctx->cd_gpio ? true : false;
+}
+EXPORT_SYMBOL(mmc_can_gpio_cd);
+
 /**
  * mmc_gpiod_request_ro - request a gpio descriptor for write protection
  * @host: mmc host
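
The new mmc_can_gpio_cd() above lets host drivers ask whether card detect is GPIO-backed. A hedged sketch of a typical use in a host driver's suspend path, assuming the declaration lands in <linux/mmc/slot-gpio.h> alongside the other slot-gpio helpers; the helper name example_keep_biu_clk() is hypothetical, while mmc_card_is_removable() is existing core API:

    #include <linux/mmc/host.h>
    #include <linux/mmc/slot-gpio.h>
    #include <linux/types.h>

    /*
     * Sketch: a host driver may keep its bus-interface clock alive over
     * suspend only when card detect does not come from a GPIO and the
     * card is removable, so insertion/removal is still noticed.
     */
    static bool example_keep_biu_clk(struct mmc_host *mmc)
    {
            return !mmc_can_gpio_cd(mmc) && mmc_card_is_removable(mmc);
    }
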
index 5274f503a39ad9c034e23b634ff0ec6634cbbd78..2eb97014dc3f2073c3b471688ba31598d023bc9e 100644 (file)
@@ -135,7 +135,6 @@ config MMC_SDHCI_OF_AT91
        tristate "SDHCI OF support for the Atmel SDMMC controller"
        depends on MMC_SDHCI_PLTFM
        depends on OF
-       select MMC_SDHCI_IO_ACCESSORS
        help
          This selects the Atmel SDMMC driver
 
@@ -144,6 +143,7 @@ config MMC_SDHCI_OF_ESDHC
        depends on MMC_SDHCI_PLTFM
        depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
        select MMC_SDHCI_IO_ACCESSORS
+       select FSL_GUTS
        help
          This selects the Freescale eSDHC controller support.
 
@@ -165,6 +165,17 @@ config MMC_SDHCI_OF_HLWD
 
          If unsure, say N.
 
+config MMC_SDHCI_CADENCE
+       tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller"
+       depends on MMC_SDHCI_PLTFM
+       depends on OF
+       help
+         This selects the Cadence SD/SDIO/eMMC driver.
+
+         If you have a controller with this interface, say Y or M here.
+
+         If unsure, say N.
+
 config MMC_SDHCI_CNS3XXX
        tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
        depends on ARCH_CNS3XXX
@@ -322,6 +333,16 @@ config MMC_SDHCI_IPROC
 
          If unsure, say N.
 
+config MMC_MESON_GX
+       tristate "Amlogic S905/GX* SD/MMC Host Controller support"
+       depends on ARCH_MESON && MMC
+       help
+         This selects support for the Amlogic SD/MMC Host Controller
+         found on the S905/GX* family of SoCs.  This controller is
+         MMC 5.1 compliant and supports SD, eMMC and SDIO interfaces.
+
+         If you have a controller with this interface, say Y here.
+
 config MMC_MOXART
        tristate "MOXART SD/MMC Host Controller support"
        depends on ARCH_MOXART && MMC
index e2bdaaf431841535b936600af56b8e38f52f9845..ccc9c4cba154c6ba1849151353526e555d63c8a6 100644 (file)
@@ -53,6 +53,7 @@ obj-$(CONFIG_MMC_JZ4740)      += jz4740_mmc.o
 obj-$(CONFIG_MMC_VUB300)       += vub300.o
 obj-$(CONFIG_MMC_USHC)         += ushc.o
 obj-$(CONFIG_MMC_WMT)          += wmt-sdmmc.o
+obj-$(CONFIG_MMC_MESON_GX)     += meson-gx-mmc.o
 obj-$(CONFIG_MMC_MOXART)       += moxart-mmc.o
 obj-$(CONFIG_MMC_SUNXI)                += sunxi-mmc.o
 obj-$(CONFIG_MMC_USDHI6ROL0)   += usdhi6rol0.o
@@ -62,6 +63,7 @@ obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
 obj-$(CONFIG_MMC_REALTEK_USB)  += rtsx_usb_sdmmc.o
 
 obj-$(CONFIG_MMC_SDHCI_PLTFM)          += sdhci-pltfm.o
+obj-$(CONFIG_MMC_SDHCI_CADENCE)                += sdhci-cadence.o
 obj-$(CONFIG_MMC_SDHCI_CNS3XXX)                += sdhci-cns3xxx.o
 obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX)      += sdhci-esdhc-imx.o
 obj-$(CONFIG_MMC_SDHCI_DOVE)           += sdhci-dove.o
index 8fa478c3b0db7f4f05cc122d135af20c6065da78..36b5af8eadb88eae79e74cad782259ba100b9910 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/mmc/mmc.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/mmc/slot-gpio.h>
 
 #include <linux/platform_data/mmc-davinci.h>
 
@@ -1029,9 +1030,10 @@ static int mmc_davinci_get_cd(struct mmc_host *mmc)
        struct platform_device *pdev = to_platform_device(mmc->parent);
        struct davinci_mmc_config *config = pdev->dev.platform_data;
 
-       if (!config || !config->get_cd)
-               return -ENOSYS;
-       return config->get_cd(pdev->id);
+       if (config && config->get_cd)
+               return config->get_cd(pdev->id);
+
+       return mmc_gpio_get_cd(mmc);
 }
 
 static int mmc_davinci_get_ro(struct mmc_host *mmc)
@@ -1039,9 +1041,10 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
        struct platform_device *pdev = to_platform_device(mmc->parent);
        struct davinci_mmc_config *config = pdev->dev.platform_data;
 
-       if (!config || !config->get_ro)
-               return -ENOSYS;
-       return config->get_ro(pdev->id);
+       if (config && config->get_ro)
+               return config->get_ro(pdev->id);
+
+       return mmc_gpio_get_ro(mmc);
 }
 
 static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1159,49 +1162,53 @@ static const struct of_device_id davinci_mmc_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
 
-static struct davinci_mmc_config
-       *mmc_parse_pdata(struct platform_device *pdev)
+static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
 {
-       struct device_node *np;
+       struct platform_device *pdev = to_platform_device(mmc->parent);
        struct davinci_mmc_config *pdata = pdev->dev.platform_data;
-       const struct of_device_id *match =
-               of_match_device(davinci_mmc_dt_ids, &pdev->dev);
-       u32 data;
-
-       np = pdev->dev.of_node;
-       if (!np)
-               return pdata;
-
-       pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-       if (!pdata) {
-               dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n");
-               goto nodata;
-       }
+       struct mmc_davinci_host *host;
+       int ret;
 
-       if (match)
-               pdev->id_entry = match->data;
+       if (!pdata)
+               return -EINVAL;
 
-       if (of_property_read_u32(np, "max-frequency", &pdata->max_freq))
-               dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n");
+       host = mmc_priv(mmc);
+       if (!host)
+               return -EINVAL;
 
-       of_property_read_u32(np, "bus-width", &data);
-       switch (data) {
-       case 1:
-       case 4:
-       case 8:
-               pdata->wires = data;
-               break;
-       default:
-               pdata->wires = 1;
-               dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n");
-       }
-nodata:
-       return pdata;
+       if (pdata && pdata->nr_sg)
+               host->nr_sg = pdata->nr_sg - 1;
+
+       if (pdata && (pdata->wires == 4 || pdata->wires == 0))
+               mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+       if (pdata && (pdata->wires == 8))
+               mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
+
+       mmc->f_min = 312500;
+       mmc->f_max = 25000000;
+       if (pdata && pdata->max_freq)
+               mmc->f_max = pdata->max_freq;
+       if (pdata && pdata->caps)
+               mmc->caps |= pdata->caps;
+
+       /* Register a cd gpio, if there is not one, enable polling */
+       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+       if (ret == -EPROBE_DEFER)
+               return ret;
+       else if (ret)
+               mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+       ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+       if (ret == -EPROBE_DEFER)
+               return ret;
+
+       return 0;
 }
 
 static int __init davinci_mmcsd_probe(struct platform_device *pdev)
 {
-       struct davinci_mmc_config *pdata = NULL;
+       const struct of_device_id *match;
        struct mmc_davinci_host *host = NULL;
        struct mmc_host *mmc = NULL;
        struct resource *r, *mem = NULL;
@@ -1209,12 +1216,6 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
        size_t mem_size;
        const struct platform_device_id *id_entry;
 
-       pdata = mmc_parse_pdata(pdev);
-       if (pdata == NULL) {
-               dev_err(&pdev->dev, "Couldn't get platform data\n");
-               return -ENOENT;
-       }
-
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r)
                return -ENODEV;
@@ -1253,14 +1254,28 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
 
        host->mmc_input_clk = clk_get_rate(host->clk);
 
-       init_mmcsd_host(host);
-
-       if (pdata->nr_sg)
-               host->nr_sg = pdata->nr_sg - 1;
+       match = of_match_device(davinci_mmc_dt_ids, &pdev->dev);
+       if (match) {
+               pdev->id_entry = match->data;
+               ret = mmc_of_parse(mmc);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "could not parse of data: %d\n", ret);
+                       goto parse_fail;
+               }
+       } else {
+               ret = mmc_davinci_parse_pdata(mmc);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "could not parse platform data: %d\n", ret);
+                       goto parse_fail;
+       }       }
 
        if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
                host->nr_sg = MAX_NR_SG;
 
+       init_mmcsd_host(host);
+
        host->use_dma = use_dma;
        host->mmc_irq = irq;
        host->sdio_irq = platform_get_irq(pdev, 1);
@@ -1273,27 +1288,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
                        host->use_dma = 0;
        }
 
-       /* REVISIT:  someday, support IRQ-driven card detection.  */
-       mmc->caps |= MMC_CAP_NEEDS_POLL;
        mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
 
-       if (pdata && (pdata->wires == 4 || pdata->wires == 0))
-               mmc->caps |= MMC_CAP_4_BIT_DATA;
-
-       if (pdata && (pdata->wires == 8))
-               mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
-
        id_entry = platform_get_device_id(pdev);
        if (id_entry)
                host->version = id_entry->driver_data;
 
        mmc->ops = &mmc_davinci_ops;
-       mmc->f_min = 312500;
-       mmc->f_max = 25000000;
-       if (pdata && pdata->max_freq)
-               mmc->f_max = pdata->max_freq;
-       if (pdata && pdata->caps)
-               mmc->caps |= pdata->caps;
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 
        /* With no iommu coalescing pages, each phys_seg is a hw_seg.
@@ -1354,6 +1355,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
        mmc_davinci_cpufreq_deregister(host);
 cpu_freq_fail:
        davinci_release_dma_channels(host);
+parse_fail:
 dma_probe_defer:
        clk_disable_unprepare(host->clk);
 clk_prepare_enable_fail:
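
The davinci conversion above prefers the generic DT parser over the hand-rolled one: when an OF match exists, mmc_of_parse() picks up bus-width, max-frequency and the cd/wp GPIOs, otherwise the legacy platform data is honoured. A condensed sketch of that decision, where example_parse_legacy_pdata() is a hypothetical stand-in for the driver's own pdata handling:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/mmc/host.h>

    /* Hypothetical stand-in for driver-specific platform-data parsing. */
    static int example_parse_legacy_pdata(struct mmc_host *mmc)
    {
            return mmc->parent->platform_data ? 0 : -EINVAL;
    }

    /*
     * Sketch of the probe-time decision: devices described in DT go
     * through the generic parser, everything else keeps the pdata path.
     */
    static int example_parse(struct mmc_host *mmc)
    {
            if (mmc->parent->of_node)
                    return mmc_of_parse(mmc);

            return example_parse_legacy_pdata(mmc);
    }
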
index 7ab3d749b5aee1728e7aa7a37b530cd42eb741a7..e1335289316c84839acc46a1a298984a0ace9a93 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/mmc/mmc.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 #include "dw_mmc.h"
@@ -161,20 +162,13 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
                set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_exynos_suspend(struct device *dev)
-{
-       struct dw_mci *host = dev_get_drvdata(dev);
-
-       return dw_mci_suspend(host);
-}
-
-static int dw_mci_exynos_resume(struct device *dev)
+#ifdef CONFIG_PM
+static int dw_mci_exynos_runtime_resume(struct device *dev)
 {
        struct dw_mci *host = dev_get_drvdata(dev);
 
        dw_mci_exynos_config_smu(host);
-       return dw_mci_resume(host);
+       return dw_mci_runtime_resume(dev);
 }
 
 /**
@@ -211,10 +205,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
        return 0;
 }
 #else
-#define dw_mci_exynos_suspend          NULL
-#define dw_mci_exynos_resume           NULL
 #define dw_mci_exynos_resume_noirq     NULL
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
 
 static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
 {
@@ -524,14 +516,42 @@ static int dw_mci_exynos_probe(struct platform_device *pdev)
 {
        const struct dw_mci_drv_data *drv_data;
        const struct of_device_id *match;
+       int ret;
 
        match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
        drv_data = match->data;
-       return dw_mci_pltfm_register(pdev, drv_data);
+
+       pm_runtime_get_noresume(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       ret = dw_mci_pltfm_register(pdev, drv_data);
+       if (ret) {
+               pm_runtime_disable(&pdev->dev);
+               pm_runtime_set_suspended(&pdev->dev);
+               pm_runtime_put_noidle(&pdev->dev);
+
+               return ret;
+       }
+
+       return 0;
+}
+
+static int dw_mci_exynos_remove(struct platform_device *pdev)
+{
+       pm_runtime_disable(&pdev->dev);
+       pm_runtime_set_suspended(&pdev->dev);
+       pm_runtime_put_noidle(&pdev->dev);
+
+       return dw_mci_pltfm_remove(pdev);
 }
 
 static const struct dev_pm_ops dw_mci_exynos_pmops = {
-       SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+                          dw_mci_exynos_runtime_resume,
+                          NULL)
        .resume_noirq = dw_mci_exynos_resume_noirq,
        .thaw_noirq = dw_mci_exynos_resume_noirq,
        .restore_noirq = dw_mci_exynos_resume_noirq,
@@ -539,7 +559,7 @@ static const struct dev_pm_ops dw_mci_exynos_pmops = {
 
 static struct platform_driver dw_mci_exynos_pltfm_driver = {
        .probe          = dw_mci_exynos_probe,
-       .remove         = dw_mci_pltfm_remove,
+       .remove         = dw_mci_exynos_remove,
        .driver         = {
                .name           = "dwmmc_exynos",
                .of_match_table = dw_mci_exynos_match,
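
The exynos variant (and the k3, pci, pltfm and rockchip variants further down) now funnels both system sleep and runtime PM through the shared dw_mci_runtime_suspend()/dw_mci_runtime_resume() helpers, with probe taking a runtime PM reference that is dropped again on failure. A minimal sketch of the pattern for a hypothetical variant driver, assuming only the ops exported by dw_mmc.c and dw_mmc-pltfm.c:

    #include <linux/platform_device.h>
    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    #include "dw_mmc.h"
    #include "dw_mmc-pltfm.h"

    /* System sleep is forced through the runtime PM callbacks. */
    static const struct dev_pm_ops example_dw_mci_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
                               dw_mci_runtime_resume,
                               NULL)
    };

    static int example_variant_probe(struct platform_device *pdev)
    {
            int ret;

            pm_runtime_get_noresume(&pdev->dev);
            pm_runtime_set_active(&pdev->dev);
            pm_runtime_enable(&pdev->dev);

            ret = dw_mci_pltfm_register(pdev, NULL); /* drv_data omitted */
            if (ret) {
                    pm_runtime_disable(&pdev->dev);
                    pm_runtime_set_suspended(&pdev->dev);
                    pm_runtime_put_noidle(&pdev->dev);
            }

            return ret;
    }
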
index 624789496dcea22509184eb3d6a157a825c9a8f6..9821e6bd5d5ecf30ba045ea81f5ee397f3cd8a90 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 
@@ -162,35 +163,13 @@ static int dw_mci_k3_probe(struct platform_device *pdev)
        return dw_mci_pltfm_register(pdev, drv_data);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_k3_suspend(struct device *dev)
-{
-       struct dw_mci *host = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dw_mci_suspend(host);
-       if (!ret)
-               clk_disable_unprepare(host->ciu_clk);
-
-       return ret;
-}
-
-static int dw_mci_k3_resume(struct device *dev)
-{
-       struct dw_mci *host = dev_get_drvdata(dev);
-       int ret;
-
-       ret = clk_prepare_enable(host->ciu_clk);
-       if (ret) {
-               dev_err(host->dev, "failed to enable ciu clock\n");
-               return ret;
-       }
-
-       return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(dw_mci_k3_pmops, dw_mci_k3_suspend, dw_mci_k3_resume);
+static const struct dev_pm_ops dw_mci_k3_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+                          dw_mci_runtime_resume,
+                          NULL)
+};
 
 static struct platform_driver dw_mci_k3_pltfm_driver = {
        .probe          = dw_mci_k3_probe,
@@ -198,7 +177,7 @@ static struct platform_driver dw_mci_k3_pltfm_driver = {
        .driver         = {
                .name           = "dwmmc_k3",
                .of_match_table = dw_mci_k3_match,
-               .pm             = &dw_mci_k3_pmops,
+               .pm             = &dw_mci_k3_dev_pm_ops,
        },
 };
 
index 4c69fbd2981190734d4ef1ff9861df73c1ab5806..ab82796b01e2665eca6c82645f7938981f1b469a 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
@@ -79,25 +80,13 @@ static void dw_mci_pci_remove(struct pci_dev *pdev)
        dw_mci_remove(host);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_pci_suspend(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct dw_mci *host = pci_get_drvdata(pdev);
-
-       return dw_mci_suspend(host);
-}
-
-static int dw_mci_pci_resume(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct dw_mci *host = pci_get_drvdata(pdev);
-
-       return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume);
+static const struct dev_pm_ops dw_mci_pci_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+                          dw_mci_runtime_resume,
+                          NULL)
+};
 
 static const struct pci_device_id dw_mci_pci_id[] = {
        { PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) },
@@ -111,7 +100,7 @@ static struct pci_driver dw_mci_pci_driver = {
        .probe          = dw_mci_pci_probe,
        .remove         = dw_mci_pci_remove,
        .driver         =       {
-               .pm =   &dw_mci_pci_pmops
+               .pm =   &dw_mci_pci_dev_pm_ops,
        },
 };
 
index dbbc4303bdd0fb2ce0fa6c5d478b0f5a9f1ed70b..1236d49ba36e201f51c12b8da0f12b8eec8da093 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
@@ -58,26 +59,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
 }
 EXPORT_SYMBOL_GPL(dw_mci_pltfm_register);
 
-#ifdef CONFIG_PM_SLEEP
-/*
- * TODO: we should probably disable the clock to the card in the suspend path.
- */
-static int dw_mci_pltfm_suspend(struct device *dev)
-{
-       struct dw_mci *host = dev_get_drvdata(dev);
-
-       return dw_mci_suspend(host);
-}
-
-static int dw_mci_pltfm_resume(struct device *dev)
-{
-       struct dw_mci *host = dev_get_drvdata(dev);
-
-       return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume);
+const struct dev_pm_ops dw_mci_pltfm_pmops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+                          dw_mci_runtime_resume,
+                          NULL)
+};
 EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
 
 static const struct of_device_id dw_mci_pltfm_match[] = {
index 25eae359a5ea181b00986a0aabe8cd3a5a76bd8b..9a46e4694227b67148d88fd7d24d8e9bc8ea11ce 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/dw_mmc.h>
 #include <linux/of_address.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 #include "dw_mmc.h"
@@ -325,6 +327,7 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
 {
        const struct dw_mci_drv_data *drv_data;
        const struct of_device_id *match;
+       int ret;
 
        if (!pdev->dev.of_node)
                return -ENODEV;
@@ -332,16 +335,49 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
        match = of_match_node(dw_mci_rockchip_match, pdev->dev.of_node);
        drv_data = match->data;
 
-       return dw_mci_pltfm_register(pdev, drv_data);
+       pm_runtime_get_noresume(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+       pm_runtime_use_autosuspend(&pdev->dev);
+
+       ret = dw_mci_pltfm_register(pdev, drv_data);
+       if (ret) {
+               pm_runtime_disable(&pdev->dev);
+               pm_runtime_set_suspended(&pdev->dev);
+               pm_runtime_put_noidle(&pdev->dev);
+               return ret;
+       }
+
+       pm_runtime_put_autosuspend(&pdev->dev);
+
+       return 0;
 }
 
+static int dw_mci_rockchip_remove(struct platform_device *pdev)
+{
+       pm_runtime_get_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+       pm_runtime_put_noidle(&pdev->dev);
+
+       return dw_mci_pltfm_remove(pdev);
+}
+
+static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+                          dw_mci_runtime_resume,
+                          NULL)
+};
+
 static struct platform_driver dw_mci_rockchip_pltfm_driver = {
        .probe          = dw_mci_rockchip_probe,
-       .remove         = dw_mci_pltfm_remove,
+       .remove         = dw_mci_rockchip_remove,
        .driver         = {
                .name           = "dwmmc_rockchip",
                .of_match_table = dw_mci_rockchip_match,
-               .pm             = &dw_mci_pltfm_pmops,
+               .pm             = &dw_mci_rockchip_dev_pm_ops,
        },
 };
 
index df478ae72e23235ca8939128b2ad37497aaf4b71..b44306b886cb6d7a383abae4d985cd18ea1d48e3 100644 (file)
@@ -54,7 +54,7 @@
 #define DW_MCI_DMA_THRESHOLD   16
 
 #define DW_MCI_FREQ_MAX        200000000       /* unit: HZ */
-#define DW_MCI_FREQ_MIN        400000          /* unit: HZ */
+#define DW_MCI_FREQ_MIN        100000          /* unit: HZ */
 
 #define IDMAC_INT_CLR          (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
                                 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
@@ -165,12 +165,14 @@ static const struct file_operations dw_mci_req_fops = {
 
 static int dw_mci_regs_show(struct seq_file *s, void *v)
 {
-       seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
-       seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
-       seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
-       seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
-       seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
-       seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
+       struct dw_mci *host = s->private;
+
+       seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
+       seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
+       seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
+       seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
+       seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
+       seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
 
        return 0;
 }
@@ -234,7 +236,6 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
 
 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
 {
-       struct mmc_data *data;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        u32 cmdr;
@@ -289,10 +290,9 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
        if (cmd->flags & MMC_RSP_CRC)
                cmdr |= SDMMC_CMD_RESP_CRC;
 
-       data = cmd->data;
-       if (data) {
+       if (cmd->data) {
                cmdr |= SDMMC_CMD_DAT_EXP;
-               if (data->flags & MMC_DATA_WRITE)
+               if (cmd->data->flags & MMC_DATA_WRITE)
                        cmdr |= SDMMC_CMD_DAT_WR;
        }
 
@@ -335,6 +335,9 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
        cmdr = stop->opcode | SDMMC_CMD_STOP |
                SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
 
+       if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
+               cmdr |= SDMMC_CMD_USE_HOLD_REG;
+
        return cmdr;
 }
 
@@ -380,7 +383,7 @@ static void dw_mci_start_command(struct dw_mci *host,
 
 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
 {
-       struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
+       struct mmc_command *stop = &host->stop_abort;
 
        dw_mci_start_command(host, stop, host->stop_cmdr);
 }
@@ -409,12 +412,13 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
 {
        struct mmc_data *data = host->data;
 
-       if (data)
-               if (!data->host_cookie)
-                       dma_unmap_sg(host->dev,
-                                    data->sg,
-                                    data->sg_len,
-                                    dw_mci_get_dma_dir(data));
+       if (data && data->host_cookie == COOKIE_MAPPED) {
+               dma_unmap_sg(host->dev,
+                            data->sg,
+                            data->sg_len,
+                            dw_mci_get_dma_dir(data));
+               data->host_cookie = COOKIE_UNMAPPED;
+       }
 }
 
 static void dw_mci_idmac_reset(struct dw_mci *host)
@@ -612,7 +616,7 @@ static inline int dw_mci_prepare_desc64(struct dw_mci *host,
        return 0;
 err_own_bit:
        /* restore the descriptor chain as it's polluted */
-       dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
+       dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
        memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
        dw_mci_idmac_init(host);
        return -EINVAL;
@@ -688,7 +692,7 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host,
        return 0;
 err_own_bit:
        /* restore the descriptor chain as it's polluted */
-       dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
+       dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
        memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
        dw_mci_idmac_init(host);
        return -EINVAL;
@@ -845,13 +849,13 @@ static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
 
 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
                                   struct mmc_data *data,
-                                  bool next)
+                                  int cookie)
 {
        struct scatterlist *sg;
        unsigned int i, sg_len;
 
-       if (!next && data->host_cookie)
-               return data->host_cookie;
+       if (data->host_cookie == COOKIE_PRE_MAPPED)
+               return data->sg_len;
 
        /*
         * We don't do DMA on "complex" transfers, i.e. with
@@ -876,15 +880,13 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host,
        if (sg_len == 0)
                return -EINVAL;
 
-       if (next)
-               data->host_cookie = sg_len;
+       data->host_cookie = cookie;
 
        return sg_len;
 }
 
 static void dw_mci_pre_req(struct mmc_host *mmc,
-                          struct mmc_request *mrq,
-                          bool is_first_req)
+                          struct mmc_request *mrq)
 {
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
@@ -892,13 +894,12 @@ static void dw_mci_pre_req(struct mmc_host *mmc,
        if (!slot->host->use_dma || !data)
                return;
 
-       if (data->host_cookie) {
-               data->host_cookie = 0;
-               return;
-       }
+       /* This data might be unmapped at this time */
+       data->host_cookie = COOKIE_UNMAPPED;
 
-       if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
-               data->host_cookie = 0;
+       if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
+                               COOKIE_PRE_MAPPED) < 0)
+               data->host_cookie = COOKIE_UNMAPPED;
 }
 
 static void dw_mci_post_req(struct mmc_host *mmc,
@@ -911,12 +912,12 @@ static void dw_mci_post_req(struct mmc_host *mmc,
        if (!slot->host->use_dma || !data)
                return;
 
-       if (data->host_cookie)
+       if (data->host_cookie != COOKIE_UNMAPPED)
                dma_unmap_sg(slot->host->dev,
                             data->sg,
                             data->sg_len,
                             dw_mci_get_dma_dir(data));
-       data->host_cookie = 0;
+       data->host_cookie = COOKIE_UNMAPPED;
 }
 
 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
@@ -1022,7 +1023,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
        if (!host->use_dma)
                return -ENODEV;
 
-       sg_len = dw_mci_pre_dma_transfer(host, data, 0);
+       sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
        if (sg_len < 0) {
                host->dma_ops->stop(host);
                return sg_len;
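
The host_cookie handling above moves from a boolean to explicit mapping states, so the cleanup and post-request paths can tell which path mapped the buffer. A hedged sketch of the lifecycle; the enum mirrors the COOKIE_* values this series is assumed to add to dw_mmc.h (not part of this excerpt):

    #include <linux/mmc/core.h>
    #include <linux/types.h>

    /* Assumed shape of the new states from dw_mmc.h. */
    enum example_cookie {
            COOKIE_UNMAPPED = 0,
            COOKIE_PRE_MAPPED,      /* mapped by ->pre_req() */
            COOKIE_MAPPED,          /* mapped by the request path itself */
    };

    /*
     * Only buffers mapped by the request path are unmapped in
     * dw_mci_dma_cleanup(); pre-mapped buffers stay mapped until
     * ->post_req() tears them down.
     */
    static bool example_cleanup_should_unmap(struct mmc_data *data)
    {
            return data && data->host_cookie == COOKIE_MAPPED;
    }
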
@@ -1175,13 +1176,24 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 
                div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
 
-               if (clock != slot->__clk_old || force_clkinit)
+               if ((clock != slot->__clk_old &&
+                       !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
+                       force_clkinit) {
                        dev_info(&slot->mmc->class_dev,
                                 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
                                 slot->id, host->bus_hz, clock,
                                 div ? ((host->bus_hz / div) >> 1) :
                                 host->bus_hz, div);
 
+                       /*
+                        * If card is polling, display the message only
+                        * one time at boot time.
+                        */
+                       if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
+                                       slot->mmc->f_min == clock)
+                               set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
+               }
+
                /* disable clock */
                mci_writel(host, CLKENA, 0);
                mci_writel(host, CLKSRC, 0);
@@ -1273,10 +1285,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
                spin_unlock_irqrestore(&host->irq_lock, irqflags);
        }
 
-       if (mrq->stop)
-               host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
-       else
-               host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
+       host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
 }
 
 static void dw_mci_start_request(struct dw_mci *host,
@@ -1527,22 +1536,34 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
        int gpio_cd = mmc_gpio_get_cd(mmc);
 
        /* Use platform get_cd function, else try onboard card detect */
-       if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
+       if (((mmc->caps & MMC_CAP_NEEDS_POLL)
+                               || !mmc_card_is_removable(mmc))) {
                present = 1;
-       else if (gpio_cd >= 0)
+
+               if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+                       if (mmc->caps & MMC_CAP_NEEDS_POLL) {
+                               dev_info(&mmc->class_dev,
+                                       "card is polling.\n");
+                       } else {
+                               dev_info(&mmc->class_dev,
+                                       "card is non-removable.\n");
+                       }
+                       set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+               }
+
+               return present;
+       } else if (gpio_cd >= 0)
                present = gpio_cd;
        else
                present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
                        == 0 ? 1 : 0;
 
        spin_lock_bh(&host->lock);
-       if (present) {
-               set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+       if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
                dev_dbg(&mmc->class_dev, "card is present\n");
-       } else {
-               clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+       else if (!present &&
+                       !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
                dev_dbg(&mmc->class_dev, "card is not present\n");
-       }
        spin_unlock_bh(&host->lock);
 
        return present;
@@ -1889,8 +1910,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
                                dw_mci_stop_dma(host);
-                               if (data->stop ||
-                                   !(host->data_status & (SDMMC_INT_DRTO |
+                               if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
                                state = STATE_DATA_ERROR;
@@ -1926,8 +1946,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
                                dw_mci_stop_dma(host);
-                               if (data->stop ||
-                                   !(host->data_status & (SDMMC_INT_DRTO |
+                               if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
                                state = STATE_DATA_ERROR;
@@ -2003,7 +2022,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
                        host->cmd = NULL;
                        host->data = NULL;
 
-                       if (mrq->stop)
+                       if (!mrq->sbc && mrq->stop)
                                dw_mci_command_complete(host, mrq->stop);
                        else
                                host->cmd_status = 0;
@@ -2615,6 +2634,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
                mmc->f_min = DW_MCI_FREQ_MIN;
                mmc->f_max = DW_MCI_FREQ_MAX;
        } else {
+               dev_info(host->dev,
+                       "'clock-freq-min-max' property is deprecated.\n");
                mmc->f_min = freq[0];
                mmc->f_max = freq[1];
        }
@@ -3267,26 +3288,46 @@ EXPORT_SYMBOL(dw_mci_remove);
 
 
 
-#ifdef CONFIG_PM_SLEEP
-/*
- * TODO: we should probably disable the clock to the card in the suspend path.
- */
-int dw_mci_suspend(struct dw_mci *host)
+#ifdef CONFIG_PM
+int dw_mci_runtime_suspend(struct device *dev)
 {
+       struct dw_mci *host = dev_get_drvdata(dev);
+
        if (host->use_dma && host->dma_ops->exit)
                host->dma_ops->exit(host);
 
+       clk_disable_unprepare(host->ciu_clk);
+
+       if (host->cur_slot &&
+           (mmc_can_gpio_cd(host->cur_slot->mmc) ||
+            !mmc_card_is_removable(host->cur_slot->mmc)))
+               clk_disable_unprepare(host->biu_clk);
+
        return 0;
 }
-EXPORT_SYMBOL(dw_mci_suspend);
+EXPORT_SYMBOL(dw_mci_runtime_suspend);
 
-int dw_mci_resume(struct dw_mci *host)
+int dw_mci_runtime_resume(struct device *dev)
 {
-       int i, ret;
+       int i, ret = 0;
+       struct dw_mci *host = dev_get_drvdata(dev);
+
+       if (host->cur_slot &&
+           (mmc_can_gpio_cd(host->cur_slot->mmc) ||
+            !mmc_card_is_removable(host->cur_slot->mmc))) {
+               ret = clk_prepare_enable(host->biu_clk);
+               if (ret)
+                       return ret;
+       }
+
+       ret = clk_prepare_enable(host->ciu_clk);
+       if (ret)
+               goto err;
 
        if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
+               clk_disable_unprepare(host->ciu_clk);
                ret = -ENODEV;
-               return ret;
+               goto err;
        }
 
        if (host->use_dma && host->dma_ops->init)
@@ -3296,8 +3337,8 @@ int dw_mci_resume(struct dw_mci *host)
         * Restore the initial value at FIFOTH register
         * And Invalidate the prev_blksz with zero
         */
-       mci_writel(host, FIFOTH, host->fifoth_val);
-       host->prev_blksz = 0;
+        mci_writel(host, FIFOTH, host->fifoth_val);
+        host->prev_blksz = 0;
 
        /* Put in max timeout */
        mci_writel(host, TMOUT, 0xFFFFFFFF);
@@ -3323,9 +3364,17 @@ int dw_mci_resume(struct dw_mci *host)
        dw_mci_enable_cd(host);
 
        return 0;
+
+err:
+       if (host->cur_slot &&
+           (mmc_can_gpio_cd(host->cur_slot->mmc) ||
+            !mmc_card_is_removable(host->cur_slot->mmc)))
+               clk_disable_unprepare(host->biu_clk);
+
+       return ret;
 }
-EXPORT_SYMBOL(dw_mci_resume);
-#endif /* CONFIG_PM_SLEEP */
+EXPORT_SYMBOL(dw_mci_runtime_resume);
+#endif /* CONFIG_PM */
 
 static int __init dw_mci_init(void)
 {
index e8cd2dec3263d308ed0ebc9f52271c4b72bc5f05..c59465829387757ddd3d8257c409b400534f048c 100644 (file)
 
 extern int dw_mci_probe(struct dw_mci *host);
 extern void dw_mci_remove(struct dw_mci *host);
-#ifdef CONFIG_PM_SLEEP
-extern int dw_mci_suspend(struct dw_mci *host);
-extern int dw_mci_resume(struct dw_mci *host);
+#ifdef CONFIG_PM
+extern int dw_mci_runtime_suspend(struct device *device);
+extern int dw_mci_runtime_resume(struct device *device);
 #endif
 
 /**
@@ -272,6 +272,7 @@ struct dw_mci_slot {
 #define DW_MMC_CARD_NEED_INIT  1
 #define DW_MMC_CARD_NO_LOW_PWR 2
 #define DW_MMC_CARD_NO_USE_HOLD 3
+#define DW_MMC_CARD_NEEDS_POLL 4
        int                     id;
        int                     sdio_id;
 };
index 684087db170b218c45dfc0bb45e9479b7e61014f..819ad32964fc567cbeba35a75e25975ae78b2d90 100644 (file)
@@ -320,8 +320,7 @@ static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 }
 
 static void jz4740_mmc_pre_request(struct mmc_host *mmc,
-                                  struct mmc_request *mrq,
-                                  bool is_first_req)
+                                  struct mmc_request *mrq)
 {
        struct jz4740_mmc_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
new file mode 100644 (file)
index 0000000..b352760
--- /dev/null
@@ -0,0 +1,851 @@
+/*
+ * Amlogic SD/eMMC driver for the GX/S905 family SoCs
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Kevin Hilman <khilman@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+
+#define DRIVER_NAME "meson-gx-mmc"
+
+#define SD_EMMC_CLOCK 0x0
+#define   CLK_DIV_SHIFT 0
+#define   CLK_DIV_WIDTH 6
+#define   CLK_DIV_MASK 0x3f
+#define   CLK_DIV_MAX 63
+#define   CLK_SRC_SHIFT 6
+#define   CLK_SRC_WIDTH 2
+#define   CLK_SRC_MASK 0x3
+#define   CLK_SRC_XTAL 0   /* external crystal */
+#define   CLK_SRC_XTAL_RATE 24000000
+#define   CLK_SRC_PLL 1    /* FCLK_DIV2 */
+#define   CLK_SRC_PLL_RATE 1000000000
+#define   CLK_PHASE_SHIFT 8
+#define   CLK_PHASE_MASK 0x3
+#define   CLK_PHASE_0 0
+#define   CLK_PHASE_90 1
+#define   CLK_PHASE_180 2
+#define   CLK_PHASE_270 3
+#define   CLK_ALWAYS_ON BIT(24)
+
+#define SD_EMMC_DElAY 0x4
+#define SD_EMMC_ADJUST 0x8
+#define SD_EMMC_CALOUT 0x10
+#define SD_EMMC_START 0x40
+#define   START_DESC_INIT BIT(0)
+#define   START_DESC_BUSY BIT(1)
+#define   START_DESC_ADDR_SHIFT 2
+#define   START_DESC_ADDR_MASK (~0x3)
+
+#define SD_EMMC_CFG 0x44
+#define   CFG_BUS_WIDTH_SHIFT 0
+#define   CFG_BUS_WIDTH_MASK 0x3
+#define   CFG_BUS_WIDTH_1 0x0
+#define   CFG_BUS_WIDTH_4 0x1
+#define   CFG_BUS_WIDTH_8 0x2
+#define   CFG_DDR BIT(2)
+#define   CFG_BLK_LEN_SHIFT 4
+#define   CFG_BLK_LEN_MASK 0xf
+#define   CFG_RESP_TIMEOUT_SHIFT 8
+#define   CFG_RESP_TIMEOUT_MASK 0xf
+#define   CFG_RC_CC_SHIFT 12
+#define   CFG_RC_CC_MASK 0xf
+#define   CFG_STOP_CLOCK BIT(22)
+#define   CFG_CLK_ALWAYS_ON BIT(18)
+#define   CFG_AUTO_CLK BIT(23)
+
+#define SD_EMMC_STATUS 0x48
+#define   STATUS_BUSY BIT(31)
+
+#define SD_EMMC_IRQ_EN 0x4c
+#define   IRQ_EN_MASK 0x3fff
+#define   IRQ_RXD_ERR_SHIFT 0
+#define   IRQ_RXD_ERR_MASK 0xff
+#define   IRQ_TXD_ERR BIT(8)
+#define   IRQ_DESC_ERR BIT(9)
+#define   IRQ_RESP_ERR BIT(10)
+#define   IRQ_RESP_TIMEOUT BIT(11)
+#define   IRQ_DESC_TIMEOUT BIT(12)
+#define   IRQ_END_OF_CHAIN BIT(13)
+#define   IRQ_RESP_STATUS BIT(14)
+#define   IRQ_SDIO BIT(15)
+
+#define SD_EMMC_CMD_CFG 0x50
+#define SD_EMMC_CMD_ARG 0x54
+#define SD_EMMC_CMD_DAT 0x58
+#define SD_EMMC_CMD_RSP 0x5c
+#define SD_EMMC_CMD_RSP1 0x60
+#define SD_EMMC_CMD_RSP2 0x64
+#define SD_EMMC_CMD_RSP3 0x68
+
+#define SD_EMMC_RXD 0x94
+#define SD_EMMC_TXD 0x94
+#define SD_EMMC_LAST_REG SD_EMMC_TXD
+
+#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
+#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
+#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
+#define MUX_CLK_NUM_PARENTS 2
+
+struct meson_host {
+       struct  device          *dev;
+       struct  mmc_host        *mmc;
+       struct  mmc_request     *mrq;
+       struct  mmc_command     *cmd;
+
+       spinlock_t lock;
+       void __iomem *regs;
+       int irq;
+       u32 ocr_mask;
+       struct clk *core_clk;
+       struct clk_mux mux;
+       struct clk *mux_clk;
+       struct clk *mux_parent[MUX_CLK_NUM_PARENTS];
+       unsigned long mux_parent_rate[MUX_CLK_NUM_PARENTS];
+
+       struct clk_divider cfg_div;
+       struct clk *cfg_div_clk;
+
+       unsigned int bounce_buf_size;
+       void *bounce_buf;
+       dma_addr_t bounce_dma_addr;
+
+       bool vqmmc_enabled;
+};
+
+struct sd_emmc_desc {
+       u32 cmd_cfg;
+       u32 cmd_arg;
+       u32 cmd_data;
+       u32 cmd_resp;
+};
+#define CMD_CFG_LENGTH_SHIFT 0
+#define CMD_CFG_LENGTH_MASK 0x1ff
+#define CMD_CFG_BLOCK_MODE BIT(9)
+#define CMD_CFG_R1B BIT(10)
+#define CMD_CFG_END_OF_CHAIN BIT(11)
+#define CMD_CFG_TIMEOUT_SHIFT 12
+#define CMD_CFG_TIMEOUT_MASK 0xf
+#define CMD_CFG_NO_RESP BIT(16)
+#define CMD_CFG_NO_CMD BIT(17)
+#define CMD_CFG_DATA_IO BIT(18)
+#define CMD_CFG_DATA_WR BIT(19)
+#define CMD_CFG_RESP_NOCRC BIT(20)
+#define CMD_CFG_RESP_128 BIT(21)
+#define CMD_CFG_RESP_NUM BIT(22)
+#define CMD_CFG_DATA_NUM BIT(23)
+#define CMD_CFG_CMD_INDEX_SHIFT 24
+#define CMD_CFG_CMD_INDEX_MASK 0x3f
+#define CMD_CFG_ERROR BIT(30)
+#define CMD_CFG_OWNER BIT(31)
+
+#define CMD_DATA_MASK (~0x3)
+#define CMD_DATA_BIG_ENDIAN BIT(1)
+#define CMD_DATA_SRAM BIT(0)
+#define CMD_RESP_MASK (~0x1)
+#define CMD_RESP_SRAM BIT(0)
+
+static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
+{
+       struct mmc_host *mmc = host->mmc;
+       int ret = 0;
+       u32 cfg;
+
+       if (clk_rate) {
+               if (WARN_ON(clk_rate > mmc->f_max))
+                       clk_rate = mmc->f_max;
+               else if (WARN_ON(clk_rate < mmc->f_min))
+                       clk_rate = mmc->f_min;
+       }
+
+       if (clk_rate == mmc->actual_clock)
+               return 0;
+
+       /* stop clock */
+       cfg = readl(host->regs + SD_EMMC_CFG);
+       if (!(cfg & CFG_STOP_CLOCK)) {
+               cfg |= CFG_STOP_CLOCK;
+               writel(cfg, host->regs + SD_EMMC_CFG);
+       }
+
+       dev_dbg(host->dev, "change clock rate %u -> %lu\n",
+               mmc->actual_clock, clk_rate);
+
+       if (clk_rate == 0) {
+               mmc->actual_clock = 0;
+               return 0;
+       }
+
+       ret = clk_set_rate(host->cfg_div_clk, clk_rate);
+       if (ret)
+               dev_warn(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
+                        clk_rate, ret);
+       else if (clk_rate && clk_rate != clk_get_rate(host->cfg_div_clk))
+               dev_warn(host->dev, "divider requested rate %lu != actual rate %lu: ret=%d\n",
+                        clk_rate, clk_get_rate(host->cfg_div_clk), ret);
+       else
+               mmc->actual_clock = clk_rate;
+
+       /* (re)start clock, if non-zero */
+       if (!ret && clk_rate) {
+               cfg = readl(host->regs + SD_EMMC_CFG);
+               cfg &= ~CFG_STOP_CLOCK;
+               writel(cfg, host->regs + SD_EMMC_CFG);
+       }
+
+       return ret;
+}
+
+/*
+ * The SD/eMMC IP block has an internal mux and divider used for
+ * generating the MMC clock.  Use the clock framework to create and
+ * manage these clocks.
+ */
+static int meson_mmc_clk_init(struct meson_host *host)
+{
+       struct clk_init_data init;
+       char clk_name[32];
+       int i, ret = 0;
+       const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
+       unsigned int mux_parent_count = 0;
+       const char *clk_div_parents[1];
+       unsigned int f_min = UINT_MAX;
+       u32 clk_reg, cfg;
+
+       /* get the mux parents */
+       for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
+               char name[16];
+
+               snprintf(name, sizeof(name), "clkin%d", i);
+               host->mux_parent[i] = devm_clk_get(host->dev, name);
+               if (IS_ERR(host->mux_parent[i])) {
+                       ret = PTR_ERR(host->mux_parent[i]);
+                       if (PTR_ERR(host->mux_parent[i]) != -EPROBE_DEFER)
+                               dev_err(host->dev, "Missing clock %s\n", name);
+                       host->mux_parent[i] = NULL;
+                       return ret;
+               }
+
+               host->mux_parent_rate[i] = clk_get_rate(host->mux_parent[i]);
+               mux_parent_names[i] = __clk_get_name(host->mux_parent[i]);
+               mux_parent_count++;
+               if (host->mux_parent_rate[i] < f_min)
+                       f_min = host->mux_parent_rate[i];
+       }
+
+       /* calculate f_min based on input clocks, and max divider value */
+       if (f_min != UINT_MAX)
+               f_min = DIV_ROUND_UP(CLK_SRC_XTAL_RATE, CLK_DIV_MAX);
+       else
+               f_min = 4000000;  /* default min: 4 MHz */
+       host->mmc->f_min = f_min;
+
+       /* create the mux */
+       snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
+       init.name = clk_name;
+       init.ops = &clk_mux_ops;
+       init.flags = 0;
+       init.parent_names = mux_parent_names;
+       init.num_parents = mux_parent_count;
+
+       host->mux.reg = host->regs + SD_EMMC_CLOCK;
+       host->mux.shift = CLK_SRC_SHIFT;
+       host->mux.mask = CLK_SRC_MASK;
+       host->mux.flags = 0;
+       host->mux.table = NULL;
+       host->mux.hw.init = &init;
+
+       host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
+       if (WARN_ON(IS_ERR(host->mux_clk)))
+               return PTR_ERR(host->mux_clk);
+
+       /* create the divider */
+       snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
+       init.name = devm_kstrdup(host->dev, clk_name, GFP_KERNEL);
+       init.ops = &clk_divider_ops;
+       init.flags = CLK_SET_RATE_PARENT;
+       clk_div_parents[0] = __clk_get_name(host->mux_clk);
+       init.parent_names = clk_div_parents;
+       init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+       host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
+       host->cfg_div.shift = CLK_DIV_SHIFT;
+       host->cfg_div.width = CLK_DIV_WIDTH;
+       host->cfg_div.hw.init = &init;
+       host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
+               CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;
+
+       host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
+       if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk)))
+               return PTR_ERR(host->cfg_div_clk);
+
+       /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
+       clk_reg = 0;
+       clk_reg |= CLK_PHASE_180 << CLK_PHASE_SHIFT;
+       clk_reg |= CLK_SRC_XTAL << CLK_SRC_SHIFT;
+       clk_reg |= CLK_DIV_MAX << CLK_DIV_SHIFT;
+       clk_reg &= ~CLK_ALWAYS_ON;
+       writel(clk_reg, host->regs + SD_EMMC_CLOCK);
+
+       /* Ensure clock starts in "auto" mode, not "always on" */
+       cfg = readl(host->regs + SD_EMMC_CFG);
+       cfg &= ~CFG_CLK_ALWAYS_ON;
+       cfg |= CFG_AUTO_CLK;
+       writel(cfg, host->regs + SD_EMMC_CFG);
+
+       ret = clk_prepare_enable(host->cfg_div_clk);
+       if (!ret)
+               ret = meson_mmc_clk_set(host, f_min);
+
+       if (!ret)
+               clk_disable_unprepare(host->cfg_div_clk);
+
+       return ret;
+}
+
+static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct meson_host *host = mmc_priv(mmc);
+       u32 bus_width;
+       u32 val, orig;
+
+       /*
+        * GPIO regulator, only controls switching between 1v8 and
+        * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
+        */
+       switch (ios->power_mode) {
+       case MMC_POWER_OFF:
+               if (!IS_ERR(mmc->supply.vmmc))
+                       mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+               if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
+                       regulator_disable(mmc->supply.vqmmc);
+                       host->vqmmc_enabled = false;
+               }
+
+               break;
+
+       case MMC_POWER_UP:
+               if (!IS_ERR(mmc->supply.vmmc))
+                       mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+               break;
+
+       case MMC_POWER_ON:
+               if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
+                       int ret = regulator_enable(mmc->supply.vqmmc);
+
+                       if (ret < 0)
+                               dev_err(mmc_dev(mmc),
+                                       "failed to enable vqmmc regulator\n");
+                       else
+                               host->vqmmc_enabled = true;
+               }
+
+               break;
+       }
+
+
+       meson_mmc_clk_set(host, ios->clock);
+
+       /* Bus width */
+       val = readl(host->regs + SD_EMMC_CFG);
+       switch (ios->bus_width) {
+       case MMC_BUS_WIDTH_1:
+               bus_width = CFG_BUS_WIDTH_1;
+               break;
+       case MMC_BUS_WIDTH_4:
+               bus_width = CFG_BUS_WIDTH_4;
+               break;
+       case MMC_BUS_WIDTH_8:
+               bus_width = CFG_BUS_WIDTH_8;
+               break;
+       default:
+               dev_err(host->dev, "Invalid ios->bus_width: %u.  Setting to 4.\n",
+                       ios->bus_width);
+               bus_width = CFG_BUS_WIDTH_4;
+               return;
+       }
+
+       val = readl(host->regs + SD_EMMC_CFG);
+       orig = val;
+
+       val &= ~(CFG_BUS_WIDTH_MASK << CFG_BUS_WIDTH_SHIFT);
+       val |= bus_width << CFG_BUS_WIDTH_SHIFT;
+
+       val &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
+       val |= ilog2(SD_EMMC_CFG_BLK_SIZE) << CFG_BLK_LEN_SHIFT;
+
+       val &= ~(CFG_RESP_TIMEOUT_MASK << CFG_RESP_TIMEOUT_SHIFT);
+       val |= ilog2(SD_EMMC_CFG_RESP_TIMEOUT) << CFG_RESP_TIMEOUT_SHIFT;
+
+       val &= ~(CFG_RC_CC_MASK << CFG_RC_CC_SHIFT);
+       val |= ilog2(SD_EMMC_CFG_CMD_GAP) << CFG_RC_CC_SHIFT;
+
+       writel(val, host->regs + SD_EMMC_CFG);
+
+       if (val != orig)
+               dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
+                       __func__, orig, val);
+}
+
+static int meson_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+       struct meson_host *host = mmc_priv(mmc);
+
+       WARN_ON(host->mrq != mrq);
+
+       host->mrq = NULL;
+       host->cmd = NULL;
+       mmc_request_done(host->mmc, mrq);
+
+       return 0;
+}
+
+static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+       struct meson_host *host = mmc_priv(mmc);
+       struct sd_emmc_desc *desc, desc_tmp;
+       u32 cfg;
+       u8 blk_len, cmd_cfg_timeout;
+       unsigned int xfer_bytes = 0;
+
+       /* Setup descriptors */
+       dma_rmb();
+       desc = &desc_tmp;
+       memset(desc, 0, sizeof(struct sd_emmc_desc));
+
+       desc->cmd_cfg |= (cmd->opcode & CMD_CFG_CMD_INDEX_MASK) <<
+               CMD_CFG_CMD_INDEX_SHIFT;
+       desc->cmd_cfg |= CMD_CFG_OWNER;  /* owned by CPU */
+       desc->cmd_arg = cmd->arg;
+
+       /* Response */
+       if (cmd->flags & MMC_RSP_PRESENT) {
+               desc->cmd_cfg &= ~CMD_CFG_NO_RESP;
+               if (cmd->flags & MMC_RSP_136)
+                       desc->cmd_cfg |= CMD_CFG_RESP_128;
+               desc->cmd_cfg |= CMD_CFG_RESP_NUM;
+               desc->cmd_resp = 0;
+
+               if (!(cmd->flags & MMC_RSP_CRC))
+                       desc->cmd_cfg |= CMD_CFG_RESP_NOCRC;
+
+               if (cmd->flags & MMC_RSP_BUSY)
+                       desc->cmd_cfg |= CMD_CFG_R1B;
+       } else {
+               desc->cmd_cfg |= CMD_CFG_NO_RESP;
+       }
+
+       /* data? */
+       if (cmd->data) {
+               desc->cmd_cfg |= CMD_CFG_DATA_IO;
+               if (cmd->data->blocks > 1) {
+                       desc->cmd_cfg |= CMD_CFG_BLOCK_MODE;
+                       desc->cmd_cfg |=
+                               (cmd->data->blocks & CMD_CFG_LENGTH_MASK) <<
+                               CMD_CFG_LENGTH_SHIFT;
+
+                       /* check if block-size matches, if not update */
+                       cfg = readl(host->regs + SD_EMMC_CFG);
+                       blk_len = cfg & (CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
+                       blk_len >>= CFG_BLK_LEN_SHIFT;
+                       if (blk_len != ilog2(cmd->data->blksz)) {
+                               dev_warn(host->dev, "%s: update blk_len %d -> %d\n",
+                                       __func__, blk_len,
+                                        ilog2(cmd->data->blksz));
+                               blk_len = ilog2(cmd->data->blksz);
+                               cfg &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
+                               cfg |= blk_len << CFG_BLK_LEN_SHIFT;
+                               writel(cfg, host->regs + SD_EMMC_CFG);
+                       }
+               } else {
+                       desc->cmd_cfg &= ~CMD_CFG_BLOCK_MODE;
+                       desc->cmd_cfg |=
+                               (cmd->data->blksz & CMD_CFG_LENGTH_MASK) <<
+                               CMD_CFG_LENGTH_SHIFT;
+               }
+
+               cmd->data->bytes_xfered = 0;
+               xfer_bytes = cmd->data->blksz * cmd->data->blocks;
+               if (cmd->data->flags & MMC_DATA_WRITE) {
+                       desc->cmd_cfg |= CMD_CFG_DATA_WR;
+                       WARN_ON(xfer_bytes > host->bounce_buf_size);
+                       sg_copy_to_buffer(cmd->data->sg, cmd->data->sg_len,
+                                         host->bounce_buf, xfer_bytes);
+                       cmd->data->bytes_xfered = xfer_bytes;
+                       dma_wmb();
+               } else {
+                       desc->cmd_cfg &= ~CMD_CFG_DATA_WR;
+               }
+
+               if (xfer_bytes > 0) {
+                       desc->cmd_cfg &= ~CMD_CFG_DATA_NUM;
+                       desc->cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
+               } else {
+                       /* write data to data_addr */
+                       desc->cmd_cfg |= CMD_CFG_DATA_NUM;
+                       desc->cmd_data = 0;
+               }
+
+               cmd_cfg_timeout = 12;
+       } else {
+               desc->cmd_cfg &= ~CMD_CFG_DATA_IO;
+               cmd_cfg_timeout = 10;
+       }
+       desc->cmd_cfg |= (cmd_cfg_timeout & CMD_CFG_TIMEOUT_MASK) <<
+               CMD_CFG_TIMEOUT_SHIFT;
+
+       host->cmd = cmd;
+
+       /* Last descriptor */
+       desc->cmd_cfg |= CMD_CFG_END_OF_CHAIN;
+       writel(desc->cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
+       writel(desc->cmd_data, host->regs + SD_EMMC_CMD_DAT);
+       writel(desc->cmd_resp, host->regs + SD_EMMC_CMD_RSP);
+       wmb(); /* ensure descriptor is written before kicked */
+       writel(desc->cmd_arg, host->regs + SD_EMMC_CMD_ARG);
+}
+
+static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+       struct meson_host *host = mmc_priv(mmc);
+
+       WARN_ON(host->mrq != NULL);
+
+       /* Stop execution */
+       writel(0, host->regs + SD_EMMC_START);
+
+       /* clear, ack, enable all interrupts */
+       writel(0, host->regs + SD_EMMC_IRQ_EN);
+       writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
+       writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
+
+       host->mrq = mrq;
+
+       if (mrq->sbc)
+               meson_mmc_start_cmd(mmc, mrq->sbc);
+       else
+               meson_mmc_start_cmd(mmc, mrq->cmd);
+}
+
+static int meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+       struct meson_host *host = mmc_priv(mmc);
+
+       if (cmd->flags & MMC_RSP_136) {
+               cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
+               cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
+               cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
+               cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
+       } else if (cmd->flags & MMC_RSP_PRESENT) {
+               cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
+       }
+
+       return 0;
+}
+
+static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
+{
+       struct meson_host *host = dev_id;
+       struct mmc_request *mrq;
+       struct mmc_command *cmd = host->cmd;
+       u32 irq_en, status, raw_status;
+       irqreturn_t ret = IRQ_HANDLED;
+
+       if (WARN_ON(!host))
+               return IRQ_NONE;
+
+       mrq = host->mrq;
+
+       if (WARN_ON(!mrq))
+               return IRQ_NONE;
+
+       if (WARN_ON(!cmd))
+               return IRQ_NONE;
+
+       spin_lock(&host->lock);
+       irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
+       raw_status = readl(host->regs + SD_EMMC_STATUS);
+       status = raw_status & irq_en;
+
+       if (!status) {
+               dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
+                        raw_status, irq_en);
+               ret = IRQ_NONE;
+               goto out;
+       }
+
+       cmd->error = 0;
+       if (status & IRQ_RXD_ERR_MASK) {
+               dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
+               cmd->error = -EILSEQ;
+       }
+       if (status & IRQ_TXD_ERR) {
+               dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
+               cmd->error = -EILSEQ;
+       }
+       if (status & IRQ_DESC_ERR)
+               dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
+       if (status & IRQ_RESP_ERR) {
+               dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
+               cmd->error = -EILSEQ;
+       }
+       if (status & IRQ_RESP_TIMEOUT) {
+               dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
+               cmd->error = -ETIMEDOUT;
+       }
+       if (status & IRQ_DESC_TIMEOUT) {
+               dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
+               cmd->error = -ETIMEDOUT;
+       }
+       if (status & IRQ_SDIO)
+               dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");
+
+       if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS))
+               ret = IRQ_WAKE_THREAD;
+       else  {
+               dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
+                        status, cmd->opcode, cmd->arg,
+                        cmd->flags, mrq->stop ? 1 : 0);
+               if (cmd->data) {
+                       struct mmc_data *data = cmd->data;
+
+                       dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
+                                data->blksz, data->blocks, data->flags,
+                                data->flags & MMC_DATA_WRITE ? "write" : "",
+                                data->flags & MMC_DATA_READ ? "read" : "");
+               }
+       }
+
+out:
+       /* ack all (enabled) interrupts */
+       writel(status, host->regs + SD_EMMC_STATUS);
+
+       if (ret == IRQ_HANDLED) {
+               meson_mmc_read_resp(host->mmc, cmd);
+               meson_mmc_request_done(host->mmc, cmd->mrq);
+       }
+
+       spin_unlock(&host->lock);
+       return ret;
+}
+
+static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
+{
+       struct meson_host *host = dev_id;
+       struct mmc_request *mrq = host->mrq;
+       struct mmc_command *cmd = host->cmd;
+       struct mmc_data *data;
+       unsigned int xfer_bytes;
+       int ret = IRQ_HANDLED;
+
+       if (WARN_ON(!mrq))
+               ret = IRQ_NONE;
+
+       if (WARN_ON(!cmd))
+               ret = IRQ_NONE;
+
+       data = cmd->data;
+       if (data) {
+               xfer_bytes = data->blksz * data->blocks;
+               if (data->flags & MMC_DATA_READ) {
+                       WARN_ON(xfer_bytes > host->bounce_buf_size);
+                       sg_copy_from_buffer(data->sg, data->sg_len,
+                                           host->bounce_buf, xfer_bytes);
+                       data->bytes_xfered = xfer_bytes;
+               }
+       }
+
+       meson_mmc_read_resp(host->mmc, cmd);
+       if (!data || !data->stop || mrq->sbc)
+               meson_mmc_request_done(host->mmc, mrq);
+       else
+               meson_mmc_start_cmd(host->mmc, data->stop);
+
+       return ret;
+}
+
+/*
+ * NOTE: we only need this until the GPIO/pinctrl driver can handle
+ * interrupts.  For now, the MMC core will use this for polling.
+ */
+static int meson_mmc_get_cd(struct mmc_host *mmc)
+{
+       int status = mmc_gpio_get_cd(mmc);
+
+       if (status == -ENOSYS)
+               return 1; /* assume present */
+
+       return status;
+}
+
+static const struct mmc_host_ops meson_mmc_ops = {
+       .request        = meson_mmc_request,
+       .set_ios        = meson_mmc_set_ios,
+       .get_cd         = meson_mmc_get_cd,
+};
+
+static int meson_mmc_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct meson_host *host;
+       struct mmc_host *mmc;
+       int ret;
+
+       mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
+       if (!mmc)
+               return -ENOMEM;
+       host = mmc_priv(mmc);
+       host->mmc = mmc;
+       host->dev = &pdev->dev;
+       dev_set_drvdata(&pdev->dev, host);
+
+       spin_lock_init(&host->lock);
+
+       /* Get regulators and the supported OCR mask */
+       host->vqmmc_enabled = false;
+       ret = mmc_regulator_get_supply(mmc);
+       if (ret == -EPROBE_DEFER)
+               goto free_host;
+
+       ret = mmc_of_parse(mmc);
+       if (ret) {
+               dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
+               goto free_host;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       host->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(host->regs)) {
+               ret = PTR_ERR(host->regs);
+               goto free_host;
+       }
+
+       host->irq = platform_get_irq(pdev, 0);
+       if (host->irq == 0) {
+               dev_err(&pdev->dev, "failed to get interrupt resource.\n");
+               ret = -EINVAL;
+               goto free_host;
+       }
+
+       host->core_clk = devm_clk_get(&pdev->dev, "core");
+       if (IS_ERR(host->core_clk)) {
+               ret = PTR_ERR(host->core_clk);
+               goto free_host;
+       }
+
+       ret = clk_prepare_enable(host->core_clk);
+       if (ret)
+               goto free_host;
+
+       ret = meson_mmc_clk_init(host);
+       if (ret)
+               goto free_host;
+
+       /* Stop execution */
+       writel(0, host->regs + SD_EMMC_START);
+
+       /* clear, ack, enable all interrupts */
+       writel(0, host->regs + SD_EMMC_IRQ_EN);
+       writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
+
+       ret = devm_request_threaded_irq(&pdev->dev, host->irq,
+                                       meson_mmc_irq, meson_mmc_irq_thread,
+                                       IRQF_SHARED, DRIVER_NAME, host);
+       if (ret)
+               goto free_host;
+
+       /* data bounce buffer */
+       host->bounce_buf_size = SZ_512K;
+       host->bounce_buf =
+               dma_alloc_coherent(host->dev, host->bounce_buf_size,
+                                  &host->bounce_dma_addr, GFP_KERNEL);
+       if (host->bounce_buf == NULL) {
+               dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
+               ret = -ENOMEM;
+               goto free_host;
+       }
+
+       mmc->ops = &meson_mmc_ops;
+       mmc_add_host(mmc);
+
+       return 0;
+
+free_host:
+       clk_disable_unprepare(host->cfg_div_clk);
+       clk_disable_unprepare(host->core_clk);
+       mmc_free_host(mmc);
+       return ret;
+}
+
+static int meson_mmc_remove(struct platform_device *pdev)
+{
+       struct meson_host *host = dev_get_drvdata(&pdev->dev);
+
+       if (WARN_ON(!host))
+               return 0;
+
+       if (host->bounce_buf)
+               dma_free_coherent(host->dev, host->bounce_buf_size,
+                                 host->bounce_buf, host->bounce_dma_addr);
+
+       clk_disable_unprepare(host->cfg_div_clk);
+       clk_disable_unprepare(host->core_clk);
+
+       mmc_free_host(host->mmc);
+       return 0;
+}
+
+static const struct of_device_id meson_mmc_of_match[] = {
+       { .compatible = "amlogic,meson-gx-mmc", },
+       { .compatible = "amlogic,meson-gxbb-mmc", },
+       { .compatible = "amlogic,meson-gxl-mmc", },
+       { .compatible = "amlogic,meson-gxm-mmc", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
+
+static struct platform_driver meson_mmc_driver = {
+       .probe          = meson_mmc_probe,
+       .remove         = meson_mmc_remove,
+       .driver         = {
+               .name = DRIVER_NAME,
+               .of_match_table = of_match_ptr(meson_mmc_of_match),
+       },
+};
+
+module_platform_driver(meson_mmc_driver);
+
+MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
+MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
+MODULE_LICENSE("GPL v2");
index df990bb8c8736fafe2833e912672b5b6a170d4bc..01a804792f3007c1b8512b0cbdf4aea1a000cb82 100644 (file)
@@ -71,7 +71,12 @@ static unsigned int fmax = 515633;
  * @f_max: maximum clk frequency supported by the controller.
  * @signal_direction: input/out direction of bus signals can be indicated
  * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
- * @busy_detect: true if busy detection on dat0 is supported
+ * @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
+ * @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
+ *                   indicating that the card is busy
+ * @busy_detect_mask: bitmask identifying the bit in the MMCIMASK0 to mask for
+ *                   getting busy end detection interrupts
  * @pwrreg_nopower: bits in MMCIPOWER don't control ext. power supply
  * @explicit_mclk_control: enable explicit mclk control in driver.
  * @qcom_fifo: enables qcom specific fifo pio read logic.
@@ -98,6 +103,9 @@ struct variant_data {
        bool                    signal_direction;
        bool                    pwrreg_clkgate;
        bool                    busy_detect;
+       u32                     busy_dpsm_flag;
+       u32                     busy_detect_flag;
+       u32                     busy_detect_mask;
        bool                    pwrreg_nopower;
        bool                    explicit_mclk_control;
        bool                    qcom_fifo;
@@ -137,7 +145,7 @@ static struct variant_data variant_u300 = {
        .clkreg_enable          = MCI_ST_U300_HWFCEN,
        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
        .datalength_bits        = 16,
-       .datactrl_mask_sdio     = MCI_ST_DPSM_SDIOEN,
+       .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                        = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .f_max                  = 100000000,
@@ -152,7 +160,7 @@ static struct variant_data variant_nomadik = {
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
        .datalength_bits        = 24,
-       .datactrl_mask_sdio     = MCI_ST_DPSM_SDIOEN,
+       .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
@@ -170,7 +178,7 @@ static struct variant_data variant_ux500 = {
        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
        .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
        .datalength_bits        = 24,
-       .datactrl_mask_sdio     = MCI_ST_DPSM_SDIOEN,
+       .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
@@ -178,6 +186,9 @@ static struct variant_data variant_ux500 = {
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .busy_detect            = true,
+       .busy_dpsm_flag         = MCI_DPSM_ST_BUSYMODE,
+       .busy_detect_flag       = MCI_ST_CARDBUSY,
+       .busy_detect_mask       = MCI_ST_BUSYENDMASK,
        .pwrreg_nopower         = true,
 };
 
@@ -188,9 +199,9 @@ static struct variant_data variant_ux500v2 = {
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
        .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
-       .datactrl_mask_ddrmode  = MCI_ST_DPSM_DDRMODE,
+       .datactrl_mask_ddrmode  = MCI_DPSM_ST_DDRMODE,
        .datalength_bits        = 24,
-       .datactrl_mask_sdio     = MCI_ST_DPSM_SDIOEN,
+       .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
        .blksz_datactrl16       = true,
@@ -199,6 +210,9 @@ static struct variant_data variant_ux500v2 = {
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .busy_detect            = true,
+       .busy_dpsm_flag         = MCI_DPSM_ST_BUSYMODE,
+       .busy_detect_flag       = MCI_ST_CARDBUSY,
+       .busy_detect_mask       = MCI_ST_BUSYENDMASK,
        .pwrreg_nopower         = true,
 };
 
@@ -210,7 +224,7 @@ static struct variant_data variant_qcom = {
                                  MCI_QCOM_CLK_SELECT_IN_FBCLK,
        .clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
        .datactrl_mask_ddrmode  = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
-       .data_cmd_enable        = MCI_QCOM_CSPM_DATCMD,
+       .data_cmd_enable        = MCI_CPSM_QCOM_DATCMD,
        .blksz_datactrl4        = true,
        .datalength_bits        = 24,
        .pwrreg_powerup         = MCI_PWR_UP,
@@ -220,6 +234,7 @@ static struct variant_data variant_qcom = {
        .qcom_dml               = true,
 };
 
+/* Busy detection for the ST Micro variant */
 static int mmci_card_busy(struct mmc_host *mmc)
 {
        struct mmci_host *host = mmc_priv(mmc);
@@ -227,7 +242,7 @@ static int mmci_card_busy(struct mmc_host *mmc)
        int busy = 0;
 
        spin_lock_irqsave(&host->lock, flags);
-       if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
+       if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
                busy = 1;
        spin_unlock_irqrestore(&host->lock, flags);
 
@@ -294,8 +309,8 @@ static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
  */
 static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
 {
-       /* Keep ST Micro busy mode if enabled */
-       datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;
+       /* Keep busy mode in DPSM if enabled */
+       datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
 
        if (host->datactrl_reg != datactrl) {
                host->datactrl_reg = datactrl;
@@ -684,8 +699,7 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
        next->dma_chan = NULL;
 }
 
-static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
-                            bool is_first_req)
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
@@ -973,37 +987,66 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
             unsigned int status)
 {
        void __iomem *base = host->base;
-       bool sbc, busy_resp;
+       bool sbc;
 
        if (!cmd)
                return;
 
        sbc = (cmd == host->mrq->sbc);
-       busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY);
 
-       if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
-               MCI_CMDSENT|MCI_CMDRESPEND)))
+       /*
+        * The interrupt needs to be one of these to be considered worth
+        * handling. Note that we also tag on any latent IRQs that were
+        * postponed while waiting for busy status.
+        */
+       if (!((status|host->busy_status) &
+             (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
                return;
 
-       /* Check if we need to wait for busy completion. */
-       if (host->busy_status && (status & MCI_ST_CARDBUSY))
-               return;
+       /*
+        * ST Micro variant: handle busy detection.
+        */
+       if (host->variant->busy_detect) {
+               bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
 
-       /* Enable busy completion if needed and supported. */
-       if (!host->busy_status && busy_resp &&
-               !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
-               (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
-               writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
-                       base + MMCIMASK0);
-               host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
-               return;
-       }
+               /* We are still busy with a command; return */
+               if (host->busy_status &&
+                   (status & host->variant->busy_detect_flag))
+                       return;
+
+               /*
+                * We were not busy, but we now got a busy response on
+                * something that was not an error, and we double-check
+                * that the special busy status bit is still set before
+                * proceeding.
+                */
+               if (!host->busy_status && busy_resp &&
+                   !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
+                   (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
+                       /* Unmask the busy IRQ */
+                       writel(readl(base + MMCIMASK0) |
+                              host->variant->busy_detect_mask,
+                              base + MMCIMASK0);
+                       /*
+                        * Now cache the last response status code (until
+                        * the busy bit goes low), and return.
+                        */
+                       host->busy_status =
+                               status & (MCI_CMDSENT|MCI_CMDRESPEND);
+                       return;
+               }
 
-       /* At busy completion, mask the IRQ and complete the request. */
-       if (host->busy_status) {
-               writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
-                       base + MMCIMASK0);
-               host->busy_status = 0;
+               /*
+                * At this point we are no longer busy with a command and have
+                * not received a new busy request, so mask the busy IRQ and
+                * fall through to process the IRQ.
+                */
+               if (host->busy_status) {
+                       writel(readl(base + MMCIMASK0) &
+                              ~host->variant->busy_detect_mask,
+                              base + MMCIMASK0);
+                       host->busy_status = 0;
+               }
        }
 
        host->cmd = NULL;
@@ -1257,9 +1300,11 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
                        mmci_data_irq(host, host->data, status);
                }
 
-               /* Don't poll for busy completion in irq context. */
-               if (host->busy_status)
-                       status &= ~MCI_ST_CARDBUSY;
+               /*
+                * Don't poll for busy completion in irq context.
+                */
+               if (host->variant->busy_detect && host->busy_status)
+                       status &= ~host->variant->busy_detect_flag;
 
                ret = 1;
        } while (status);
@@ -1612,9 +1657,18 @@ static int mmci_probe(struct amba_device *dev,
        /* We support these capabilities. */
        mmc->caps |= MMC_CAP_CMD23;
 
+       /*
+        * Enable busy detection.
+        */
        if (variant->busy_detect) {
                mmci_ops.card_busy = mmci_card_busy;
-               mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
+               /*
+                * Not all variants have a flag to enable busy detection
+                * in the DPSM, but if they do, set it here.
+                */
+               if (variant->busy_dpsm_flag)
+                       mmci_write_datactrlreg(host,
+                                              host->variant->busy_dpsm_flag);
                mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
                mmc->max_busy_timeout = 0;
        }
index a1f5e4f49e2a3367038268f5bfc7ed43961482e4..56322c6afba4c9a32e4f54c2386aa192ead02bdf 100644 (file)
 #define MCI_QCOM_CLK_SELECT_IN_DDR_MODE        (BIT(14) | BIT(15))
 
 #define MMCIARGUMENT           0x008
-#define MMCICOMMAND            0x00c
-#define MCI_CPSM_RESPONSE      (1 << 6)
-#define MCI_CPSM_LONGRSP       (1 << 7)
-#define MCI_CPSM_INTERRUPT     (1 << 8)
-#define MCI_CPSM_PENDING       (1 << 9)
-#define MCI_CPSM_ENABLE                (1 << 10)
-/* Argument flag extenstions in the ST Micro versions */
-#define MCI_ST_SDIO_SUSP       (1 << 11)
-#define MCI_ST_ENCMD_COMPL     (1 << 12)
-#define MCI_ST_NIEN            (1 << 13)
-#define MCI_ST_CE_ATACMD       (1 << 14)
 
-/* Modified on Qualcomm Integrations */
-#define MCI_QCOM_CSPM_DATCMD           BIT(12)
-#define MCI_QCOM_CSPM_MCIABORT         BIT(13)
-#define MCI_QCOM_CSPM_CCSENABLE                BIT(14)
-#define MCI_QCOM_CSPM_CCSDISABLE       BIT(15)
-#define MCI_QCOM_CSPM_AUTO_CMD19       BIT(16)
-#define MCI_QCOM_CSPM_AUTO_CMD21       BIT(21)
+/* The command register controls the Command Path State Machine (CPSM) */
+#define MMCICOMMAND            0x00c
+#define MCI_CPSM_RESPONSE      BIT(6)
+#define MCI_CPSM_LONGRSP       BIT(7)
+#define MCI_CPSM_INTERRUPT     BIT(8)
+#define MCI_CPSM_PENDING       BIT(9)
+#define MCI_CPSM_ENABLE                BIT(10)
+/* Command register flag extensions in the ST Micro versions */
+#define MCI_CPSM_ST_SDIO_SUSP          BIT(11)
+#define MCI_CPSM_ST_ENCMD_COMPL                BIT(12)
+#define MCI_CPSM_ST_NIEN               BIT(13)
+#define MCI_CPSM_ST_CE_ATACMD          BIT(14)
+/* Command register flag extensions in the Qualcomm versions */
+#define MCI_CPSM_QCOM_PROGENA          BIT(11)
+#define MCI_CPSM_QCOM_DATCMD           BIT(12)
+#define MCI_CPSM_QCOM_MCIABORT         BIT(13)
+#define MCI_CPSM_QCOM_CCSENABLE                BIT(14)
+#define MCI_CPSM_QCOM_CCSDISABLE       BIT(15)
+#define MCI_CPSM_QCOM_AUTO_CMD19       BIT(16)
+#define MCI_CPSM_QCOM_AUTO_CMD21       BIT(21)
 
 #define MMCIRESPCMD            0x010
 #define MMCIRESPONSE0          0x014
 #define MMCIRESPONSE3          0x020
 #define MMCIDATATIMER          0x024
 #define MMCIDATALENGTH         0x028
+
+/* The data control register controls the Data Path State Machine (DPSM) */
 #define MMCIDATACTRL           0x02c
-#define MCI_DPSM_ENABLE                (1 << 0)
-#define MCI_DPSM_DIRECTION     (1 << 1)
-#define MCI_DPSM_MODE          (1 << 2)
-#define MCI_DPSM_DMAENABLE     (1 << 3)
-#define MCI_DPSM_BLOCKSIZE     (1 << 4)
+#define MCI_DPSM_ENABLE                BIT(0)
+#define MCI_DPSM_DIRECTION     BIT(1)
+#define MCI_DPSM_MODE          BIT(2)
+#define MCI_DPSM_DMAENABLE     BIT(3)
+#define MCI_DPSM_BLOCKSIZE     BIT(4)
 /* Control register extensions in the ST Micro U300 and Ux500 versions */
-#define MCI_ST_DPSM_RWSTART    (1 << 8)
-#define MCI_ST_DPSM_RWSTOP     (1 << 9)
-#define MCI_ST_DPSM_RWMOD      (1 << 10)
-#define MCI_ST_DPSM_SDIOEN     (1 << 11)
+#define MCI_DPSM_ST_RWSTART    BIT(8)
+#define MCI_DPSM_ST_RWSTOP     BIT(9)
+#define MCI_DPSM_ST_RWMOD      BIT(10)
+#define MCI_DPSM_ST_SDIOEN     BIT(11)
 /* Control register extensions in the ST Micro Ux500 versions */
-#define MCI_ST_DPSM_DMAREQCTL  (1 << 12)
-#define MCI_ST_DPSM_DBOOTMODEEN        (1 << 13)
-#define MCI_ST_DPSM_BUSYMODE   (1 << 14)
-#define MCI_ST_DPSM_DDRMODE    (1 << 15)
+#define MCI_DPSM_ST_DMAREQCTL  BIT(12)
+#define MCI_DPSM_ST_DBOOTMODEEN        BIT(13)
+#define MCI_DPSM_ST_BUSYMODE   BIT(14)
+#define MCI_DPSM_ST_DDRMODE    BIT(15)
+/* Control register extensions in the Qualcomm versions */
+#define MCI_DPSM_QCOM_DATA_PEND        BIT(17)
+#define MCI_DPSM_QCOM_RX_DATA_PEND BIT(20)
 
 #define MMCIDATACNT            0x030
 #define MMCISTATUS             0x034
 /* Extended status bits for the ST Micro variants */
 #define MCI_ST_SDIOITMASK      (1 << 22)
 #define MCI_ST_CEATAENDMASK    (1 << 23)
-#define MCI_ST_BUSYEND         (1 << 24)
+#define MCI_ST_BUSYENDMASK     (1 << 24)
 
 #define MMCIMASK1              0x040
 #define MMCIFIFOCNT            0x048
index 84e9afcb5c099bd73f080f3c8660fd7780bc3d02..10ef2ae1d2f6d80e1f563265263eb25cc7cd7ed4 100644 (file)
@@ -927,8 +927,7 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
                msdc_start_command(host, mrq, mrq->cmd);
 }
 
-static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
-               bool is_first_req)
+static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct msdc_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
@@ -1713,6 +1712,7 @@ static const struct of_device_id msdc_of_ids[] = {
        {   .compatible = "mediatek,mt8135-mmc", },
        {}
 };
+MODULE_DEVICE_TABLE(of, msdc_of_ids);
 
 static struct platform_driver mt_msdc_driver = {
        .probe = msdc_drv_probe,
index 5f2f24a7360d87988981372e5ba7eea438d22a78..ad11c4cc12eddc65c04908597ccfa6f162cc4cd4 100644 (file)
@@ -1565,8 +1565,7 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
        }
 }
 
-static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
-                              bool is_first_req)
+static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct omap_hsmmc_host *host = mmc_priv(mmc);
 
index 3ccaa1415f33b2a1b323a13cdfedf7d92cb1f083..ecb99a8d2fa21fe036c8973abc4ed00d5956e440 100644 (file)
@@ -190,8 +190,7 @@ static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
        return using_cookie;
 }
 
-static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
-               bool is_first_req)
+static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct realtek_pci_sdmmc *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
index 6e9c0f8fddb1064c64c195812fdf8ad220728360..dc1abd14acbc76301746cec3182f453cd8980278 100644 (file)
@@ -1374,6 +1374,8 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
 
        mutex_init(&host->host_mutex);
        rtsx_usb_init_host(host);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_enable(&pdev->dev);
 
 #ifdef RTSX_USB_USE_LEDS_CLASS
@@ -1428,6 +1430,7 @@ static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
 
        mmc_free_host(mmc);
        pm_runtime_disable(&pdev->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
        platform_set_drvdata(pdev, NULL);
 
        dev_dbg(&(pdev->dev),
index c531deef3258a8bccfa66e88cb351cb7ef17b08e..932a4b1fed33ffafb9a9c219e79976b6e23d8bfa 100644 (file)
@@ -28,7 +28,6 @@
 #include <mach/dma.h>
 #include <mach/gpio-samsung.h>
 
-#include <linux/platform_data/dma-s3c24xx.h>
 #include <linux/platform_data/mmc-s3cmci.h>
 
 #include "s3cmci.h"
@@ -1682,19 +1681,13 @@ static int s3cmci_probe(struct platform_device *pdev)
                gpio_direction_input(host->pdata->gpio_wprotect);
        }
 
-       /* depending on the dma state, get a dma channel to use. */
+       /* Depending on the dma state, get a DMA channel to use. */
 
        if (s3cmci_host_usedma(host)) {
-               dma_cap_mask_t mask;
-
-               dma_cap_zero(mask);
-               dma_cap_set(DMA_SLAVE, mask);
-
-               host->dma = dma_request_slave_channel_compat(mask,
-                       s3c24xx_dma_filter, (void *)DMACH_SDI, &pdev->dev, "rx-tx");
-               if (!host->dma) {
+               host->dma = dma_request_chan(&pdev->dev, "rx-tx");
+               ret = PTR_ERR_OR_ZERO(host->dma);
+               if (ret) {
                        dev_err(&pdev->dev, "cannot get DMA channel.\n");
-                       ret = -EBUSY;
                        goto probe_free_gpio_wp;
                }
        }
index 81d4dc034793ddb72c8ddb1d29c7d1bbb65f1a13..160f695cc09c611cf4aabdeeff236d672c66b639 100644 (file)
@@ -328,6 +328,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
        { "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
        { "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
        { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
+       { "80860F14" , "2" , &sdhci_acpi_slot_int_sdio },
        { "80860F14" , "3" , &sdhci_acpi_slot_int_sd   },
        { "80860F16" , NULL, &sdhci_acpi_slot_int_sd   },
        { "INT33BB"  , "2" , &sdhci_acpi_slot_int_sdio },
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
new file mode 100644 (file)
index 0000000..1501cfd
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+
+#include "sdhci-pltfm.h"
+
+/* HRS - Host Register Set (specific to Cadence) */
+#define SDHCI_CDNS_HRS04               0x10            /* PHY access port */
+#define   SDHCI_CDNS_HRS04_ACK                 BIT(26)
+#define   SDHCI_CDNS_HRS04_RD                  BIT(25)
+#define   SDHCI_CDNS_HRS04_WR                  BIT(24)
+#define   SDHCI_CDNS_HRS04_RDATA_SHIFT         12
+#define   SDHCI_CDNS_HRS04_WDATA_SHIFT         8
+#define   SDHCI_CDNS_HRS04_ADDR_SHIFT          0
+
+#define SDHCI_CDNS_HRS06               0x18            /* eMMC control */
+#define   SDHCI_CDNS_HRS06_TUNE_UP             BIT(15)
+#define   SDHCI_CDNS_HRS06_TUNE_SHIFT          8
+#define   SDHCI_CDNS_HRS06_TUNE_MASK           0x3f
+#define   SDHCI_CDNS_HRS06_MODE_MASK           0x7
+#define   SDHCI_CDNS_HRS06_MODE_SD             0x0
+#define   SDHCI_CDNS_HRS06_MODE_MMC_SDR                0x2
+#define   SDHCI_CDNS_HRS06_MODE_MMC_DDR                0x3
+#define   SDHCI_CDNS_HRS06_MODE_MMC_HS200      0x4
+#define   SDHCI_CDNS_HRS06_MODE_MMC_HS400      0x5
+
+/* SRS - Slot Register Set (SDHCI-compatible) */
+#define SDHCI_CDNS_SRS_BASE            0x200
+
+/* PHY */
+#define SDHCI_CDNS_PHY_DLY_SD_HS       0x00
+#define SDHCI_CDNS_PHY_DLY_SD_DEFAULT  0x01
+#define SDHCI_CDNS_PHY_DLY_UHS_SDR12   0x02
+#define SDHCI_CDNS_PHY_DLY_UHS_SDR25   0x03
+#define SDHCI_CDNS_PHY_DLY_UHS_SDR50   0x04
+#define SDHCI_CDNS_PHY_DLY_UHS_DDR50   0x05
+#define SDHCI_CDNS_PHY_DLY_EMMC_LEGACY 0x06
+#define SDHCI_CDNS_PHY_DLY_EMMC_SDR    0x07
+#define SDHCI_CDNS_PHY_DLY_EMMC_DDR    0x08
+
+/*
+ * The tuned val register is 6 bits wide, but not the whole range is
+ * usable.  The range 0-42 seems to be available (then 43 wraps around to 0),
+ * but I am not quite sure if that is official.  Use only 0 to 39 for safety.
+ */
+#define SDHCI_CDNS_MAX_TUNING_LOOP     40
+
+struct sdhci_cdns_priv {
+       void __iomem *hrs_addr;
+};
+
+static void sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
+                                    u8 addr, u8 data)
+{
+       void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS04;
+       u32 tmp;
+
+       tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) |
+             (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT);
+       writel(tmp, reg);
+
+       tmp |= SDHCI_CDNS_HRS04_WR;
+       writel(tmp, reg);
+
+       tmp &= ~SDHCI_CDNS_HRS04_WR;
+       writel(tmp, reg);
+}
+
+static void sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
+{
+       sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_SD_HS, 4);
+       sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_SD_DEFAULT, 4);
+       sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_LEGACY, 9);
+       sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_SDR, 2);
+       sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_DDR, 3);
+}
+
+static inline void *sdhci_cdns_priv(struct sdhci_host *host)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+       return sdhci_pltfm_priv(pltfm_host);
+}
+
+static unsigned int sdhci_cdns_get_timeout_clock(struct sdhci_host *host)
+{
+       /*
+        * Cadence's spec says the Timeout Clock Frequency is the same as the
+        * Base Clock Frequency.  Divide it by 1000 to return a value in kHz.
+        */
+       return host->max_clk / 1000;
+}
+
+static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
+                                        unsigned int timing)
+{
+       struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+       u32 mode, tmp;
+
+       switch (timing) {
+       case MMC_TIMING_MMC_HS:
+               mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
+               break;
+       case MMC_TIMING_MMC_DDR52:
+               mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
+               break;
+       case MMC_TIMING_MMC_HS200:
+               mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
+               break;
+       case MMC_TIMING_MMC_HS400:
+               mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
+               break;
+       default:
+               mode = SDHCI_CDNS_HRS06_MODE_SD;
+               break;
+       }
+
+       /* The speed mode for eMMC is selected by HRS06 register */
+       tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
+       tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
+       tmp |= mode;
+       writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
+
+       /* For SD, fall back to the default handler */
+       if (mode == SDHCI_CDNS_HRS06_MODE_SD)
+               sdhci_set_uhs_signaling(host, timing);
+}
+
+static const struct sdhci_ops sdhci_cdns_ops = {
+       .set_clock = sdhci_set_clock,
+       .get_timeout_clock = sdhci_cdns_get_timeout_clock,
+       .set_bus_width = sdhci_set_bus_width,
+       .reset = sdhci_reset,
+       .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
+       .ops = &sdhci_cdns_ops,
+};
+
+static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
+{
+       struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+       void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
+       u32 tmp;
+
+       if (WARN_ON(val > SDHCI_CDNS_HRS06_TUNE_MASK))
+               return -EINVAL;
+
+       tmp = readl(reg);
+       tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT);
+       tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT;
+       tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
+       writel(tmp, reg);
+
+       return readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS06_TUNE_UP),
+                                 0, 1);
+}
+
+static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       int cur_streak = 0;
+       int max_streak = 0;
+       int end_of_streak = 0;
+       int i;
+
+       /*
+        * This handler only implements the eMMC tuning that is specific to
+        * this controller.  Fall back to the standard method for SD timing.
+        */
+       if (host->timing != MMC_TIMING_MMC_HS200)
+               return sdhci_execute_tuning(mmc, opcode);
+
+       if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200))
+               return -EINVAL;
+
+       for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) {
+               if (sdhci_cdns_set_tune_val(host, i) ||
+                   mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */
+                       cur_streak = 0;
+               } else { /* good */
+                       cur_streak++;
+                       if (cur_streak > max_streak) {
+                               max_streak = cur_streak;
+                               end_of_streak = i;
+                       }
+               }
+       }
+
+       if (!max_streak) {
+               dev_err(mmc_dev(host->mmc), "no tuning point found\n");
+               return -EIO;
+       }
+
+       return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
+}
+
+static int sdhci_cdns_probe(struct platform_device *pdev)
+{
+       struct sdhci_host *host;
+       struct sdhci_pltfm_host *pltfm_host;
+       struct sdhci_cdns_priv *priv;
+       struct clk *clk;
+       int ret;
+
+       clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       ret = clk_prepare_enable(clk);
+       if (ret)
+               return ret;
+
+       host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, sizeof(*priv));
+       if (IS_ERR(host)) {
+               ret = PTR_ERR(host);
+               goto disable_clk;
+       }
+
+       pltfm_host = sdhci_priv(host);
+       pltfm_host->clk = clk;
+
+       priv = sdhci_cdns_priv(host);
+       priv->hrs_addr = host->ioaddr;
+       host->ioaddr += SDHCI_CDNS_SRS_BASE;
+       host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
+
+       ret = mmc_of_parse(host->mmc);
+       if (ret)
+               goto free;
+
+       sdhci_cdns_phy_init(priv);
+
+       ret = sdhci_add_host(host);
+       if (ret)
+               goto free;
+
+       return 0;
+free:
+       sdhci_pltfm_free(pdev);
+disable_clk:
+       clk_disable_unprepare(clk);
+
+       return ret;
+}
+
+static const struct of_device_id sdhci_cdns_match[] = {
+       { .compatible = "cdns,sd4hc" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sdhci_cdns_match);
+
+static struct platform_driver sdhci_cdns_driver = {
+       .driver = {
+               .name = "sdhci-cdns",
+               .pm = &sdhci_pltfm_pmops,
+               .of_match_table = sdhci_cdns_match,
+       },
+       .probe = sdhci_cdns_probe,
+       .remove = sdhci_pltfm_unregister,
+};
+module_platform_driver(sdhci_cdns_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("Cadence SD/SDIO/eMMC Host Controller Driver");
+MODULE_LICENSE("GPL");
index 72624666585008866dfea479b74112a642dd9059..d7046d67415a429a16502ffc5bcc14c515720b3e 100644 (file)
@@ -143,6 +143,14 @@ static void sdhci_iproc_writeb(struct sdhci_host *host, u8 val, int reg)
 }
 
 static const struct sdhci_ops sdhci_iproc_ops = {
+       .set_clock = sdhci_set_clock,
+       .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+       .set_bus_width = sdhci_set_bus_width,
+       .reset = sdhci_reset,
+       .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_ops sdhci_iproc_32only_ops = {
        .read_l = sdhci_iproc_readl,
        .read_w = sdhci_iproc_readw,
        .read_b = sdhci_iproc_readb,
@@ -156,6 +164,28 @@ static const struct sdhci_ops sdhci_iproc_ops = {
        .set_uhs_signaling = sdhci_set_uhs_signaling,
 };
 
+static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
+       .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+       .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
+       .ops = &sdhci_iproc_32only_ops,
+};
+
+static const struct sdhci_iproc_data iproc_cygnus_data = {
+       .pdata = &sdhci_iproc_cygnus_pltfm_data,
+       .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
+                       & SDHCI_MAX_BLOCK_MASK) |
+               SDHCI_CAN_VDD_330 |
+               SDHCI_CAN_VDD_180 |
+               SDHCI_CAN_DO_SUSPEND |
+               SDHCI_CAN_DO_HISPD |
+               SDHCI_CAN_DO_ADMA2 |
+               SDHCI_CAN_DO_SDMA,
+       .caps1 = SDHCI_DRIVER_TYPE_C |
+                SDHCI_DRIVER_TYPE_D |
+                SDHCI_SUPPORT_DDR50,
+       .mmc_caps = MMC_CAP_1_8V_DDR,
+};
+
 static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
        .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
        .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
@@ -182,7 +212,7 @@ static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
        .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
                  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
                  SDHCI_QUIRK_MISSING_CAPS,
-       .ops = &sdhci_iproc_ops,
+       .ops = &sdhci_iproc_32only_ops,
 };
 
 static const struct sdhci_iproc_data bcm2835_data = {
@@ -194,7 +224,8 @@ static const struct sdhci_iproc_data bcm2835_data = {
 
 static const struct of_device_id sdhci_iproc_of_match[] = {
        { .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
-       { .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_data },
+       { .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_cygnus_data},
+       { .compatible = "brcm,sdhci-iproc", .data = &iproc_data },
        { }
 };
 MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
index 90ed2e12d345d4ee91f6088aecac306897e2c266..32879b845b7548ef299add09d84d24e445296f2c 100644 (file)
@@ -18,7 +18,9 @@
 #include <linux/of_device.h>
 #include <linux/delay.h>
 #include <linux/mmc/mmc.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/iopoll.h>
 
 #include "sdhci-pltfm.h"
 
@@ -31,6 +33,7 @@
 #define HC_MODE_EN             0x1
 #define CORE_POWER             0x0
 #define CORE_SW_RST            BIT(7)
+#define FF_CLK_SW_RST_DIS      BIT(13)
 
 #define CORE_PWRCTL_STATUS     0xdc
 #define CORE_PWRCTL_MASK       0xe0
@@ -49,6 +52,7 @@
 #define INT_MASK               0xf
 #define MAX_PHASES             16
 #define CORE_DLL_LOCK          BIT(7)
+#define CORE_DDR_DLL_LOCK      BIT(11)
 #define CORE_DLL_EN            BIT(16)
 #define CORE_CDR_EN            BIT(17)
 #define CORE_CK_OUT_EN         BIT(18)
 #define CORE_DLL_PDN           BIT(29)
 #define CORE_DLL_RST           BIT(30)
 #define CORE_DLL_CONFIG                0x100
+#define CORE_CMD_DAT_TRACK_SEL BIT(0)
 #define CORE_DLL_STATUS                0x108
 
+#define CORE_DLL_CONFIG_2      0x1b4
+#define CORE_DDR_CAL_EN                BIT(0)
+#define CORE_FLL_CYCLE_CNT     BIT(18)
+#define CORE_DLL_CLOCK_DISABLE BIT(21)
+
 #define CORE_VENDOR_SPEC       0x10c
 #define CORE_CLK_PWRSAVE       BIT(1)
+#define CORE_HC_MCLK_SEL_DFLT  (2 << 8)
+#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
+#define CORE_HC_MCLK_SEL_MASK  (3 << 8)
+#define CORE_HC_SELECT_IN_EN   BIT(18)
+#define CORE_HC_SELECT_IN_HS400        (6 << 19)
+#define CORE_HC_SELECT_IN_MASK (7 << 19)
+
+#define CORE_CSR_CDC_CTLR_CFG0         0x130
+#define CORE_SW_TRIG_FULL_CALIB                BIT(16)
+#define CORE_HW_AUTOCAL_ENA            BIT(17)
+
+#define CORE_CSR_CDC_CTLR_CFG1         0x134
+#define CORE_CSR_CDC_CAL_TIMER_CFG0    0x138
+#define CORE_TIMER_ENA                 BIT(16)
+
+#define CORE_CSR_CDC_CAL_TIMER_CFG1    0x13C
+#define CORE_CSR_CDC_REFCOUNT_CFG      0x140
+#define CORE_CSR_CDC_COARSE_CAL_CFG    0x144
+#define CORE_CDC_OFFSET_CFG            0x14C
+#define CORE_CSR_CDC_DELAY_CFG         0x150
+#define CORE_CDC_SLAVE_DDA_CFG         0x160
+#define CORE_CSR_CDC_STATUS0           0x164
+#define CORE_CALIBRATION_DONE          BIT(0)
+
+#define CORE_CDC_ERROR_CODE_MASK       0x7000000
+
+#define CORE_CSR_CDC_GEN_CFG           0x178
+#define CORE_CDC_SWITCH_BYPASS_OFF     BIT(0)
+#define CORE_CDC_SWITCH_RC_EN          BIT(1)
+
+#define CORE_DDR_200_CFG               0x184
+#define CORE_CDC_T4_DLY_SEL            BIT(0)
+#define CORE_START_CDC_TRAFFIC         BIT(6)
+#define CORE_VENDOR_SPEC3      0x1b0
+#define CORE_PWRSAVE_DLL       BIT(3)
+
+#define CORE_DDR_CONFIG                0x1b8
+#define DDR_CONFIG_POR_VAL     0x80040853
 
 #define CORE_VENDOR_SPEC_CAPABILITIES0 0x11c
 
+#define INVALID_TUNING_PHASE   -1
+#define SDHCI_MSM_MIN_CLOCK    400000
+#define CORE_FREQ_100MHZ       (100 * 1000 * 1000)
+
 #define CDR_SELEXT_SHIFT       20
 #define CDR_SELEXT_MASK                (0xf << CDR_SELEXT_SHIFT)
 #define CMUX_SHIFT_PHASE_SHIFT 24
 #define CMUX_SHIFT_PHASE_MASK  (7 << CMUX_SHIFT_PHASE_SHIFT)
 
+#define MSM_MMC_AUTOSUSPEND_DELAY_MS   50
 struct sdhci_msm_host {
        struct platform_device *pdev;
        void __iomem *core_mem; /* MSM SDCC mapped address */
@@ -75,7 +128,14 @@ struct sdhci_msm_host {
        struct clk *clk;        /* main SD/MMC bus clock */
        struct clk *pclk;       /* SDHC peripheral bus clock */
        struct clk *bus_clk;    /* SDHC bus voter clock */
+       struct clk *xo_clk;     /* TCXO clk needed for FLL feature of cm_dll */
+       unsigned long clk_rate;
        struct mmc_host *mmc;
+       bool use_14lpp_dll_reset;
+       bool tuning_done;
+       bool calibration_done;
+       u8 saved_tuning_phase;
+       bool use_cdclp533;
 };
 
 /* Platform specific tuning */
@@ -115,6 +175,9 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
        u32 config;
        struct mmc_host *mmc = host->mmc;
 
+       if (phase > 0xf)
+               return -EINVAL;
+
        spin_lock_irqsave(&host->lock, flags);
 
        config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
@@ -136,9 +199,9 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
        config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
        writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
 
-       /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
-       writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-                       | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+       config |= CORE_CK_OUT_EN;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
 
        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
        rc = msm_dll_poll_ck_out_en(host, 1);
@@ -163,8 +226,8 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
  * Find out the greatest range of consecutive selected
  * DLL clock output phases that can be used as sampling
  * setting for SD3.0 UHS-I card read operation (in SDR104
- * timing mode) or for eMMC4.5 card read operation (in HS200
- * timing mode).
+ * timing mode) or for eMMC4.5 card read operation (in
+ * HS400/HS200 timing mode).
  * Select the 3/4 of the range and configure the DLL with the
  * selected DLL clock output phase.
  */
@@ -303,8 +366,11 @@ static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
 static int msm_init_cm_dll(struct sdhci_host *host)
 {
        struct mmc_host *mmc = host->mmc;
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int wait_cnt = 50;
        unsigned long flags;
+       u32 config;
 
        spin_lock_irqsave(&host->lock, flags);
 
@@ -313,33 +379,73 @@ static int msm_init_cm_dll(struct sdhci_host *host)
         * tuning is in progress. Keeping PWRSAVE ON may
         * turn off the clock.
         */
-       writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
-                       & ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC);
+       config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+       config &= ~CORE_CLK_PWRSAVE;
+       writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+       if (msm_host->use_14lpp_dll_reset) {
+               config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+               config &= ~CORE_CK_OUT_EN;
+               writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+               config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+               config |= CORE_DLL_CLOCK_DISABLE;
+               writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+       }
 
-       /* Write 1 to DLL_RST bit of DLL_CONFIG register */
-       writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-                       | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+       config |= CORE_DLL_RST;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
 
-       /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
-       writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-                       | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+       config |= CORE_DLL_PDN;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
        msm_cm_dll_set_freq(host);
 
-       /* Write 0 to DLL_RST bit of DLL_CONFIG register */
-       writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-                       & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+       if (msm_host->use_14lpp_dll_reset &&
+           !IS_ERR_OR_NULL(msm_host->xo_clk)) {
+               u32 mclk_freq = 0;
+
+               config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+               config &= CORE_FLL_CYCLE_CNT;
+               if (config)
+                       mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
+                                       clk_get_rate(msm_host->xo_clk));
+               else
+                       mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
+                                       clk_get_rate(msm_host->xo_clk));
+
+               config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+               config &= ~(0xFF << 10);
+               config |= mclk_freq << 10;
 
-       /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
-       writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-                       & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+               writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+               /* wait for 5us before enabling DLL clock */
+               udelay(5);
+       }
+
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+       config &= ~CORE_DLL_RST;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+       config &= ~CORE_DLL_PDN;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+       if (msm_host->use_14lpp_dll_reset) {
+               msm_cm_dll_set_freq(host);
+               config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+               config &= ~CORE_DLL_CLOCK_DISABLE;
+               writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+       }
 
-       /* Set DLL_EN bit to 1. */
-       writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-                       | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+       config |= CORE_DLL_EN;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
 
-       /* Set CK_OUT_EN bit to 1. */
-       writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
-                       | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+       config |= CORE_CK_OUT_EN;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
 
        /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
        while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
@@ -358,6 +464,200 @@ static int msm_init_cm_dll(struct sdhci_host *host)
        return 0;
 }
 
+static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+       u32 config, calib_done;
+       int ret;
+
+       pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+       /*
+        * Retuning in HS400 (DDR mode) will fail; just reset the
+        * tuning block and restore the saved tuning phase.
+        */
+       ret = msm_init_cm_dll(host);
+       if (ret)
+               goto out;
+
+       /* Set the selected phase in delay line hw block */
+       ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+       if (ret)
+               goto out;
+
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+       config |= CORE_CMD_DAT_TRACK_SEL;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+       config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+       config &= ~CORE_CDC_T4_DLY_SEL;
+       writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+
+       config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+       config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
+       writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+       config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+       config |= CORE_CDC_SWITCH_RC_EN;
+       writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+       config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+       config &= ~CORE_START_CDC_TRAFFIC;
+       writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+
+       /*
+        * Perform CDC Register Initialization Sequence
+        *
+        * CORE_CSR_CDC_CTLR_CFG0       0x11800EC
+        * CORE_CSR_CDC_CTLR_CFG1       0x3011111
+        * CORE_CSR_CDC_CAL_TIMER_CFG0  0x1201000
+        * CORE_CSR_CDC_CAL_TIMER_CFG1  0x4
+        * CORE_CSR_CDC_REFCOUNT_CFG    0xCB732020
+        * CORE_CSR_CDC_COARSE_CAL_CFG  0xB19
+        * CORE_CSR_CDC_DELAY_CFG       0x3AC
+        * CORE_CDC_OFFSET_CFG          0x0
+        * CORE_CDC_SLAVE_DDA_CFG       0x16334
+        */
+
+       writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+       writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
+       writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+       writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
+       writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
+       writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
+       writel_relaxed(0x3AC, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
+       writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
+       writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
+
+       /* CDC HW Calibration */
+
+       config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+       config |= CORE_SW_TRIG_FULL_CALIB;
+       writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+       config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+       config &= ~CORE_SW_TRIG_FULL_CALIB;
+       writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+       config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+       config |= CORE_HW_AUTOCAL_ENA;
+       writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+       config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+       config |= CORE_TIMER_ENA;
+       writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+
+       ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
+                                        calib_done,
+                                        (calib_done & CORE_CALIBRATION_DONE),
+                                        1, 50);
+
+       if (ret == -ETIMEDOUT) {
+               pr_err("%s: %s: CDC calibration was not completed\n",
+                      mmc_hostname(host->mmc), __func__);
+               goto out;
+       }
+
+       ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+                       & CORE_CDC_ERROR_CODE_MASK;
+       if (ret) {
+               pr_err("%s: %s: CDC error code %d\n",
+                      mmc_hostname(host->mmc), __func__, ret);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+       config |= CORE_START_CDC_TRAFFIC;
+       writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+out:
+       pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+                __func__, ret);
+       return ret;
+}
+
+static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+{
+       u32 dll_status, config;
+       int ret;
+
+       pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+       /*
+        * The CORE_DDR_CONFIG register currently defaults to the desired
+        * configuration on reset. Reprogram the power-on reset (POR) value
+        * here in case a bootloader has modified it. In the future, if this
+        * changes, the desired values will need to be programmed
+        * appropriately.
+        */
+       writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + CORE_DDR_CONFIG);
+
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+       config |= CORE_DDR_CAL_EN;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+
+       ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
+                                        dll_status,
+                                        (dll_status & CORE_DDR_DLL_LOCK),
+                                        10, 1000);
+
+       if (ret == -ETIMEDOUT) {
+               pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
+                      mmc_hostname(host->mmc), __func__);
+               goto out;
+       }
+
+       config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3);
+       config |= CORE_PWRSAVE_DLL;
+       writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC3);
+
+       /*
+        * Drain the write buffer to ensure the above DLL calibration
+        * and PWRSAVE DLL writes have taken effect.
+        */
+       wmb();
+out:
+       pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+                __func__, ret);
+       return ret;
+}
+
+static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+       int ret;
+       u32 config;
+
+       pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+       /*
+        * Retuning in HS400 (DDR mode) will fail; just reset the
+        * tuning block and restore the saved tuning phase.
+        */
+       ret = msm_init_cm_dll(host);
+       if (ret)
+               goto out;
+
+       /* Set the selected phase in delay line hw block */
+       ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+       if (ret)
+               goto out;
+
+       config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+       config |= CORE_CMD_DAT_TRACK_SEL;
+       writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+       if (msm_host->use_cdclp533)
+               ret = sdhci_msm_cdclp533_calibration(host);
+       else
+               ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+       pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+                __func__, ret);
+       return ret;
+}
+
 static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
 {
        int tuning_seq_cnt = 3;
@@ -365,14 +665,17 @@ static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
        int rc;
        struct mmc_host *mmc = host->mmc;
        struct mmc_ios ios = host->mmc->ios;
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
 
        /*
         * Tuning is required for SDR104, HS200 and HS400 cards, and only
         * if the clock frequency is greater than 100 MHz in these modes.
         */
-       if (host->clock <= 100 * 1000 * 1000 ||
-           !((ios.timing == MMC_TIMING_MMC_HS200) ||
-             (ios.timing == MMC_TIMING_UHS_SDR104)))
+       if (host->clock <= CORE_FREQ_100MHZ ||
+           !(ios.timing == MMC_TIMING_MMC_HS400 ||
+           ios.timing == MMC_TIMING_MMC_HS200 ||
+           ios.timing == MMC_TIMING_UHS_SDR104))
                return 0;
 
 retry:
@@ -388,6 +691,7 @@ static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
                if (rc)
                        return rc;
 
+               msm_host->saved_tuning_phase = phase;
                rc = mmc_send_tuning(mmc, opcode, NULL);
                if (!rc) {
                        /* Tuning is successful at this tuning point */
@@ -423,6 +727,8 @@ static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
                rc = -EIO;
        }
 
+       if (!rc)
+               msm_host->tuning_done = true;
        return rc;
 }
 
@@ -430,7 +736,10 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
                                        unsigned int uhs)
 {
        struct mmc_host *mmc = host->mmc;
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        u16 ctrl_2;
+       u32 config;
 
        ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        /* Select Bus Speed Mode for host */
@@ -445,6 +754,7 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
        case MMC_TIMING_UHS_SDR50:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
                break;
+       case MMC_TIMING_MMC_HS400:
        case MMC_TIMING_MMC_HS200:
        case MMC_TIMING_UHS_SDR104:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
@@ -461,15 +771,42 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
         * provide feedback clock, the mode selection can be any value less
         * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
         */
-       if (host->clock <= 100000000 &&
-           (uhs == MMC_TIMING_MMC_HS400 ||
-            uhs == MMC_TIMING_MMC_HS200 ||
-            uhs == MMC_TIMING_UHS_SDR104))
-               ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+       if (host->clock <= CORE_FREQ_100MHZ) {
+               if (uhs == MMC_TIMING_MMC_HS400 ||
+                   uhs == MMC_TIMING_MMC_HS200 ||
+                   uhs == MMC_TIMING_UHS_SDR104)
+                       ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+               /*
+                * The DLL is not required for clock <= 100 MHz;
+                * make sure it is disabled when not required.
+                */
+               config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+               config |= CORE_DLL_RST;
+               writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+               config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+               config |= CORE_DLL_PDN;
+               writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+               /*
+                * The DLL needs to be restored and CDCLP533 recalibrated
+                * when the clock frequency is set back to 400MHz.
+                */
+               msm_host->calibration_done = false;
+       }
 
        dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
                mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
        sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+
+       spin_unlock_irq(&host->lock);
+       /* CDCLP533 HW calibration is only required for HS400 mode */
+       if (host->clock > CORE_FREQ_100MHZ &&
+           msm_host->tuning_done && !msm_host->calibration_done &&
+           mmc->ios.timing == MMC_TIMING_MMC_HS400)
+               if (!sdhci_msm_hs400_dll_calibration(host))
+                       msm_host->calibration_done = true;
+       spin_lock_irq(&host->lock);
 }
 
 static void sdhci_msm_voltage_switch(struct sdhci_host *host)
@@ -505,6 +842,183 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+       return clk_round_rate(msm_host->clk, ULONG_MAX);
+}
+
+static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
+{
+       return SDHCI_MSM_MIN_CLOCK;
+}
+
+/**
+ * __sdhci_msm_set_clock - sdhci_msm clock control.
+ *
+ * Description:
+ * The MSM controller does not use the internal divider and
+ * instead directly controls the GCC clock, as per the
+ * HW recommendation.
+ **/
+void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+       u16 clk;
+       /*
+        * Keep actual_clock as zero:
+        * - no divider is used, so actual_clock is not needed.
+        * - the MSM controller uses SDCLK for data timeout calculations; if
+        *   actual_clock is zero, host->clock is used instead.
+        */
+       host->mmc->actual_clock = 0;
+
+       sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+       if (clock == 0)
+               return;
+
+       /*
+        * The MSM controller does not use the clock divider.
+        * Read SDHCI_CLOCK_CONTROL and enable the clock
+        * without programming a divider value.
+        */
+       clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+       sdhci_enable_clk(host, clk);
+}
+
+/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
+static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+       struct mmc_ios curr_ios = host->mmc->ios;
+       u32 config, dll_lock;
+       int rc;
+
+       if (!clock) {
+               msm_host->clk_rate = clock;
+               goto out;
+       }
+
+       spin_unlock_irq(&host->lock);
+       /*
+        * The SDHC requires the internal clock frequency to be double the
+        * actual clock that will be set for DDR mode. The controller
+        * uses the faster clock (100/400 MHz) for some of its parts and
+        * sends the actual required clock (50/200 MHz) to the card.
+        */
+       if (curr_ios.timing == MMC_TIMING_UHS_DDR50 ||
+           curr_ios.timing == MMC_TIMING_MMC_DDR52 ||
+           curr_ios.timing == MMC_TIMING_MMC_HS400)
+               clock *= 2;
+       /*
+        * In general all timing modes are controlled via UHS mode select in
+        * Host Control2 register. The eMMC-specific HS200/HS400 modes are
+        * not defined there, hence we use these values:
+        *
+        * HS200 - SDR104 (since the two are functionally equivalent)
+        * HS400 - This involves multiple configurations
+        *              Initially SDR104 - when tuning is required as HS200
+        *              Then when switching to DDR @ 400MHz (HS400) we use
+        *              the vendor specific HC_SELECT_IN to control the mode.
+        *
+        * In addition to controlling the modes we also need to select the
+        * correct input clock for DLL depending on the mode.
+        *
+        * HS400 - divided clock (free running MCLK/2)
+        * All other modes - default (free running MCLK)
+        */
+       if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
+               /* Select the divided clock (free running MCLK/2) */
+               config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+               config &= ~CORE_HC_MCLK_SEL_MASK;
+               config |= CORE_HC_MCLK_SEL_HS400;
+
+               writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+               /*
+                * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+                * register
+                */
+               if (msm_host->tuning_done && !msm_host->calibration_done) {
+                       /*
+                        * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
+                        * field in VENDOR_SPEC_FUNC
+                        */
+                       config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+                       config |= CORE_HC_SELECT_IN_HS400;
+                       config |= CORE_HC_SELECT_IN_EN;
+                       writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+               }
+               if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
+                       /*
+                        * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
+                        * CORE_DLL_STATUS to be set.  This should get set
+                        * within 15 us at 200 MHz.
+                        */
+                       rc = readl_relaxed_poll_timeout(host->ioaddr +
+                                                       CORE_DLL_STATUS,
+                                                       dll_lock,
+                                                       (dll_lock &
+                                                       (CORE_DLL_LOCK |
+                                                       CORE_DDR_DLL_LOCK)), 10,
+                                                       1000);
+                       if (rc == -ETIMEDOUT)
+                               pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
+                                      mmc_hostname(host->mmc), dll_lock);
+               }
+       } else {
+               if (!msm_host->use_cdclp533) {
+                       config = readl_relaxed(host->ioaddr +
+                                       CORE_VENDOR_SPEC3);
+                       config &= ~CORE_PWRSAVE_DLL;
+                       writel_relaxed(config, host->ioaddr +
+                                       CORE_VENDOR_SPEC3);
+               }
+
+               config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+               config &= ~CORE_HC_MCLK_SEL_MASK;
+               config |= CORE_HC_MCLK_SEL_DFLT;
+               writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+               /*
+                * Disable HC_SELECT_IN to be able to use the UHS mode select
+                * configuration from Host Control2 register for all other
+                * modes.
+                * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
+                * in VENDOR_SPEC_FUNC
+                */
+               config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+               config &= ~CORE_HC_SELECT_IN_EN;
+               config &= ~CORE_HC_SELECT_IN_MASK;
+               writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+       }
+
+       /*
+        * Make sure the above writes affecting the free-running MCLK have
+        * completed before changing the clk_rate at GCC.
+        */
+       wmb();
+
+       rc = clk_set_rate(msm_host->clk, clock);
+       if (rc) {
+               pr_err("%s: Failed to set clock at rate %u at timing %d\n",
+                      mmc_hostname(host->mmc), clock,
+                      curr_ios.timing);
+               goto out_lock;
+       }
+       msm_host->clk_rate = clock;
+       pr_debug("%s: Setting clock at rate %lu at timing %d\n",
+                mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
+                curr_ios.timing);
+
+out_lock:
+       spin_lock_irq(&host->lock);
+out:
+       __sdhci_msm_set_clock(host, clock);
+}
+
 static const struct of_device_id sdhci_msm_dt_match[] = {
        { .compatible = "qcom,sdhci-msm-v4" },
        {},
@@ -515,7 +1029,9 @@ MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
 static const struct sdhci_ops sdhci_msm_ops = {
        .platform_execute_tuning = sdhci_msm_execute_tuning,
        .reset = sdhci_reset,
-       .set_clock = sdhci_set_clock,
+       .set_clock = sdhci_msm_set_clock,
+       .get_min_clock = sdhci_msm_get_min_clock,
+       .get_max_clock = sdhci_msm_get_max_clock,
        .set_bus_width = sdhci_set_bus_width,
        .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
        .voltage_switch = sdhci_msm_voltage_switch,
@@ -524,7 +1040,9 @@ static const struct sdhci_ops sdhci_msm_ops = {
 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
        .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
                  SDHCI_QUIRK_NO_CARD_NO_RESET |
-                 SDHCI_QUIRK_SINGLE_POWER_WRITE,
+                 SDHCI_QUIRK_SINGLE_POWER_WRITE |
+                 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+       .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
        .ops = &sdhci_msm_ops,
 };
 
@@ -536,7 +1054,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        struct resource *core_memres;
        int ret;
        u16 host_version, core_minor;
-       u32 core_version, caps;
+       u32 core_version, config;
        u8 core_major;
 
        host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
@@ -554,6 +1072,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 
        sdhci_get_of_property(pdev);
 
+       msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+
        /* Setup SDCC bus voter clock. */
        msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
        if (!IS_ERR(msm_host->bus_clk)) {
@@ -586,6 +1106,16 @@ static int sdhci_msm_probe(struct platform_device *pdev)
                goto pclk_disable;
        }
 
+       /*
+        * The xo clock is needed for the FLL feature of cm_dll.
+        * If the xo clock is not specified in DT, warn and proceed.
+        */
+       msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
+       if (IS_ERR(msm_host->xo_clk)) {
+               ret = PTR_ERR(msm_host->xo_clk);
+               dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
+       }
+
        /* Vote for maximum clock rate for maximum performance */
        ret = clk_set_rate(msm_host->clk, INT_MAX);
        if (ret)
@@ -604,9 +1134,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
                goto clk_disable;
        }
 
-       /* Reset the core and Enable SDHC mode */
-       writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
-                      CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+       config = readl_relaxed(msm_host->core_mem + CORE_POWER);
+       config |= CORE_SW_RST;
+       writel_relaxed(config, msm_host->core_mem + CORE_POWER);
 
        /* SW reset can take up to 10 HCLK + 15 MCLK cycles. (min 40 us) */
        usleep_range(1000, 5000);
@@ -619,6 +1149,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        /* Set HC_MODE_EN bit in HC_MODE register */
        writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
 
+       config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
+       config |= FF_CLK_SW_RST_DIS;
+       writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);
+
        host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
        dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
                host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
@@ -631,14 +1165,24 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
                core_version, core_major, core_minor);
 
+       if (core_major == 1 && core_minor >= 0x42)
+               msm_host->use_14lpp_dll_reset = true;
+
+       /*
+        * SDCC 5 controllers with major version 1 and minor version 0x34 or
+        * later that support HS400 mode use the CM DLL instead of the
+        * CDC LP 533 DLL.
+        */
+       if (core_major == 1 && core_minor < 0x34)
+               msm_host->use_cdclp533 = true;
+
        /*
         * Support for some capabilities is not advertised by newer
         * controller versions and must be explicitly enabled.
         */
        if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
-               caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
-               caps |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
-               writel_relaxed(caps, host->ioaddr +
+               config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
+               config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
+               writel_relaxed(config, host->ioaddr +
                               CORE_VENDOR_SPEC_CAPABILITIES0);
        }
 
@@ -659,12 +1203,26 @@ static int sdhci_msm_probe(struct platform_device *pdev)
                goto clk_disable;
        }
 
+       pm_runtime_get_noresume(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev,
+                                        MSM_MMC_AUTOSUSPEND_DELAY_MS);
+       pm_runtime_use_autosuspend(&pdev->dev);
+
        ret = sdhci_add_host(host);
        if (ret)
-               goto clk_disable;
+               goto pm_runtime_disable;
+
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
 
        return 0;
 
+pm_runtime_disable:
+       pm_runtime_disable(&pdev->dev);
+       pm_runtime_set_suspended(&pdev->dev);
+       pm_runtime_put_noidle(&pdev->dev);
 clk_disable:
        clk_disable_unprepare(msm_host->clk);
 pclk_disable:
@@ -686,6 +1244,11 @@ static int sdhci_msm_remove(struct platform_device *pdev)
                    0xffffffff);
 
        sdhci_remove_host(host, dead);
+
+       pm_runtime_get_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+       pm_runtime_put_noidle(&pdev->dev);
+
        clk_disable_unprepare(msm_host->clk);
        clk_disable_unprepare(msm_host->pclk);
        if (!IS_ERR(msm_host->bus_clk))
@@ -694,12 +1257,57 @@ static int sdhci_msm_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int sdhci_msm_runtime_suspend(struct device *dev)
+{
+       struct sdhci_host *host = dev_get_drvdata(dev);
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+       clk_disable_unprepare(msm_host->clk);
+       clk_disable_unprepare(msm_host->pclk);
+
+       return 0;
+}
+
+static int sdhci_msm_runtime_resume(struct device *dev)
+{
+       struct sdhci_host *host = dev_get_drvdata(dev);
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+       int ret;
+
+       ret = clk_prepare_enable(msm_host->clk);
+       if (ret) {
+               dev_err(dev, "clk_enable failed for core_clk: %d\n", ret);
+               return ret;
+       }
+       ret = clk_prepare_enable(msm_host->pclk);
+       if (ret) {
+               dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret);
+               clk_disable_unprepare(msm_host->clk);
+               return ret;
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops sdhci_msm_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
+                          sdhci_msm_runtime_resume,
+                          NULL)
+};
+
 static struct platform_driver sdhci_msm_driver = {
        .probe = sdhci_msm_probe,
        .remove = sdhci_msm_remove,
        .driver = {
                   .name = "sdhci_msm",
                   .of_match_table = sdhci_msm_dt_match,
+                  .pm = &sdhci_msm_pm_ops,
        },
 };
 
index a9b7fc06c434326a05c77dfc2e4f6cce32317d30..2f9ad213377a2ccb9091693ba749266fa4b420c4 100644 (file)
@@ -100,6 +100,7 @@ static const struct of_device_id sdhci_at91_dt_match[] = {
        { .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
        {}
 };
+MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
 
 #ifdef CONFIG_PM
 static int sdhci_at91_runtime_suspend(struct device *dev)
index 1bb11e4a9fe53f7e01eb81bb87482f6bd96a56f1..9a6eb4492172fafd81cc9a2e6162f7d8b331b0c4 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/of.h>
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/sys_soc.h>
 #include <linux/mmc/host.h>
 #include "sdhci-pltfm.h"
 #include "sdhci-esdhc.h"
@@ -28,6 +29,7 @@
 struct sdhci_esdhc {
        u8 vendor_ver;
        u8 spec_ver;
+       bool quirk_incorrect_hostver;
 };
 
 /**
@@ -87,6 +89,8 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
 static u16 esdhc_readw_fixup(struct sdhci_host *host,
                                     int spec_reg, u32 value)
 {
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u16 ret;
        int shift = (spec_reg & 0x2) * 8;
 
@@ -94,6 +98,12 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host,
                ret = value & 0xffff;
        else
                ret = (value >> shift) & 0xffff;
+       /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
+        * vendor version and spec version information.
+        */
+       if ((spec_reg == SDHCI_HOST_VERSION) &&
+           (esdhc->quirk_incorrect_hostver))
+               ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
        return ret;
 }
 
@@ -572,6 +582,12 @@ static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
        .ops = &sdhci_esdhc_le_ops,
 };
 
+static struct soc_device_attribute soc_incorrect_hostver[] = {
+       { .family = "QorIQ T4240", .revision = "1.0", },
+       { .family = "QorIQ T4240", .revision = "2.0", },
+       { },
+};
+
 static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
 {
        struct sdhci_pltfm_host *pltfm_host;
@@ -585,6 +601,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
        esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
                             SDHCI_VENDOR_VER_SHIFT;
        esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
+       if (soc_device_match(soc_incorrect_hostver))
+               esdhc->quirk_incorrect_hostver = true;
+       else
+               esdhc->quirk_incorrect_hostver = false;
 }
 
 static int sdhci_esdhc_probe(struct platform_device *pdev)
index 1d9e00a00e9fc986eb0bb01ff55a0966f03f7d5c..1a72d32af07f333504f7d85ff287fcd5dbf62c7b 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/mmc/slot-gpio.h>
 #include <linux/mmc/sdhci-pci-data.h>
+#include <linux/acpi.h>
 
 #include "sdhci.h"
 #include "sdhci-pci.h"
@@ -375,6 +376,44 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
        return 0;
 }
 
+#ifdef CONFIG_ACPI
+static int ni_set_max_freq(struct sdhci_pci_slot *slot)
+{
+       acpi_status status;
+       unsigned long long max_freq;
+
+       status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
+                                      "MXFQ", NULL, &max_freq);
+       if (ACPI_FAILURE(status)) {
+               dev_err(&slot->chip->pdev->dev,
+                       "MXFQ not found in acpi table\n");
+               return -EINVAL;
+       }
+
+       slot->host->mmc->f_max = max_freq * 1000000;
+
+       return 0;
+}
+#else
+static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
+{
+       return 0;
+}
+#endif
+
+static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+       int err;
+
+       err = ni_set_max_freq(slot);
+       if (err)
+               return err;
+
+       slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
+                                MMC_CAP_WAIT_WHILE_BUSY;
+       return 0;
+}
+
 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
        slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
@@ -390,7 +429,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
        slot->cd_override_level = true;
        if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
            slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
-           slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) {
+           slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
+           slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD) {
                slot->host->mmc_host_ops.get_cd = bxt_get_cd;
                slot->host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
        }
@@ -447,6 +487,15 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
        .ops            = &sdhci_intel_byt_ops,
 };
 
+static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
+       .quirks         = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+       .quirks2        = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
+                         SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+       .allow_runtime_pm = true,
+       .probe_slot     = ni_byt_sdio_probe_slot,
+       .ops            = &sdhci_intel_byt_ops,
+};
+
 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
        .quirks         = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
        .quirks2        = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
@@ -1076,6 +1125,14 @@ static const struct pci_device_id pci_ids[] = {
                .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
        },
 
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BYT_SDIO,
+               .subvendor      = PCI_VENDOR_ID_NI,
+               .subdevice      = 0x7884,
+               .driver_data    = (kernel_ulong_t)&sdhci_ni_byt_sdio,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_INTEL,
                .device         = PCI_DEVICE_ID_INTEL_BYT_SDIO,
@@ -1276,6 +1333,30 @@ static const struct pci_device_id pci_ids[] = {
                .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
        },
 
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_GLK_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_GLK_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_GLK_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_O2,
                .device         = PCI_DEVICE_ID_O2_8120,
@@ -1735,11 +1816,16 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
        host->mmc->slotno = slotno;
        host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
 
-       if (slot->cd_idx >= 0 &&
-           mmc_gpiod_request_cd(host->mmc, slot->cd_con_id, slot->cd_idx,
-                                slot->cd_override_level, 0, NULL)) {
-               dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
-               slot->cd_idx = -1;
+       if (slot->cd_idx >= 0) {
+               ret = mmc_gpiod_request_cd(host->mmc, slot->cd_con_id, slot->cd_idx,
+                                          slot->cd_override_level, 0, NULL);
+               if (ret == -EPROBE_DEFER)
+                       goto remove;
+
+               if (ret) {
+                       dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
+                       slot->cd_idx = -1;
+               }
        }
 
        ret = sdhci_add_host(host);
index 6bccf56bc5fff654d203ead074a6a53ae0409623..4abdaed72bd481b9195a42d0ead707c02227b7fa 100644 (file)
@@ -34,6 +34,9 @@
 #define PCI_DEVICE_ID_INTEL_APL_SD     0x5aca
 #define PCI_DEVICE_ID_INTEL_APL_EMMC   0x5acc
 #define PCI_DEVICE_ID_INTEL_APL_SDIO   0x5ad0
+#define PCI_DEVICE_ID_INTEL_GLK_SD     0x31ca
+#define PCI_DEVICE_ID_INTEL_GLK_EMMC   0x31cc
+#define PCI_DEVICE_ID_INTEL_GLK_SDIO   0x31d0
 
 /*
  * PCI registers
index 3280f207795939138ff5ec0d2689184e0f48f172..957839d0fe376611443ed29d34c4d0c56ec5981a 100644 (file)
@@ -106,7 +106,7 @@ extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host);
 
 static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
 {
-       return (void *)host->private;
+       return host->private;
 }
 
 extern const struct dev_pm_ops sdhci_pltfm_pmops;
index 784c5a848fb4fb9e7f4581cfce5e8aac6e04aa83..de219ca7ea7c398d73e250f0d281b4e1fdc0a2db 100644 (file)
@@ -121,7 +121,9 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
         * speed possible with selected clock source and skip the division.
         */
        if (ourhost->no_divider) {
+               spin_unlock_irq(&ourhost->host->lock);
                rate = clk_round_rate(clksrc, wanted);
+               spin_lock_irq(&ourhost->host->lock);
                return wanted - rate;
        }
 
index 42ef3ebb1d8cf9d57f30e48d21c3a5250aea16ea..111991e5b9a0e7ecf587eaf402a5ecd0d2906f55 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/scatterlist.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
 
 #include <linux/leds.h>
 
@@ -1343,20 +1344,10 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
 }
 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
 
-void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
 {
-       u16 clk;
        unsigned long timeout;
 
-       host->mmc->actual_clock = 0;
-
-       sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
-
-       if (clock == 0)
-               return;
-
-       clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
-
        clk |= SDHCI_CLOCK_INT_EN;
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
@@ -1377,6 +1368,22 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
        clk |= SDHCI_CLOCK_CARD_EN;
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 }
+EXPORT_SYMBOL_GPL(sdhci_enable_clk);
+
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+       u16 clk;
+
+       host->mmc->actual_clock = 0;
+
+       sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+       if (clock == 0)
+               return;
+
+       clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+       sdhci_enable_clk(host, clk);
+}
 EXPORT_SYMBOL_GPL(sdhci_set_clock);
 
 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
@@ -1623,7 +1630,14 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 
        if ((ios->timing == MMC_TIMING_SD_HS ||
-            ios->timing == MMC_TIMING_MMC_HS)
+            ios->timing == MMC_TIMING_MMC_HS ||
+            ios->timing == MMC_TIMING_MMC_HS400 ||
+            ios->timing == MMC_TIMING_MMC_HS200 ||
+            ios->timing == MMC_TIMING_MMC_DDR52 ||
+            ios->timing == MMC_TIMING_UHS_SDR50 ||
+            ios->timing == MMC_TIMING_UHS_SDR104 ||
+            ios->timing == MMC_TIMING_UHS_DDR50 ||
+            ios->timing == MMC_TIMING_UHS_SDR25)
            && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
                ctrl |= SDHCI_CTRL_HISPD;
        else
@@ -1632,16 +1646,6 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        if (host->version >= SDHCI_SPEC_300) {
                u16 clk, ctrl_2;
 
-               /* In case of UHS-I modes, set High Speed Enable */
-               if ((ios->timing == MMC_TIMING_MMC_HS400) ||
-                   (ios->timing == MMC_TIMING_MMC_HS200) ||
-                   (ios->timing == MMC_TIMING_MMC_DDR52) ||
-                   (ios->timing == MMC_TIMING_UHS_SDR50) ||
-                   (ios->timing == MMC_TIMING_UHS_SDR104) ||
-                   (ios->timing == MMC_TIMING_UHS_DDR50) ||
-                   (ios->timing == MMC_TIMING_UHS_SDR25))
-                       ctrl |= SDHCI_CTRL_HISPD;
-
                if (!host->preset_enabled) {
                        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
                        /*
@@ -1948,11 +1952,157 @@ static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
        return 0;
 }
 
-static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+static void sdhci_start_tuning(struct sdhci_host *host)
 {
-       struct sdhci_host *host = mmc_priv(mmc);
        u16 ctrl;
-       int tuning_loop_counter = MAX_TUNING_LOOP;
+
+       ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+       ctrl |= SDHCI_CTRL_EXEC_TUNING;
+       if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
+               ctrl |= SDHCI_CTRL_TUNED_CLK;
+       sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+       /*
+        * As per the Host Controller spec v3.00, tuning command
+        * generates Buffer Read Ready interrupt, so enable that.
+        *
+        * Note: The spec clearly says that when tuning sequence
+        * is being performed, the controller does not generate
+        * interrupts other than Buffer Read Ready interrupt. But
+        * to make sure we don't hit a controller bug, we _only_
+        * enable Buffer Read Ready interrupt here.
+        */
+       sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
+       sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+}
+
+static void sdhci_end_tuning(struct sdhci_host *host)
+{
+       sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+       sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+}
+
+static void sdhci_reset_tuning(struct sdhci_host *host)
+{
+       u16 ctrl;
+
+       ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+       ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+       ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+       sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode,
+                              unsigned long flags)
+{
+       sdhci_reset_tuning(host);
+
+       sdhci_do_reset(host, SDHCI_RESET_CMD);
+       sdhci_do_reset(host, SDHCI_RESET_DATA);
+
+       sdhci_end_tuning(host);
+
+       spin_unlock_irqrestore(&host->lock, flags);
+       mmc_abort_tuning(host->mmc, opcode);
+       spin_lock_irqsave(&host->lock, flags);
+}
+
+/*
+ * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
+ * tuning command does not have a data payload (or rather the hardware does it
+ * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
+ * interrupt setup is different to other commands and there is no timeout
+ * interrupt so special handling is needed.
+ */
+static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode,
+                             unsigned long flags)
+{
+       struct mmc_host *mmc = host->mmc;
+       struct mmc_command cmd = {0};
+       struct mmc_request mrq = {NULL};
+
+       cmd.opcode = opcode;
+       cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+       cmd.mrq = &mrq;
+
+       mrq.cmd = &cmd;
+       /*
+        * In response to CMD19, the card sends 64 bytes of tuning
+        * block to the Host Controller. So we set the block size
+        * to 64 here.
+        */
+       if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
+           mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+               sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
+       else
+               sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
+
+       /*
+        * The tuning block is sent by the card to the host controller.
+        * So we set the TRNS_READ bit in the Transfer Mode register.
+        * This also takes care of setting DMA Enable and Multi Block
+        * Select in the same register to 0.
+        */
+       sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
+
+       sdhci_send_command(host, &cmd);
+
+       host->cmd = NULL;
+
+       sdhci_del_timer(host, &mrq);
+
+       host->tuning_done = 0;
+
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       /* Wait for Buffer Read Ready interrupt */
+       wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
+                          msecs_to_jiffies(50));
+
+       spin_lock_irqsave(&host->lock, flags);
+}
+
+static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode,
+                                  unsigned long flags)
+{
+       int i;
+
+       /*
+        * Issue the tuning opcode repeatedly until Execute Tuning is cleared
+        * or the number of loops reaches MAX_TUNING_LOOP (40).
+        */
+       for (i = 0; i < MAX_TUNING_LOOP; i++) {
+               u16 ctrl;
+
+               sdhci_send_tuning(host, opcode, flags);
+
+               if (!host->tuning_done) {
+                       pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
+                               mmc_hostname(host->mmc));
+                       sdhci_abort_tuning(host, opcode, flags);
+                       return;
+               }
+
+               ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+               if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
+                       if (ctrl & SDHCI_CTRL_TUNED_CLK)
+                               return; /* Success! */
+                       break;
+               }
+
+               /* eMMC spec does not require a delay between tuning cycles */
+               if (opcode == MMC_SEND_TUNING_BLOCK)
+                       mdelay(1);
+       }
+
+       pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
+               mmc_hostname(host->mmc));
+       sdhci_reset_tuning(host);
+}
+
+int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
        int err = 0;
        unsigned long flags;
        unsigned int tuning_count = 0;
@@ -2003,144 +2153,22 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 
        if (host->ops->platform_execute_tuning) {
                spin_unlock_irqrestore(&host->lock, flags);
-               err = host->ops->platform_execute_tuning(host, opcode);
-               return err;
+               return host->ops->platform_execute_tuning(host, opcode);
        }
 
-       ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-       ctrl |= SDHCI_CTRL_EXEC_TUNING;
-       if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
-               ctrl |= SDHCI_CTRL_TUNED_CLK;
-       sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
-       /*
-        * As per the Host Controller spec v3.00, tuning command
-        * generates Buffer Read Ready interrupt, so enable that.
-        *
-        * Note: The spec clearly says that when tuning sequence
-        * is being performed, the controller does not generate
-        * interrupts other than Buffer Read Ready interrupt. But
-        * to make sure we don't hit a controller bug, we _only_
-        * enable Buffer Read Ready interrupt here.
-        */
-       sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
-       sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
-
-       /*
-        * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
-        * of loops reaches 40 times.
-        */
-       do {
-               struct mmc_command cmd = {0};
-               struct mmc_request mrq = {NULL};
-
-               cmd.opcode = opcode;
-               cmd.arg = 0;
-               cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
-               cmd.retries = 0;
-               cmd.data = NULL;
-               cmd.mrq = &mrq;
-               cmd.error = 0;
-
-               if (tuning_loop_counter-- == 0)
-                       break;
-
-               mrq.cmd = &cmd;
-
-               /*
-                * In response to CMD19, the card sends 64 bytes of tuning
-                * block to the Host Controller. So we set the block size
-                * to 64 here.
-                */
-               if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
-                       if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
-                               sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
-                                            SDHCI_BLOCK_SIZE);
-                       else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
-                               sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
-                                            SDHCI_BLOCK_SIZE);
-               } else {
-                       sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
-                                    SDHCI_BLOCK_SIZE);
-               }
-
-               /*
-                * The tuning block is sent by the card to the host controller.
-                * So we set the TRNS_READ bit in the Transfer Mode register.
-                * This also takes care of setting DMA Enable and Multi Block
-                * Select in the same register to 0.
-                */
-               sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
-
-               sdhci_send_command(host, &cmd);
-
-               host->cmd = NULL;
-               sdhci_del_timer(host, &mrq);
-
-               spin_unlock_irqrestore(&host->lock, flags);
-               /* Wait for Buffer Read Ready interrupt */
-               wait_event_timeout(host->buf_ready_int,
-                                       (host->tuning_done == 1),
-                                       msecs_to_jiffies(50));
-               spin_lock_irqsave(&host->lock, flags);
-
-               if (!host->tuning_done) {
-                       pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
-
-                       sdhci_do_reset(host, SDHCI_RESET_CMD);
-                       sdhci_do_reset(host, SDHCI_RESET_DATA);
-
-                       ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-                       ctrl &= ~SDHCI_CTRL_TUNED_CLK;
-                       ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
-                       sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
-                       err = -EIO;
-                       goto out;
-               }
+       host->mmc->retune_period = tuning_count;
 
-               host->tuning_done = 0;
+       sdhci_start_tuning(host);
 
-               ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+       __sdhci_execute_tuning(host, opcode, flags);
 
-               /* eMMC spec does not require a delay between tuning cycles */
-               if (opcode == MMC_SEND_TUNING_BLOCK)
-                       mdelay(1);
-       } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
-
-       /*
-        * The Host Driver has exhausted the maximum number of loops allowed,
-        * so use fixed sampling frequency.
-        */
-       if (tuning_loop_counter < 0) {
-               ctrl &= ~SDHCI_CTRL_TUNED_CLK;
-               sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-       }
-       if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
-               pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
-               err = -EIO;
-       }
-
-out:
-       if (tuning_count) {
-               /*
-                * In case tuning fails, host controllers which support
-                * re-tuning can try tuning again at a later time, when the
-                * re-tuning timer expires.  So for these controllers, we
-                * return 0. Since there might be other controllers who do not
-                * have this capability, we return error for them.
-                */
-               err = 0;
-       }
-
-       host->mmc->retune_period = err ? 0 : tuning_count;
-
-       sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
-       sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+       sdhci_end_tuning(host);
 out_unlock:
        spin_unlock_irqrestore(&host->lock, flags);
+
        return err;
 }
+EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
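
With sdhci_execute_tuning() now exported, a variant driver can reuse the core
tuning loop instead of duplicating it. A hypothetical sketch (the driver name
and preparation step are assumptions, not taken from this patch), assuming
"sdhci.h" is included for the declaration:

/* Hypothetical variant hook: vendor-specific preparation, then the core loop. */
static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	/* vendor-specific pre-tuning setup would go here */

	return sdhci_execute_tuning(mmc, opcode);
}
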
 
 static int sdhci_select_drive_strength(struct mmc_card *card,
                                       unsigned int max_dtr, int host_drv,
@@ -2198,8 +2226,7 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
        data->host_cookie = COOKIE_UNMAPPED;
 }
 
-static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
-                              bool is_first_req)
+static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct sdhci_host *host = mmc_priv(mmc);
 
@@ -3010,6 +3037,8 @@ static int sdhci_set_dma_mask(struct sdhci_host *host)
 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
 {
        u16 v;
+       u64 dt_caps_mask = 0;
+       u64 dt_caps = 0;
 
        if (host->read_caps)
                return;
@@ -3024,18 +3053,35 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
 
        sdhci_do_reset(host, SDHCI_RESET_ALL);
 
+       of_property_read_u64(mmc_dev(host->mmc)->of_node,
+                            "sdhci-caps-mask", &dt_caps_mask);
+       of_property_read_u64(mmc_dev(host->mmc)->of_node,
+                            "sdhci-caps", &dt_caps);
+
        v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
        host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
 
        if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
                return;
 
-       host->caps = caps ? *caps : sdhci_readl(host, SDHCI_CAPABILITIES);
+       if (caps) {
+               host->caps = *caps;
+       } else {
+               host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+               host->caps &= ~lower_32_bits(dt_caps_mask);
+               host->caps |= lower_32_bits(dt_caps);
+       }
 
        if (host->version < SDHCI_SPEC_300)
                return;
 
-       host->caps1 = caps1 ? *caps1 : sdhci_readl(host, SDHCI_CAPABILITIES_1);
+       if (caps1) {
+               host->caps1 = *caps1;
+       } else {
+               host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+               host->caps1 &= ~upper_32_bits(dt_caps_mask);
+               host->caps1 |= upper_32_bits(dt_caps);
+       }
 }
 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
 
index 2570455b219a469c1669ff4d3f9935ca89e37c6f..0b66f210ae82c5d64f8301a81a6e938a39048b7f 100644 (file)
@@ -656,7 +656,7 @@ extern void sdhci_free_host(struct sdhci_host *host);
 
 static inline void *sdhci_priv(struct sdhci_host *host)
 {
-       return (void *)host->private;
+       return host->private;
 }
 
 extern void sdhci_card_detect(struct sdhci_host *host);
@@ -682,6 +682,7 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
                   unsigned int *actual_clock);
 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_enable_clk(struct sdhci_host *host, u16 clk);
 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                     unsigned short vdd);
 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
@@ -689,6 +690,7 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
 void sdhci_set_bus_width(struct sdhci_host *host, int width);
 void sdhci_reset(struct sdhci_host *host, u8 mask);
 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
 
 #ifdef CONFIG_PM
 extern int sdhci_suspend_host(struct sdhci_host *host);
index 49edff7fee49bc2b48f924d6e17469a140d416e8..d46c2d00c18244b1c082497a42b6a97ca019ae60 100644 (file)
 
 #define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data)
 
+struct sh_mobile_sdhi_scc {
+       unsigned long clk_rate; /* clock rate for SDR104 */
+       u32 tap;                /* sampling clock position for SDR104 */
+};
+
 struct sh_mobile_sdhi_of_data {
        unsigned long tmio_flags;
+       u32           tmio_ocr_mask;
        unsigned long capabilities;
        unsigned long capabilities2;
        enum dma_slave_buswidth dma_buswidth;
        dma_addr_t dma_rx_offset;
        unsigned bus_shift;
+       int scc_offset;
+       struct sh_mobile_sdhi_scc *taps;
+       int taps_num;
 };
 
 static const struct sh_mobile_sdhi_of_data of_default_cfg = {
        .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
 };
 
+static const struct sh_mobile_sdhi_of_data of_rz_compatible = {
+       .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT,
+       .tmio_ocr_mask  = MMC_VDD_32_33,
+       .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+};
+
 static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
                          TMIO_MMC_CLK_ACTUAL,
        .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
 };
 
+/* Definitions for sampling clocks */
+static struct sh_mobile_sdhi_scc rcar_gen2_scc_taps[] = {
+       {
+               .clk_rate = 156000000,
+               .tap = 0x00000703,
+       },
+       {
+               .clk_rate = 0,
+               .tap = 0x00000300,
+       },
+};
+
 static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
                          TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
        .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
        .dma_buswidth   = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dma_rx_offset  = 0x2000,
+       .scc_offset     = 0x0300,
+       .taps           = rcar_gen2_scc_taps,
+       .taps_num       = ARRAY_SIZE(rcar_gen2_scc_taps),
+};
+
+/* Definitions for sampling clocks */
+static struct sh_mobile_sdhi_scc rcar_gen3_scc_taps[] = {
+       {
+               .clk_rate = 0,
+               .tap = 0x00000300,
+       },
 };
 
 static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
@@ -79,6 +117,9 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
                          TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
        .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
        .bus_shift      = 2,
+       .scc_offset     = 0x1000,
+       .taps           = rcar_gen3_scc_taps,
+       .taps_num       = ARRAY_SIZE(rcar_gen3_scc_taps),
 };
 
 static const struct of_device_id sh_mobile_sdhi_of_match[] = {
@@ -86,6 +127,7 @@ static const struct of_device_id sh_mobile_sdhi_of_match[] = {
        { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
        { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
        { .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
+       { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
        { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
        { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
        { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
@@ -105,6 +147,7 @@ struct sh_mobile_sdhi {
        struct tmio_mmc_dma dma_priv;
        struct pinctrl *pinctrl;
        struct pinctrl_state *pins_default, *pins_uhs;
+       void __iomem *scc_ctl;
 };
 
 static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
@@ -255,6 +298,201 @@ static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
        return pinctrl_select_state(priv->pinctrl, pin_state);
 }
 
+/* SCC registers */
+#define SH_MOBILE_SDHI_SCC_DTCNTL      0x000
+#define SH_MOBILE_SDHI_SCC_TAPSET      0x002
+#define SH_MOBILE_SDHI_SCC_DT2FF       0x004
+#define SH_MOBILE_SDHI_SCC_CKSEL       0x006
+#define SH_MOBILE_SDHI_SCC_RVSCNTL     0x008
+#define SH_MOBILE_SDHI_SCC_RVSREQ      0x00A
+
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_DTCNTL register */
+#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN                BIT(0)
+#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT 16
+#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK  0xff
+
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_CKSEL register */
+#define SH_MOBILE_SDHI_SCC_CKSEL_DTSEL         BIT(0)
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_RVSCNTL register */
+#define SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN       BIT(0)
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_RVSREQ register */
+#define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR       BIT(2)
+
+static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
+                               struct sh_mobile_sdhi *priv, int addr)
+{
+       return readl(priv->scc_ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_scc_write32(struct tmio_mmc_host *host,
+                                 struct sh_mobile_sdhi *priv,
+                                 int addr, u32 val)
+{
+       writel(val, priv->scc_ctl + (addr << host->bus_shift));
+}
+
+static unsigned int sh_mobile_sdhi_init_tuning(struct tmio_mmc_host *host)
+{
+       struct sh_mobile_sdhi *priv;
+
+       if (!(host->mmc->caps & MMC_CAP_UHS_SDR104))
+               return 0;
+
+       priv = host_to_priv(host);
+
+       /* set sampling clock selection range */
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+                      0x8 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+
+       /* Initialize SCC */
+       sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, 0x0);
+
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+                      SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
+                      sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL));
+
+       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+                       sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
+                      SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
+                      sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
+
+       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+                       sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+                      ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+                      sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, host->scc_tappos);
+
+       /* Read TAPNUM */
+       return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
+               SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT) &
+               SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK;
+}
+
+static void sh_mobile_sdhi_prepare_tuning(struct tmio_mmc_host *host,
+                                        unsigned long tap)
+{
+       struct sh_mobile_sdhi *priv = host_to_priv(host);
+
+       /* Set sampling clock position */
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap);
+}
+
+#define SH_MOBILE_SDHI_MAX_TAP 3
+
+static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
+{
+       struct sh_mobile_sdhi *priv = host_to_priv(host);
+       unsigned long tap_cnt;  /* counter of tuning success */
+       unsigned long tap_set;  /* tap position */
+       unsigned long tap_start;/* start position of tuning success */
+       unsigned long tap_end;  /* end position of tuning success */
+       unsigned long ntap;     /* temporary counter of tuning success */
+       unsigned long i;
+
+       /* Clear SCC_RVSREQ */
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
+
+       /*
+        * Find the longest consecutive run of successful probes.  If that
+        * is more than SH_MOBILE_SDHI_MAX_TAP probes long then use the
+        * center index as the tap.
+        */
+       tap_cnt = 0;
+       ntap = 0;
+       tap_start = 0;
+       tap_end = 0;
+       for (i = 0; i < host->tap_num * 2; i++) {
+               if (test_bit(i, host->taps))
+                       ntap++;
+               else {
+                       if (ntap > tap_cnt) {
+                               tap_start = i - ntap;
+                               tap_end = i - 1;
+                               tap_cnt = ntap;
+                       }
+                       ntap = 0;
+               }
+       }
+
+       if (ntap > tap_cnt) {
+               tap_start = i - ntap;
+               tap_end = i - 1;
+               tap_cnt = ntap;
+       }
+
+       if (tap_cnt >= SH_MOBILE_SDHI_MAX_TAP)
+               tap_set = (tap_start + tap_end) / 2 % host->tap_num;
+       else
+               return -EIO;
+
+       /* Set SCC */
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap_set);
+
+       /* Enable auto re-tuning */
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+                      SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN |
+                      sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+       return 0;
+}
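
To make the selection logic above easier to follow, here is a minimal standalone user-space sketch of the same idea (not part of the patch; the function and variable names are illustrative): collect one pass/fail flag per CMD19 probe, find the longest run of passes, and, if that run is at least SH_MOBILE_SDHI_MAX_TAP entries long, pick the tap at its centre modulo the number of taps.

#include <stdio.h>

/* Illustrative only: mirrors the selection done by sh_mobile_sdhi_select_tuning(). */
static int pick_tap(const int *results, int tap_num)
{
	int best_len = 0, best_start = 0, best_end = 0, run = 0, i;

	for (i = 0; i < tap_num * 2; i++) {
		if (results[i]) {
			run++;
			continue;
		}
		if (run > best_len) {
			best_start = i - run;
			best_end = i - 1;
			best_len = run;
		}
		run = 0;
	}
	if (run > best_len) {
		best_start = i - run;
		best_end = i - 1;
		best_len = run;
	}

	if (best_len < 3)	/* SH_MOBILE_SDHI_MAX_TAP */
		return -1;	/* no usable window; the driver returns -EIO here */

	return (best_start + best_end) / 2 % tap_num;
}

int main(void)
{
	/* 2 * tap_num probe results; 1 means CMD19 passed at that position */
	int results[16] = { 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0 };

	printf("selected tap: %d\n", pick_tap(results, 8));	/* prints 1 */
	return 0;
}

The modulo folds the doubled probe range back onto the real tap range, which is why the tuning loop runs over tap_num * 2 positions.
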
+
+static bool sh_mobile_sdhi_check_scc_error(struct tmio_mmc_host *host)
+{
+       struct sh_mobile_sdhi *priv;
+
+       if (!(host->mmc->caps & MMC_CAP_UHS_SDR104))
+               return 0;
+
+       priv = host_to_priv(host);
+
+       /* Check SCC error */
+       if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
+           SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &&
+           sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) &
+           SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) {
+               /* Clear SCC error */
+               sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
+               return true;
+       }
+
+       return false;
+}
+
+static void sh_mobile_sdhi_hw_reset(struct tmio_mmc_host *host)
+{
+       struct sh_mobile_sdhi *priv;
+
+       if (!(host->mmc->caps & MMC_CAP_UHS_SDR104))
+               return;
+
+       priv = host_to_priv(host);
+
+       /* Reset SCC */
+       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+                       sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
+                      ~SH_MOBILE_SDHI_SCC_CKSEL_DTSEL &
+                      sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
+
+       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+                       sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+                      ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+                      sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+       sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+                      ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+                      sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+}
+
 static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
 {
        int timeout = 1000;
@@ -325,7 +563,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
        struct tmio_mmc_data *mmd = pdev->dev.platform_data;
        struct tmio_mmc_host *host;
        struct resource *res;
-       int irq, ret, i = 0;
+       int irq, ret, i;
        struct tmio_mmc_dma *dma_priv;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -364,6 +602,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
                const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
 
                mmc_data->flags |= of_data->tmio_flags;
+               mmc_data->ocr_mask = of_data->tmio_ocr_mask;
                mmc_data->capabilities |= of_data->capabilities;
                mmc_data->capabilities2 |= of_data->capabilities2;
                mmc_data->dma_rx_offset = of_data->dma_rx_offset;
@@ -384,6 +623,11 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
                host->card_busy = sh_mobile_sdhi_card_busy;
                host->start_signal_voltage_switch =
                        sh_mobile_sdhi_start_signal_voltage_switch;
+               host->init_tuning       = sh_mobile_sdhi_init_tuning;
+               host->prepare_tuning    = sh_mobile_sdhi_prepare_tuning;
+               host->select_tuning     = sh_mobile_sdhi_select_tuning;
+               host->check_scc_error   = sh_mobile_sdhi_check_scc_error;
+               host->hw_reset          = sh_mobile_sdhi_hw_reset;
        }
 
 /* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -424,6 +668,34 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
        if (ret < 0)
                goto efree;
 
+       if (host->mmc->caps & MMC_CAP_UHS_SDR104) {
+               host->mmc->caps |= MMC_CAP_HW_RESET;
+
+               if (of_id && of_id->data) {
+                       const struct sh_mobile_sdhi_of_data *of_data;
+                       const struct sh_mobile_sdhi_scc *taps;
+                       bool hit = false;
+
+                       of_data = of_id->data;
+                       taps = of_data->taps;
+
+                       for (i = 0; i < of_data->taps_num; i++) {
+                               if (taps[i].clk_rate == 0 ||
+                                   taps[i].clk_rate == host->mmc->f_max) {
+                                       host->scc_tappos = taps[i].tap;
+                                       hit = true;
+                                       break;
+                               }
+                       }
+
+                       if (!hit)
+                               dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+
+                       priv->scc_ctl = host->ctl + of_data->scc_offset;
+               }
+       }
+
+       i = 0;
        while (1) {
                irq = platform_get_irq(pdev, i);
                if (irq < 0)
index c0a5c676d0e82bd662c673c00acd385914d501e3..b1d1303389a71eb17f6def0e9941b1687061f589 100644 (file)
@@ -822,10 +822,13 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                break;
 
        case MMC_POWER_UP:
-               host->ferror = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
-                                                    ios->vdd);
-               if (host->ferror)
-                       return;
+               if (!IS_ERR(mmc->supply.vmmc)) {
+                       host->ferror = mmc_regulator_set_ocr(mmc,
+                                                            mmc->supply.vmmc,
+                                                            ios->vdd);
+                       if (host->ferror)
+                               return;
+               }
 
                if (!IS_ERR(mmc->supply.vqmmc)) {
                        host->ferror = regulator_enable(mmc->supply.vqmmc);
@@ -847,7 +850,9 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        case MMC_POWER_OFF:
                dev_dbg(mmc_dev(mmc), "power off!\n");
                sunxi_mmc_reset_host(host);
-               mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+               if (!IS_ERR(mmc->supply.vmmc))
+                       mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
                if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled)
                        regulator_disable(mmc->supply.vqmmc);
                host->vqmmc_enabled = false;
index 8e126afd988cca5637f1900e0c72541ace250e3b..9e20bcf3aa8d2da5fcb263e81a15845326592b36 100644 (file)
@@ -153,9 +153,12 @@ struct tmio_mmc_host {
        struct mutex            ios_lock;       /* protect set_ios() context */
        bool                    native_hotplug;
        bool                    sdio_irq_enabled;
+       u32                     scc_tappos;
 
-       int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+       /* Mandatory callback */
        int (*clk_enable)(struct tmio_mmc_host *host);
+
+       /* Optional callbacks */
        unsigned int (*clk_update)(struct tmio_mmc_host *host,
                                   unsigned int new_clock);
        void (*clk_disable)(struct tmio_mmc_host *host);
@@ -164,6 +167,21 @@ struct tmio_mmc_host {
        int (*card_busy)(struct mmc_host *mmc);
        int (*start_signal_voltage_switch)(struct mmc_host *mmc,
                                           struct mmc_ios *ios);
+       int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+       void (*hw_reset)(struct tmio_mmc_host *host);
+       void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
+       bool (*check_scc_error)(struct tmio_mmc_host *host);
+
+       /*
+        * Callbacks used for tuning; tuning is optional for SDR50 and
+        * mandatory for SDR104.
+        */
+       unsigned int (*init_tuning)(struct tmio_mmc_host *host);
+       int (*select_tuning)(struct tmio_mmc_host *host);
+
+       /* Tuning values: 1 for success, 0 for failure */
+       DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long));
+       unsigned int tap_num;
 };
 
 struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
@@ -245,6 +263,12 @@ static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, int ad
               readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
 }
 
+static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
+               u32 *buf, int count)
+{
+       readsl(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
 static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
 {
        /* If there is a hook and it returns non-zero then there
@@ -267,4 +291,10 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int
        writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
 }
 
+static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
+               const u32 *buf, int count)
+{
+       writesl(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
 #endif
index 700567603107a094381efac7ab02fd2b29b02d58..2064fa1a5bf11f9a5ca994b94087e389703bb550 100644 (file)
@@ -22,7 +22,6 @@
  * TODO:
  *   Investigate using a workqueue for PIO transfers
  *   Eliminate FIXMEs
- *   SDIO support
  *   Better Power management
  *   Handle MMC errors better
  *   double buffer support
@@ -36,6 +35,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/mfd/tmio.h>
+#include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/slot-gpio.h>
@@ -298,6 +298,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
        if (mrq->cmd->error || (mrq->data && mrq->data->error))
                tmio_mmc_abort_dma(host);
 
+       if (host->check_scc_error)
+               host->check_scc_error(host);
+
        mmc_request_done(host->mmc, mrq);
 }
 
@@ -393,6 +396,36 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
        /*
         * Transfer the data
         */
+       if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
+               u8 data[4] = { };
+
+               if (is_read)
+                       sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
+                                          count >> 2);
+               else
+                       sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
+                                           count >> 2);
+
+               /* if count was a multiple of 4, we are done */
+               if (!(count & 0x3))
+                       return;
+
+               buf8 = (u8 *)(buf + (count >> 2));
+               count %= 4;
+
+               if (is_read) {
+                       sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
+                                          (u32 *)data, 1);
+                       memcpy(buf8, data, count);
+               } else {
+                       memcpy(data, buf8, count);
+                       sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
+                                           (u32 *)data, 1);
+               }
+
+               return;
+       }
+
        if (is_read)
                sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
        else
@@ -522,7 +555,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
        schedule_work(&host->done);
 }
 
-static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
 {
        struct mmc_data *data;
        spin_lock(&host->lock);
@@ -531,6 +564,9 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
        if (!data)
                goto out;
 
+       if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
+           stat & TMIO_STAT_TXUNDERRUN)
+               data->error = -EILSEQ;
        if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
                u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
                bool done = false;
@@ -579,8 +615,6 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
                goto out;
        }
 
-       host->cmd = NULL;
-
        /* This controller is sicker than the PXA one. Not only do we need to
         * drop the top 8 bits of the first response word, we also need to
         * modify the order of the response for short response command types.
@@ -600,14 +634,16 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 
        if (stat & TMIO_STAT_CMDTIMEOUT)
                cmd->error = -ETIMEDOUT;
-       else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
+       else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
+                stat & TMIO_STAT_STOPBIT_ERR ||
+                stat & TMIO_STAT_CMD_IDX_ERR)
                cmd->error = -EILSEQ;
 
        /* If there is data to handle we enable data IRQs here, and
         * we will ultimately finish the request in the data_end handler.
         * If there's no data or we encountered an error, finish now.
         */
-       if (host->data && !cmd->error) {
+       if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
                if (host->data->flags & MMC_DATA_READ) {
                        if (host->force_pio || !host->chan_rx)
                                tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
@@ -668,7 +704,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
        /* Data transfer completion */
        if (ireg & TMIO_STAT_DATAEND) {
                tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
-               tmio_mmc_data_irq(host);
+               tmio_mmc_data_irq(host, status);
                return true;
        }
 
@@ -687,7 +723,7 @@ static void tmio_mmc_sdio_irq(int irq, void *devid)
                return;
 
        status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
-       ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
+       ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
 
        sdio_status = status & ~TMIO_SDIO_MASK_ALL;
        if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
@@ -756,6 +792,63 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
        return 0;
 }
 
+static void tmio_mmc_hw_reset(struct mmc_host *mmc)
+{
+       struct tmio_mmc_host *host = mmc_priv(mmc);
+
+       if (host->hw_reset)
+               host->hw_reset(host);
+}
+
+static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+       struct tmio_mmc_host *host = mmc_priv(mmc);
+       int i, ret = 0;
+
+       if (!host->tap_num) {
+               if (!host->init_tuning || !host->select_tuning)
+                       /* Tuning is not supported */
+                       goto out;
+
+               host->tap_num = host->init_tuning(host);
+               if (!host->tap_num)
+                       /* Tuning is not supported */
+                       goto out;
+       }
+
+       if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
+               dev_warn_once(&host->pdev->dev,
+                     "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
+               goto out;
+       }
+
+       bitmap_zero(host->taps, host->tap_num * 2);
+
+       /* Issue CMD19 twice for each tap */
+       for (i = 0; i < 2 * host->tap_num; i++) {
+               if (host->prepare_tuning)
+                       host->prepare_tuning(host, i % host->tap_num);
+
+               ret = mmc_send_tuning(mmc, opcode, NULL);
+               if (ret && ret != -EILSEQ)
+                       goto out;
+               if (ret == 0)
+                       set_bit(i, host->taps);
+
+               mdelay(1);
+       }
+
+       ret = host->select_tuning(host);
+
+out:
+       if (ret < 0) {
+               dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
+               tmio_mmc_hw_reset(mmc);
+       }
+
+       return ret;
+}
+
 /* Process requests from the MMC layer */
 static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
@@ -972,6 +1065,8 @@ static struct mmc_host_ops tmio_mmc_ops = {
        .get_cd         = mmc_gpio_get_cd,
        .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
        .multi_io_quirk = tmio_multi_io_quirk,
+       .hw_reset       = tmio_mmc_hw_reset,
+       .execute_tuning = tmio_mmc_execute_tuning,
 };
 
 static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
@@ -1218,6 +1313,11 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
 }
 EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
 
+static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
+{
+       return host->tap_num && mmc_can_retune(host->mmc);
+}
+
 int tmio_mmc_host_runtime_resume(struct device *dev)
 {
        struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -1231,6 +1331,9 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
 
        tmio_mmc_enable_dma(host, true);
 
+       if (tmio_mmc_can_retune(host) && host->select_tuning(host))
+               dev_warn(&host->pdev->dev, "Tuning selection failed\n");
+
        return 0;
 }
 EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
index c3fd16d997caba4d903d6e8d3ea35acd0f350075..80a3b11f3217143bee5e15fdb24f2f1c346a1105 100644 (file)
@@ -1395,23 +1395,25 @@ static void wbsd_request_dma(struct wbsd_host *host, int dma)
         */
        host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
                WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(mmc_dev(host->mmc), host->dma_addr))
+               goto kfree;
 
        /*
         * ISA DMA must be aligned on a 64k basis.
         */
        if ((host->dma_addr & 0xffff) != 0)
-               goto kfree;
+               goto unmap;
        /*
         * ISA cannot access memory above 16 MB.
         */
        else if (host->dma_addr >= 0x1000000)
-               goto kfree;
+               goto unmap;
 
        host->dma = dma;
 
        return;
 
-kfree:
+unmap:
        /*
         * If we've gotten here then there is some kind of alignment bug
         */
@@ -1421,6 +1423,7 @@ static void wbsd_request_dma(struct wbsd_host *host, int dma)
                WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
        host->dma_addr = 0;
 
+kfree:
        kfree(host->dma_buffer);
        host->dma_buffer = NULL;
 
@@ -1434,7 +1437,7 @@ static void wbsd_request_dma(struct wbsd_host *host, int dma)
 
 static void wbsd_release_dma(struct wbsd_host *host)
 {
-       if (host->dma_addr) {
+       if (!dma_mapping_error(mmc_dev(host->mmc), host->dma_addr)) {
                dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
                        WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
        }
index e6e90e80519a7db028fb23dcc6f121b1c64fc0e6..f31bceb69c0d00390d1a9352b7e46e225e8c1170 100644 (file)
@@ -1,8 +1,7 @@
 menu "SOC (System On Chip) specific Drivers"
 
 source "drivers/soc/bcm/Kconfig"
-source "drivers/soc/fsl/qbman/Kconfig"
-source "drivers/soc/fsl/qe/Kconfig"
+source "drivers/soc/fsl/Kconfig"
 source "drivers/soc/mediatek/Kconfig"
 source "drivers/soc/qcom/Kconfig"
 source "drivers/soc/rockchip/Kconfig"
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
new file mode 100644 (file)
index 0000000..7a9fb9b
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Freescale SOC drivers
+#
+
+source "drivers/soc/fsl/qbman/Kconfig"
+source "drivers/soc/fsl/qe/Kconfig"
+
+config FSL_GUTS
+       bool
+       select SOC_BUS
+       help
+         The global utilities block controls power management, I/O device
+         enabling, power-on reset (POR) configuration monitoring, alternate
+         function selection for multiplexed signals, and clock control.
+         This driver is used to manage and access the global utilities block.
+         Initially only reading the SVR and registering the SoC device are
+         supported. Other guts accesses, such as reading the RCW, should
+         eventually be moved into this driver as well.
index 75e1f5334821080e0643b47ae1fd592dc7124509..44b3bebef24a3dac1de85c1615ddfde51fa9453c 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_FSL_DPAA)                 += qbman/
 obj-$(CONFIG_QUICC_ENGINE)             += qe/
 obj-$(CONFIG_CPM)                      += qe/
+obj-$(CONFIG_FSL_GUTS)                 += guts.o
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
new file mode 100644 (file)
index 0000000..6af7a11
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * Freescale QorIQ Platforms GUTS Driver
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/sys_soc.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/fsl/guts.h>
+
+struct guts {
+       struct ccsr_guts __iomem *regs;
+       bool little_endian;
+};
+
+struct fsl_soc_die_attr {
+       char    *die;
+       u32     svr;
+       u32     mask;
+};
+
+static struct guts *guts;
+static struct soc_device_attribute soc_dev_attr;
+static struct soc_device *soc_dev;
+
+/* SoC die attribute definition for QorIQ platform */
+static const struct fsl_soc_die_attr fsl_soc_die[] = {
+       /*
+        * Power Architecture-based SoCs T Series
+        */
+
+       /* Die: T4240, SoC: T4240/T4160/T4080 */
+       { .die          = "T4240",
+         .svr          = 0x82400000,
+         .mask         = 0xfff00000,
+       },
+       /* Die: T1040, SoC: T1040/T1020/T1042/T1022 */
+       { .die          = "T1040",
+         .svr          = 0x85200000,
+         .mask         = 0xfff00000,
+       },
+       /* Die: T2080, SoC: T2080/T2081 */
+       { .die          = "T2080",
+         .svr          = 0x85300000,
+         .mask         = 0xfff00000,
+       },
+       /* Die: T1024, SoC: T1024/T1014/T1023/T1013 */
+       { .die          = "T1024",
+         .svr          = 0x85400000,
+         .mask         = 0xfff00000,
+       },
+
+       /*
+        * ARM-based SoCs LS Series
+        */
+
+       /* Die: LS1043A, SoC: LS1043A/LS1023A */
+       { .die          = "LS1043A",
+         .svr          = 0x87920000,
+         .mask         = 0xffff0000,
+       },
+       /* Die: LS2080A, SoC: LS2080A/LS2040A/LS2085A */
+       { .die          = "LS2080A",
+         .svr          = 0x87010000,
+         .mask         = 0xff3f0000,
+       },
+       /* Die: LS1088A, SoC: LS1088A/LS1048A/LS1084A/LS1044A */
+       { .die          = "LS1088A",
+         .svr          = 0x87030000,
+         .mask         = 0xff3f0000,
+       },
+       /* Die: LS1012A, SoC: LS1012A */
+       { .die          = "LS1012A",
+         .svr          = 0x87040000,
+         .mask         = 0xffff0000,
+       },
+       /* Die: LS1046A, SoC: LS1046A/LS1026A */
+       { .die          = "LS1046A",
+         .svr          = 0x87070000,
+         .mask         = 0xffff0000,
+       },
+       /* Die: LS2088A, SoC: LS2088A/LS2048A/LS2084A/LS2044A */
+       { .die          = "LS2088A",
+         .svr          = 0x87090000,
+         .mask         = 0xff3f0000,
+       },
+       /* Die: LS1021A, SoC: LS1021A/LS1020A/LS1022A */
+       { .die          = "LS1021A",
+         .svr          = 0x87000000,
+         .mask         = 0xfff70000,
+       },
+       { },
+};
+
+static const struct fsl_soc_die_attr *fsl_soc_die_match(
+       u32 svr, const struct fsl_soc_die_attr *matches)
+{
+       while (matches->svr) {
+               if (matches->svr == (svr & matches->mask))
+                       return matches;
+               matches++;
+       }
+       return NULL;
+}
+
+u32 fsl_guts_get_svr(void)
+{
+       u32 svr = 0;
+
+       if (!guts || !guts->regs)
+               return svr;
+
+       if (guts->little_endian)
+               svr = ioread32(&guts->regs->svr);
+       else
+               svr = ioread32be(&guts->regs->svr);
+
+       return svr;
+}
+EXPORT_SYMBOL(fsl_guts_get_svr);
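
As a worked illustration of how fsl_guts_get_svr() and fsl_soc_die_match() combine (a standalone sketch, not kernel code; the SVR value 0x82480011 is hypothetical), the raw SVR is masked and compared against each table entry, and the low byte encodes the silicon revision:

#include <stdio.h>
#include <stdint.h>

struct die_attr {
	const char *die;
	uint32_t svr;
	uint32_t mask;
};

/* Two entries copied from the table above, plus a sentinel. */
static const struct die_attr dies[] = {
	{ "T4240",   0x82400000, 0xfff00000 },
	{ "LS1043A", 0x87920000, 0xffff0000 },
	{ NULL, 0, 0 },
};

int main(void)
{
	uint32_t svr = 0x82480011;	/* hypothetical value read from the SVR register */
	const struct die_attr *d;

	for (d = dies; d->svr; d++) {
		if ((svr & d->mask) == d->svr) {
			printf("die: %s, revision %u.%u\n", d->die,
			       (unsigned int)((svr >> 4) & 0xf),
			       (unsigned int)(svr & 0xf));
			break;
		}
	}
	return 0;
}

This prints "die: T4240, revision 1.1", matching the "QorIQ %s" family string and "%d.%d" revision that the probe routine below registers.
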
+
+static int fsl_guts_probe(struct platform_device *pdev)
+{
+       struct device_node *root, *np = pdev->dev.of_node;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       const struct fsl_soc_die_attr *soc_die;
+       const char *machine = NULL;
+       u32 svr;
+
+       /* Initialize guts */
+       guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL);
+       if (!guts)
+               return -ENOMEM;
+
+       guts->little_endian = of_property_read_bool(np, "little-endian");
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       guts->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(guts->regs))
+               return PTR_ERR(guts->regs);
+
+       /* Register soc device */
+       root = of_find_node_by_path("/");
+       if (of_property_read_string(root, "model", &machine))
+               of_property_read_string_index(root, "compatible", 0, &machine);
+       of_node_put(root);
+       if (machine)
+               soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+
+       svr = fsl_guts_get_svr();
+       soc_die = fsl_soc_die_match(svr, fsl_soc_die);
+       if (soc_die) {
+               soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL,
+                                                    "QorIQ %s", soc_die->die);
+       } else {
+               soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ");
+       }
+       soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL,
+                                            "svr:0x%08x", svr);
+       soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
+                                              (svr >>  4) & 0xf, svr & 0xf);
+
+       soc_dev = soc_device_register(&soc_dev_attr);
+       if (IS_ERR(soc_dev))
+               return PTR_ERR(soc_dev);
+
+       pr_info("Machine: %s\n", soc_dev_attr.machine);
+       pr_info("SoC family: %s\n", soc_dev_attr.family);
+       pr_info("SoC ID: %s, Revision: %s\n",
+               soc_dev_attr.soc_id, soc_dev_attr.revision);
+       return 0;
+}
+
+static int fsl_guts_remove(struct platform_device *dev)
+{
+       soc_device_unregister(soc_dev);
+       return 0;
+}
+
+/*
+ * Table of compatible strings matched against the device tree guts node
+ * on Freescale QorIQ SoCs.
+ */
+static const struct of_device_id fsl_guts_of_match[] = {
+       { .compatible = "fsl,qoriq-device-config-1.0", },
+       { .compatible = "fsl,qoriq-device-config-2.0", },
+       { .compatible = "fsl,p1010-guts", },
+       { .compatible = "fsl,p1020-guts", },
+       { .compatible = "fsl,p1021-guts", },
+       { .compatible = "fsl,p1022-guts", },
+       { .compatible = "fsl,p1023-guts", },
+       { .compatible = "fsl,p2020-guts", },
+       { .compatible = "fsl,bsc9131-guts", },
+       { .compatible = "fsl,bsc9132-guts", },
+       { .compatible = "fsl,mpc8536-guts", },
+       { .compatible = "fsl,mpc8544-guts", },
+       { .compatible = "fsl,mpc8548-guts", },
+       { .compatible = "fsl,mpc8568-guts", },
+       { .compatible = "fsl,mpc8569-guts", },
+       { .compatible = "fsl,mpc8572-guts", },
+       { .compatible = "fsl,ls1021a-dcfg", },
+       { .compatible = "fsl,ls1043a-dcfg", },
+       { .compatible = "fsl,ls2080a-dcfg", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
+
+static struct platform_driver fsl_guts_driver = {
+       .driver = {
+               .name = "fsl-guts",
+               .of_match_table = fsl_guts_of_match,
+       },
+       .probe = fsl_guts_probe,
+       .remove = fsl_guts_remove,
+};
+
+static int __init fsl_guts_init(void)
+{
+       return platform_driver_register(&fsl_guts_driver);
+}
+core_initcall(fsl_guts_init);
+
+static void __exit fsl_guts_exit(void)
+{
+       platform_driver_unregister(&fsl_guts_driver);
+}
+module_exit(fsl_guts_exit);
index 649e9171a9b3baa63f0ad8343e718c8b8e999061..3efa3b861d44cae46670532c9db208d8630099a9 100644 (file)
  * #ifdefs.
  */
 struct ccsr_guts {
-       __be32  porpllsr;       /* 0x.0000 - POR PLL Ratio Status Register */
-       __be32  porbmsr;        /* 0x.0004 - POR Boot Mode Status Register */
-       __be32  porimpscr;      /* 0x.0008 - POR I/O Impedance Status and Control Register */
-       __be32  pordevsr;       /* 0x.000c - POR I/O Device Status Register */
-       __be32  pordbgmsr;      /* 0x.0010 - POR Debug Mode Status Register */
-       __be32  pordevsr2;      /* 0x.0014 - POR device status register 2 */
+       u32     porpllsr;       /* 0x.0000 - POR PLL Ratio Status Register */
+       u32     porbmsr;        /* 0x.0004 - POR Boot Mode Status Register */
+       u32     porimpscr;      /* 0x.0008 - POR I/O Impedance Status and
+                                *           Control Register
+                                */
+       u32     pordevsr;       /* 0x.000c - POR I/O Device Status Register */
+       u32     pordbgmsr;      /* 0x.0010 - POR Debug Mode Status Register */
+       u32     pordevsr2;      /* 0x.0014 - POR device status register 2 */
        u8      res018[0x20 - 0x18];
-       __be32  porcir;         /* 0x.0020 - POR Configuration Information Register */
+       u32     porcir;         /* 0x.0020 - POR Configuration Information
+                                *           Register
+                                */
        u8      res024[0x30 - 0x24];
-       __be32  gpiocr;         /* 0x.0030 - GPIO Control Register */
+       u32     gpiocr;         /* 0x.0030 - GPIO Control Register */
        u8      res034[0x40 - 0x34];
-       __be32  gpoutdr;        /* 0x.0040 - General-Purpose Output Data Register */
+       u32     gpoutdr;        /* 0x.0040 - General-Purpose Output Data
+                                *           Register
+                                */
        u8      res044[0x50 - 0x44];
-       __be32  gpindr;         /* 0x.0050 - General-Purpose Input Data Register */
+       u32     gpindr;         /* 0x.0050 - General-Purpose Input Data
+                                *           Register
+                                */
        u8      res054[0x60 - 0x54];
-       __be32  pmuxcr;         /* 0x.0060 - Alternate Function Signal Multiplex Control */
-        __be32  pmuxcr2;       /* 0x.0064 - Alternate function signal multiplex control 2 */
-        __be32  dmuxcr;                /* 0x.0068 - DMA Mux Control Register */
+       u32     pmuxcr;         /* 0x.0060 - Alternate Function Signal
+                                *           Multiplex Control
+                                */
+       u32     pmuxcr2;        /* 0x.0064 - Alternate function signal
+                                *           multiplex control 2
+                                */
+       u32     dmuxcr;         /* 0x.0068 - DMA Mux Control Register */
         u8     res06c[0x70 - 0x6c];
-       __be32  devdisr;        /* 0x.0070 - Device Disable Control */
+       u32     devdisr;        /* 0x.0070 - Device Disable Control */
 #define CCSR_GUTS_DEVDISR_TB1  0x00001000
 #define CCSR_GUTS_DEVDISR_TB0  0x00004000
-       __be32  devdisr2;       /* 0x.0074 - Device Disable Control 2 */
+       u32     devdisr2;       /* 0x.0074 - Device Disable Control 2 */
        u8      res078[0x7c - 0x78];
-       __be32  pmjcr;          /* 0x.007c - 4 Power Management Jog Control Register */
-       __be32  powmgtcsr;      /* 0x.0080 - Power Management Status and Control Register */
-       __be32  pmrccr;         /* 0x.0084 - Power Management Reset Counter Configuration Register */
-       __be32  pmpdccr;        /* 0x.0088 - Power Management Power Down Counter Configuration Register */
-       __be32  pmcdr;          /* 0x.008c - 4Power management clock disable register */
-       __be32  mcpsumr;        /* 0x.0090 - Machine Check Summary Register */
-       __be32  rstrscr;        /* 0x.0094 - Reset Request Status and Control Register */
-       __be32  ectrstcr;       /* 0x.0098 - Exception reset control register */
-       __be32  autorstsr;      /* 0x.009c - Automatic reset status register */
-       __be32  pvr;            /* 0x.00a0 - Processor Version Register */
-       __be32  svr;            /* 0x.00a4 - System Version Register */
+       u32     pmjcr;          /* 0x.007c - Power Management Jog Control
+                                *           Register
+                                */
+       u32     powmgtcsr;      /* 0x.0080 - Power Management Status and
+                                *           Control Register
+                                */
+       u32     pmrccr;         /* 0x.0084 - Power Management Reset Counter
+                                *           Configuration Register
+                                */
+       u32     pmpdccr;        /* 0x.0088 - Power Management Power Down Counter
+                                *           Configuration Register
+                                */
+       u32     pmcdr;          /* 0x.008c - Power management clock disable
+                                *           register
+                                */
+       u32     mcpsumr;        /* 0x.0090 - Machine Check Summary Register */
+       u32     rstrscr;        /* 0x.0094 - Reset Request Status and
+                                *           Control Register
+                                */
+       u32     ectrstcr;       /* 0x.0098 - Exception reset control register */
+       u32     autorstsr;      /* 0x.009c - Automatic reset status register */
+       u32     pvr;            /* 0x.00a0 - Processor Version Register */
+       u32     svr;            /* 0x.00a4 - System Version Register */
        u8      res0a8[0xb0 - 0xa8];
-       __be32  rstcr;          /* 0x.00b0 - Reset Control Register */
+       u32     rstcr;          /* 0x.00b0 - Reset Control Register */
        u8      res0b4[0xc0 - 0xb4];
-       __be32  iovselsr;       /* 0x.00c0 - I/O voltage select status register
+       u32     iovselsr;       /* 0x.00c0 - I/O voltage select status register
                                             Called 'elbcvselcr' on 86xx SOCs */
        u8      res0c4[0x100 - 0xc4];
-       __be32  rcwsr[16];      /* 0x.0100 - Reset Control Word Status registers
+       u32     rcwsr[16];      /* 0x.0100 - Reset Control Word Status registers
                                             There are 16 registers */
        u8      res140[0x224 - 0x140];
-       __be32  iodelay1;       /* 0x.0224 - IO delay control register 1 */
-       __be32  iodelay2;       /* 0x.0228 - IO delay control register 2 */
+       u32     iodelay1;       /* 0x.0224 - IO delay control register 1 */
+       u32     iodelay2;       /* 0x.0228 - IO delay control register 2 */
        u8      res22c[0x604 - 0x22c];
-       __be32  pamubypenr;     /* 0x.604 - PAMU bypass enable register */
+       u32     pamubypenr;     /* 0x.604 - PAMU bypass enable register */
        u8      res608[0x800 - 0x608];
-       __be32  clkdvdr;        /* 0x.0800 - Clock Divide Register */
+       u32     clkdvdr;        /* 0x.0800 - Clock Divide Register */
        u8      res804[0x900 - 0x804];
-       __be32  ircr;           /* 0x.0900 - Infrared Control Register */
+       u32     ircr;           /* 0x.0900 - Infrared Control Register */
        u8      res904[0x908 - 0x904];
-       __be32  dmacr;          /* 0x.0908 - DMA Control Register */
+       u32     dmacr;          /* 0x.0908 - DMA Control Register */
        u8      res90c[0x914 - 0x90c];
-       __be32  elbccr;         /* 0x.0914 - eLBC Control Register */
+       u32     elbccr;         /* 0x.0914 - eLBC Control Register */
        u8      res918[0xb20 - 0x918];
-       __be32  ddr1clkdr;      /* 0x.0b20 - DDR1 Clock Disable Register */
-       __be32  ddr2clkdr;      /* 0x.0b24 - DDR2 Clock Disable Register */
-       __be32  ddrclkdr;       /* 0x.0b28 - DDR Clock Disable Register */
+       u32     ddr1clkdr;      /* 0x.0b20 - DDR1 Clock Disable Register */
+       u32     ddr2clkdr;      /* 0x.0b24 - DDR2 Clock Disable Register */
+       u32     ddrclkdr;       /* 0x.0b28 - DDR Clock Disable Register */
        u8      resb2c[0xe00 - 0xb2c];
-       __be32  clkocr;         /* 0x.0e00 - Clock Out Select Register */
+       u32     clkocr;         /* 0x.0e00 - Clock Out Select Register */
        u8      rese04[0xe10 - 0xe04];
-       __be32  ddrdllcr;       /* 0x.0e10 - DDR DLL Control Register */
+       u32     ddrdllcr;       /* 0x.0e10 - DDR DLL Control Register */
        u8      rese14[0xe20 - 0xe14];
-       __be32  lbcdllcr;       /* 0x.0e20 - LBC DLL Control Register */
-       __be32  cpfor;          /* 0x.0e24 - L2 charge pump fuse override register */
+       u32     lbcdllcr;       /* 0x.0e20 - LBC DLL Control Register */
+       u32     cpfor;          /* 0x.0e24 - L2 charge pump fuse override
+                                *           register
+                                */
        u8      rese28[0xf04 - 0xe28];
-       __be32  srds1cr0;       /* 0x.0f04 - SerDes1 Control Register 0 */
-       __be32  srds1cr1;       /* 0x.0f08 - SerDes1 Control Register 0 */
+       u32     srds1cr0;       /* 0x.0f04 - SerDes1 Control Register 0 */
+       u32     srds1cr1;       /* 0x.0f08 - SerDes1 Control Register 1 */
        u8      resf0c[0xf2c - 0xf0c];
-       __be32  itcr;           /* 0x.0f2c - Internal transaction control register */
+       u32     itcr;           /* 0x.0f2c - Internal transaction control
+                                *           register
+                                */
        u8      resf30[0xf40 - 0xf30];
-       __be32  srds2cr0;       /* 0x.0f40 - SerDes2 Control Register 0 */
-       __be32  srds2cr1;       /* 0x.0f44 - SerDes2 Control Register 0 */
+       u32     srds2cr0;       /* 0x.0f40 - SerDes2 Control Register 0 */
+       u32     srds2cr1;       /* 0x.0f44 - SerDes2 Control Register 1 */
 } __attribute__ ((packed));
 
+u32 fsl_guts_get_svr(void);
 
 /* Alternate function signal multiplex control */
 #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
index 7a26286db895c587a681243bee8cb521b3dc4ba4..fba44abd05ba1a9dd4e8c802b6d60fd18766f05e 100644 (file)
  */
 #define TMIO_MMC_SDIO_STATUS_QUIRK     (1 << 8)
 
+/*
+ * Some controllers have a 32-bit wide data port register
+ */
+#define TMIO_MMC_32BIT_DATA_PORT       (1 << 9)
+
 /*
  * Some controllers allows to set SDx actual clock
  */
index 73fad83acbcb6a157587180516f9ffe7c61eb7d7..95d69d4982965aa30fb65d9ffecfad13f4e7be8f 100644 (file)
@@ -89,6 +89,8 @@ struct mmc_ext_csd {
        unsigned int            boot_ro_lock;           /* ro lock support */
        bool                    boot_ro_lockable;
        bool                    ffu_capable;    /* Firmware upgrade support */
+       bool                    cmdq_support;   /* Command Queue supported */
+       unsigned int            cmdq_depth;     /* Command Queue depth */
 #define MMC_FIRMWARE_LEN 8
        u8                      fwrev[MMC_FIRMWARE_LEN];  /* FW version */
        u8                      raw_exception_status;   /* 54 */
@@ -207,18 +209,6 @@ struct sdio_func_tuple;
 
 #define SDIO_MAX_FUNCS         7
 
-enum mmc_blk_status {
-       MMC_BLK_SUCCESS = 0,
-       MMC_BLK_PARTIAL,
-       MMC_BLK_CMD_ERR,
-       MMC_BLK_RETRY,
-       MMC_BLK_ABORT,
-       MMC_BLK_DATA_ERR,
-       MMC_BLK_ECC_ERR,
-       MMC_BLK_NOMEDIUM,
-       MMC_BLK_NEW_REQUEST,
-};
-
 /* The number of MMC physical partitions.  These consist of:
  * boot partitions (2), general purpose partitions (4) and
  * RPMB partition (1) in MMC v4.4.
index 2b953eb8ceae195d0aa328dc50d4760d3a2268df..e33cc748dcfe16e65d20ea6f815ca365deb0b2db 100644 (file)
@@ -15,6 +15,18 @@ struct request;
 struct mmc_data;
 struct mmc_request;
 
+enum mmc_blk_status {
+       MMC_BLK_SUCCESS = 0,
+       MMC_BLK_PARTIAL,
+       MMC_BLK_CMD_ERR,
+       MMC_BLK_RETRY,
+       MMC_BLK_ABORT,
+       MMC_BLK_DATA_ERR,
+       MMC_BLK_ECC_ERR,
+       MMC_BLK_NOMEDIUM,
+       MMC_BLK_NEW_REQUEST,
+};
+
 struct mmc_command {
        u32                     opcode;
        u32                     arg;
@@ -150,7 +162,8 @@ struct mmc_async_req;
 extern int mmc_stop_bkops(struct mmc_card *);
 extern int mmc_read_bkops_status(struct mmc_card *);
 extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
-                                          struct mmc_async_req *, int *);
+                                          struct mmc_async_req *,
+                                          enum mmc_blk_status *);
 extern int mmc_interrupt_hpi(struct mmc_card *);
 extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
 extern void mmc_wait_for_req_done(struct mmc_host *host,
@@ -163,6 +176,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
 extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
 extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
 extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
+extern int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
 extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
 
 #define MMC_ERASE_ARG          0x00000000
index f5af2bd35e7fdffbab622c121db2433e980f23e0..15db6f83f53f9ae072ac47ab6e8ea05f9aa96340 100644 (file)
@@ -39,6 +39,12 @@ enum {
        EVENT_DATA_ERROR,
 };
 
+enum dw_mci_cookie {
+       COOKIE_UNMAPPED,
+       COOKIE_PRE_MAPPED,      /* mapped by pre_req() of dwmmc */
+       COOKIE_MAPPED,          /* mapped by prepare_data() of dwmmc */
+};
+
 struct mmc_data;
 
 enum {
index 0b2439441cc8ccdc3e4ce58a1e7d575ea7189e29..8bc8841214653c0c39d7c9e1030f6cc9b72fe78c 100644 (file)
@@ -93,8 +93,7 @@ struct mmc_host_ops {
         */
        void    (*post_req)(struct mmc_host *host, struct mmc_request *req,
                            int err);
-       void    (*pre_req)(struct mmc_host *host, struct mmc_request *req,
-                          bool is_first_req);
+       void    (*pre_req)(struct mmc_host *host, struct mmc_request *req);
        void    (*request)(struct mmc_host *host, struct mmc_request *req);
 
        /*
@@ -173,7 +172,7 @@ struct mmc_async_req {
         * Check error status of completed mmc request.
         * Returns 0 if success otherwise non zero.
         */
-       int (*err_check) (struct mmc_card *, struct mmc_async_req *);
+       enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *);
 };
 
 /**
@@ -198,14 +197,12 @@ struct mmc_slot {
  * @is_new_req         wake up reason was new request
  * @is_waiting_last_req        mmc context waiting for single running request
  * @wait               wait queue
- * @lock               lock to protect data fields
  */
 struct mmc_context_info {
        bool                    is_done_rcv;
        bool                    is_new_req;
        bool                    is_waiting_last_req;
        wait_queue_head_t       wait;
-       spinlock_t              lock;
 };
 
 struct regulator;
@@ -495,11 +492,6 @@ static inline int mmc_host_uhs(struct mmc_host *host)
                 MMC_CAP_UHS_DDR50);
 }
 
-static inline int mmc_host_packed_wr(struct mmc_host *host)
-{
-       return host->caps2 & MMC_CAP2_PACKED_WR;
-}
-
 static inline int mmc_card_hs(struct mmc_card *card)
 {
        return card->host->ios.timing == MMC_TIMING_SD_HS ||
@@ -546,6 +538,11 @@ static inline void mmc_retune_recheck(struct mmc_host *host)
                host->retune_now = 1;
 }
 
+static inline bool mmc_can_retune(struct mmc_host *host)
+{
+       return host->can_retune == 1;
+}
+
 void mmc_retune_pause(struct mmc_host *host);
 void mmc_retune_unpause(struct mmc_host *host);
 
index c376209c70ef4424e19e342c441670439c8a7d68..672730acc705799ef292a620f5011a5a1d556611 100644 (file)
 #define MMC_APP_CMD              55   /* ac   [31:16] RCA        R1  */
 #define MMC_GEN_CMD              56   /* adtc [0] RD/WR          R1  */
 
+  /* class 11 */
+#define MMC_QUE_TASK_PARAMS      44   /* ac   [20:16] task id    R1  */
+#define MMC_QUE_TASK_ADDR        45   /* ac   [31:0] data addr   R1  */
+#define MMC_EXECUTE_READ_TASK    46   /* adtc [20:16] task id    R1  */
+#define MMC_EXECUTE_WRITE_TASK   47   /* adtc [20:16] task id    R1  */
+#define MMC_CMDQ_TASK_MGMT       48   /* ac   [20:16] task id    R1b */
+
 static inline bool mmc_op_multi(u32 opcode)
 {
        return opcode == MMC_WRITE_MULTIPLE_BLOCK ||
@@ -272,6 +279,7 @@ struct _mmc_csd {
  * EXT_CSD fields
  */
 
+#define EXT_CSD_CMDQ_MODE_EN           15      /* R/W */
 #define EXT_CSD_FLUSH_CACHE            32      /* W */
 #define EXT_CSD_CACHE_CTRL             33      /* R/W */
 #define EXT_CSD_POWER_OFF_NOTIFICATION 34      /* R/W */
@@ -331,6 +339,8 @@ struct _mmc_csd {
 #define EXT_CSD_CACHE_SIZE             249     /* RO, 4 bytes */
 #define EXT_CSD_PWR_CL_DDR_200_360     253     /* RO */
 #define EXT_CSD_FIRMWARE_VERSION       254     /* RO, 8 bytes */
+#define EXT_CSD_CMDQ_DEPTH             307     /* RO */
+#define EXT_CSD_CMDQ_SUPPORT           308     /* RO */
 #define EXT_CSD_SUPPORTED_MODE         493     /* RO */
 #define EXT_CSD_TAG_UNIT_SIZE          498     /* RO */
 #define EXT_CSD_DATA_TAG_SUPPORT       499     /* RO */
@@ -437,6 +447,13 @@ struct _mmc_csd {
  */
 #define EXT_CSD_MANUAL_BKOPS_MASK      0x01
 
+/*
+ * Command Queue
+ */
+#define EXT_CSD_CMDQ_MODE_ENABLED      BIT(0)
+#define EXT_CSD_CMDQ_DEPTH_MASK                GENMASK(4, 0)
+#define EXT_CSD_CMDQ_SUPPORTED         BIT(0)
+
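A hedged sketch of how these new fields could be consumed (standalone, not part of this diff; the register contents are made up). Per the eMMC specification, CMDQ_DEPTH stores the queue depth minus one, hence the "+ 1":

#include <stdio.h>
#include <stdint.h>

#define EXT_CSD_CMDQ_DEPTH	307
#define EXT_CSD_CMDQ_SUPPORT	308
#define EXT_CSD_CMDQ_DEPTH_MASK	0x1f	/* GENMASK(4, 0) */
#define EXT_CSD_CMDQ_SUPPORTED	0x01	/* BIT(0) */

int main(void)
{
	uint8_t ext_csd[512] = { 0 };

	/* hypothetical register contents: CMDQ supported, depth field 0x1f -> 32 tasks */
	ext_csd[EXT_CSD_CMDQ_SUPPORT] = 0x01;
	ext_csd[EXT_CSD_CMDQ_DEPTH] = 0x1f;

	if (ext_csd[EXT_CSD_CMDQ_SUPPORT] & EXT_CSD_CMDQ_SUPPORTED) {
		unsigned int depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
				      EXT_CSD_CMDQ_DEPTH_MASK) + 1;

		printf("CMDQ supported, queue depth %u\n", depth);
	}
	return 0;
}
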
 /*
  * MMC_SWITCH access modes
  */
index 3945a8c9d3cbf68ba386b60166ec0b9b359e50d0..a7972cd3bc149bae8b5d59d06256244491361c1a 100644 (file)
@@ -29,5 +29,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
 void mmc_gpio_set_cd_isr(struct mmc_host *host,
                         irqreturn_t (*isr)(int irq, void *dev_id));
 void mmc_gpiod_request_cd_irq(struct mmc_host *host);
+bool mmc_can_gpio_cd(struct mmc_host *host);
 
 #endif
index 2739ccb69571e444265862bda4f5cc0f15636c2f..bed223b70217fd2d23f35a72a7d647a7cd0d525d 100644 (file)
@@ -13,6 +13,7 @@ struct soc_device_attribute {
        const char *family;
        const char *revision;
        const char *soc_id;
+       const void *data;
 };
 
 /**
@@ -34,4 +35,12 @@ void soc_device_unregister(struct soc_device *soc_dev);
  */
 struct device *soc_device_to_device(struct soc_device *soc);
 
+#ifdef CONFIG_SOC_BUS
+const struct soc_device_attribute *soc_device_match(
+       const struct soc_device_attribute *matches);
+#else
+static inline const struct soc_device_attribute *soc_device_match(
+       const struct soc_device_attribute *matches) { return NULL; }
+#endif
+
 #endif /* __SOC_BUS_H */
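
For context, a hedged sketch of how a driver might consume soc_device_match() together with the new .data field (the quirk structure, table entries, and match strings below are illustrative assumptions, not taken from this series):

#include <linux/printk.h>
#include <linux/sys_soc.h>
#include <linux/types.h>

struct soc_quirk {
	bool use_workaround;
};

static const struct soc_quirk rev1_quirk = { .use_workaround = true };

static const struct soc_device_attribute quirk_table[] = {
	/* match the "QorIQ <die>" family and "<major>.<minor>" revision strings */
	{ .family = "QorIQ T4240", .revision = "1.0", .data = &rev1_quirk },
	{ /* sentinel */ }
};

static void apply_soc_quirks(void)
{
	const struct soc_device_attribute *attr = soc_device_match(quirk_table);

	if (attr && ((const struct soc_quirk *)attr->data)->use_workaround)
		pr_info("enabling workaround for this SoC revision\n");
}

Because soc_device_match() returns the matching table entry, the .data pointer hands back whatever per-SoC payload the caller attached to it.
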
index 7e385b83b9d82057ec79e6fbc76b4a258f0eec2a..700a55156eee1f79b2018845b2983528b88008ab 100644 (file)
@@ -69,6 +69,6 @@ struct mmc_ioc_multi_cmd {
  * is enforced per ioctl call.  For larger data transfers, use the normal
  * block device operations.
  */
-#define MMC_IOC_MAX_BYTES  (512L * 256)
+#define MMC_IOC_MAX_BYTES  (512L * 1024)
 #define MMC_IOC_MAX_CMDS    255
 #endif /* LINUX_MMC_IOCTL_H */
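
For reference, the new ceiling is 512L * 1024 = 524288 bytes (512 KiB) per ioctl call, up from the previous 512L * 256 = 131072 bytes (128 KiB).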