asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge tag 'tee-misc-for-v5.1' of https://git.linaro.org/people/jens.wiklander/linux...
author	Arnd Bergmann <arnd@arndb.de>
Fri, 1 Mar 2019 14:00:40 +0000 (15:00 +0100)
committer	Arnd Bergmann <arnd@arndb.de>
Fri, 1 Mar 2019 14:01:16 +0000 (15:01 +0100)
OP-TEE driver
- dual license for optee_msg.h and optee_smc.h
Generic
- add cancellation support to client interface

* tag 'tee-misc-for-v5.1' of https://git.linaro.org/people/jens.wiklander/linux-tee:
  tee: optee: update optee_msg.h and optee_smc.h to dual license
  tee: add cancellation support to client interface

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
392 files changed:
Documentation/core-api/xarray.rst
Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
Documentation/devicetree/bindings/bus/imx-weim.txt
Documentation/devicetree/bindings/display/msm/gpu.txt
Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt [new file with mode: 0644]
Documentation/devicetree/bindings/opp/opp.txt
Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
Documentation/devicetree/bindings/power/qcom,rpmpd.txt [new file with mode: 0644]
Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt [new file with mode: 0644]
Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt [new file with mode: 0644]
Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt [new file with mode: 0644]
Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt [new file with mode: 0644]
Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt
Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt
MAINTAINERS
Makefile
arch/arc/include/asm/Kbuild
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/bitops.h
arch/arc/include/asm/perf_event.h
arch/arc/kernel/perf_event.c
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arc/lib/memset-archs.S
arch/arc/mm/fault.c
arch/arc/mm/init.c
arch/arm/boot/dts/bcm2835-rpi.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/include/asm/xen/page-coherent.h
arch/arm/mach-bcm/Kconfig
arch/arm/mach-socfpga/socfpga.c
arch/arm/mach-sunxi/sunxi.c
arch/arm64/include/asm/device.h
arch/arm64/include/asm/xen/page-coherent.h
arch/arm64/mm/dma-mapping.c
arch/s390/include/asm/mmu_context.h
arch/s390/kernel/early.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/vdso.c
arch/x86/Kconfig
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/mmu_context.h
arch/x86/kernel/crash.c
arch/x86/kernel/hpet.c
arch/x86/kernel/kexec-bzimage64.c
arch/x86/kernel/kvm.c
arch/x86/kernel/tsc.c
arch/x86/kvm/Makefile
arch/x86/kvm/hyperv.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/evmcs.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/kaslr.c
arch/x86/mm/mem_encrypt_identity.c
block/blk-mq-debugfs.c
block/blk-wbt.c
drivers/acpi/nfit/core.c
drivers/android/binderfs.c
drivers/ata/pata_macio.c
drivers/ata/sata_inic162x.c
drivers/bus/fsl-mc/fsl-mc-allocator.c
drivers/bus/fsl-mc/mc-io.c
drivers/bus/hisi_lpc.c
drivers/bus/imx-weim.c
drivers/char/hw_random/optee-rng.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/mwave/mwavedd.c
drivers/clk/Kconfig
drivers/clk/clk-versaclock5.c
drivers/clk/clk.c
drivers/clk/imx/clk-imx8qxp-lpcg.c
drivers/clk/qcom/Kconfig
drivers/clk/socfpga/clk-pll-s10.c
drivers/clk/socfpga/clk-s10.c
drivers/clk/tegra/Kconfig
drivers/clk/tegra/Makefile
drivers/clk/tegra/clk-dfll.c
drivers/clk/tegra/clk-dfll.h
drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
drivers/clk/tegra/cvb.c
drivers/clk/tegra/cvb.h
drivers/clk/zynqmp/clkc.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/tegra124-cpufreq.c
drivers/crypto/caam/caamalg_qi2.c
drivers/edac/altera_edac.h
drivers/firewire/sbp2.c
drivers/firmware/imx/misc.c
drivers/firmware/imx/scu-pd.c
drivers/firmware/raspberrypi.c
drivers/firmware/tegra/Makefile
drivers/firmware/tegra/bpmp-private.h [new file with mode: 0644]
drivers/firmware/tegra/bpmp-tegra186.c [new file with mode: 0644]
drivers/firmware/tegra/bpmp-tegra210.c [new file with mode: 0644]
drivers/firmware/tegra/bpmp.c
drivers/firmware/ti_sci.c
drivers/firmware/xilinx/Kconfig
drivers/firmware/xilinx/zynqmp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hv/channel.c
drivers/hv/hv_balloon.c
drivers/hv/ring_buffer.c
drivers/hv/vmbus_drv.c
drivers/ide/ide-proc.c
drivers/input/joystick/xpad.c
drivers/input/misc/uinput.c
drivers/input/serio/olpc_apsp.c
drivers/input/touchscreen/Kconfig
drivers/iommu/of_iommu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3-mbi.c
drivers/irqchip/irq-madera.c
drivers/irqchip/irq-stm32-exti.c
drivers/md/dm-crypt.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin-metadata.h
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/mfd/Makefile
drivers/mfd/bcm2835-pm.c [new file with mode: 0644]
drivers/misc/ibmvmc.c
drivers/misc/mei/hbm.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/pvpanic.c
drivers/mmc/host/Kconfig
drivers/mmc/host/dw_mmc-bluefield.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/sdhci-iproc.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/freescale/dpaa2/Kconfig
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/cassini.h
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/phy/asix.c
drivers/net/phy/mdio-hisi-femac.c
drivers/net/phy/rockchip.c
drivers/net/usb/asix_devices.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/virt_wifi.c
drivers/nvdimm/dimm.c
drivers/nvdimm/dimm_devs.c
drivers/nvdimm/nd.h
drivers/nvme/host/multipath.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/rdma.c
drivers/nvmem/Kconfig
drivers/nvmem/Makefile
drivers/nvmem/zynqmp_nvmem.c [new file with mode: 0644]
drivers/opp/core.c
drivers/opp/of.c
drivers/opp/opp.h
drivers/phy/qualcomm/phy-ath79-usb.c
drivers/phy/ti/phy-gmii-sel.c
drivers/reset/Kconfig
drivers/reset/Makefile
drivers/reset/reset-brcmstb.c [new file with mode: 0644]
drivers/reset/reset-imx7.c
drivers/reset/reset-socfpga.c
drivers/reset/reset-sunxi.c
drivers/reset/reset-zynqmp.c [new file with mode: 0644]
drivers/s390/char/sclp_config.c
drivers/scsi/aacraid/linit.c
drivers/scsi/csiostor/csio_attr.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvme.h
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_nvmet.h
drivers/scsi/scsi_lib.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/amlogic/meson-canvas.c
drivers/soc/amlogic/meson-clk-measure.c
drivers/soc/bcm/Kconfig
drivers/soc/bcm/Makefile
drivers/soc/bcm/bcm2835-power.c [new file with mode: 0644]
drivers/soc/fsl/dpio/dpio-cmd.h
drivers/soc/fsl/dpio/dpio-driver.c
drivers/soc/fsl/dpio/dpio-service.c
drivers/soc/fsl/dpio/dpio.c
drivers/soc/fsl/dpio/dpio.h
drivers/soc/fsl/dpio/qbman-portal.c
drivers/soc/fsl/guts.c
drivers/soc/imx/Kconfig
drivers/soc/imx/gpcv2.c
drivers/soc/qcom/Kconfig
drivers/soc/qcom/Makefile
drivers/soc/qcom/llcc-sdm845.c
drivers/soc/qcom/llcc-slice.c
drivers/soc/qcom/qcom_gsbi.c
drivers/soc/qcom/rmtfs_mem.c
drivers/soc/qcom/rpmh.c
drivers/soc/qcom/rpmhpd.c [new file with mode: 0644]
drivers/soc/qcom/rpmpd.c [new file with mode: 0644]
drivers/soc/qcom/smd-rpm.c
drivers/soc/tegra/fuse/fuse-tegra.c
drivers/soc/tegra/fuse/speedo-tegra210.c
drivers/soc/tegra/pmc.c
drivers/soc/ti/knav_dma.c
drivers/soc/xilinx/Kconfig
drivers/soc/xilinx/Makefile
drivers/soc/xilinx/zynqmp_pm_domains.c [new file with mode: 0644]
drivers/soc/xilinx/zynqmp_power.c [new file with mode: 0644]
drivers/staging/android/ion/ion.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/staging/rtl8723bs/include/ieee80211.h
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
drivers/staging/wilc1000/host_interface.c
drivers/staging/wilc1000/wilc_wlan.c
drivers/target/target_core_user.c
drivers/tee/optee/device.c
drivers/tee/tee_core.c
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
drivers/tty/n_hdlc.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/serial_core.c
drivers/tty/tty_io.c
drivers/tty/vt/vt.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/core/ledtrig-usbport.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_sourcesink.c
drivers/usb/host/ehci-mv.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/keyspan_usa26msg.h
drivers/usb/serial/keyspan_usa28msg.h
drivers/usb/serial/keyspan_usa49msg.h
drivers/usb/serial/keyspan_usa67msg.h
drivers/usb/serial/keyspan_usa90msg.h
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/usb-serial-simple.c
drivers/usb/usbip/README [deleted file]
drivers/vfio/pci/trace.h
drivers/vfio/pci/vfio_pci_nvlink2.c
drivers/video/console/vgacon.c
drivers/watchdog/bcm2835_wdt.c
drivers/xen/swiotlb-xen.c
fs/ceph/caps.c
fs/ceph/quota.c
fs/cifs/cifs_debug.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/smb2inode.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/trace.c
fs/cifs/trace.h
fs/cifs/transport.c
fs/direct-io.c
fs/fs-writeback.c
fs/notify/inotify/inotify_user.c
include/dt-bindings/power/qcom-rpmpd.h [new file with mode: 0644]
include/dt-bindings/power/xlnx-zynqmp-power.h [new file with mode: 0644]
include/dt-bindings/reset/amlogic,meson-g12a-reset.h [new file with mode: 0644]
include/dt-bindings/reset/imx8mq-reset.h [new file with mode: 0644]
include/dt-bindings/reset/xlnx-zynqmp-resets.h [new file with mode: 0644]
include/dt-bindings/soc/bcm2835-pm.h [new file with mode: 0644]
include/linux/backing-dev-defs.h
include/linux/blk_types.h
include/linux/firmware/imx/svc/misc.h
include/linux/firmware/xlnx-zynqmp.h
include/linux/fsl/mc.h
include/linux/hid.h
include/linux/hyperv.h
include/linux/if_arp.h
include/linux/interrupt.h
include/linux/libnvdimm.h
include/linux/mfd/bcm2835-pm.h [new file with mode: 0644]
include/linux/pm_opp.h
include/linux/reset/socfpga.h [new file with mode: 0644]
include/linux/reset/sunxi.h [new file with mode: 0644]
include/linux/sched/wake_q.h
include/linux/soc/qcom/llcc-qcom.h
include/linux/xarray.h
include/net/ax25.h
include/soc/bcm2835/raspberrypi-firmware.h
include/soc/fsl/dpaa2-io.h
include/soc/tegra/bpmp.h
include/soc/tegra/pmc.h
include/sound/soc.h
include/uapi/linux/android/binderfs.h [moved from include/uapi/linux/android/binder_ctl.h with 83% similarity]
include/uapi/linux/blkzoned.h
include/uapi/linux/input.h
include/xen/arm/page-coherent.h
kernel/exit.c
kernel/futex.c
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/locking/rwsem-xadd.c
kernel/sched/core.c
kernel/time/posix-cpu-timers.c
lib/test_xarray.c
lib/xarray.c
mm/backing-dev.c
mm/mincore.c
net/ax25/ax25_ip.c
net/ax25/ax25_route.c
net/can/bcm.c
net/ceph/messenger.c
net/ipv4/gre_demux.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv6/addrconf.c
net/ipv6/ip6_gre.c
net/mac80211/cfg.c
net/mac80211/rx.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/stream.c
net/wireless/nl80211.c
net/wireless/reg.c
sound/core/compress_offload.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/pcm512x.c
sound/soc/codecs/rt274.c
sound/soc/codecs/rt5514-spi.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/rt5682.h
sound/soc/codecs/tlv320aic32x4.c
sound/soc/fsl/imx-audmux.c
sound/soc/intel/Kconfig
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/broadwell.c
sound/soc/intel/boards/glk_rt5682_max98357a.c
sound/soc/intel/boards/haswell.c
sound/soc/intel/skylake/skl.c
sound/soc/qcom/qdsp6/q6asm-dai.c
sound/soc/qcom/sdm845.c
sound/soc/sh/dma-sh7760.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/ti/davinci-mcasp.c
sound/soc/xilinx/Kconfig
sound/soc/xilinx/xlnx_i2s.c
tools/testing/nvdimm/dimm_devs.c
tools/testing/selftests/gpio/gpio-mockup-chardev.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/x86_64/evmcs_test.c
tools/testing/selftests/rtc/rtctest.c
tools/testing/selftests/seccomp/Makefile
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/vm/gup_benchmark.c
tools/testing/selftests/x86/mpx-mini-test.c
tools/testing/selftests/x86/protection_keys.c
tools/testing/selftests/x86/unwind_vdso.c

index 6a6d67acaf690abfadc40c05e97a4557060dc188..5d54b27c6ebab15a41e53bb6f9529af128be8092 100644 (file)
@@ -108,12 +108,13 @@ some, but not all of the other indices changing.
 
 Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
 will not need to allocate memory.  The :c:func:`xa_reserve` function
-will store a reserved entry at the indicated index.  Users of the normal
-API will see this entry as containing ``NULL``.  If you do not need to
-use the reserved entry, you can call :c:func:`xa_release` to remove the
-unused entry.  If another user has stored to the entry in the meantime,
-:c:func:`xa_release` will do nothing; if instead you want the entry to
-become ``NULL``, you should use :c:func:`xa_erase`.
+will store a reserved entry at the indicated index.  Users of the
+normal API will see this entry as containing ``NULL``.  If you do
+not need to use the reserved entry, you can call :c:func:`xa_release`
+to remove the unused entry.  If another user has stored to the entry
+in the meantime, :c:func:`xa_release` will do nothing; if instead you
+want the entry to become ``NULL``, you should use :c:func:`xa_erase`.
+Using :c:func:`xa_insert` on a reserved entry will fail.
 
 If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
 will return ``true``.
@@ -183,6 +184,8 @@ Takes xa_lock internally:
  * :c:func:`xa_store_bh`
  * :c:func:`xa_store_irq`
  * :c:func:`xa_insert`
+ * :c:func:`xa_insert_bh`
+ * :c:func:`xa_insert_irq`
  * :c:func:`xa_erase`
  * :c:func:`xa_erase_bh`
  * :c:func:`xa_erase_irq`
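
As a reading aid for the xarray.rst hunk above, here is a minimal kernel-style sketch of the reserve/release flow it documents; the array my_xa, the chosen index, and store_item() are illustrative placeholders and are not part of this merge:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(my_xa);

	/* Make sure a later store at @index cannot fail on memory allocation. */
	static int store_item(unsigned long index, void *item)
	{
		int err = xa_reserve(&my_xa, index, GFP_KERNEL);

		if (err)
			return err;

		/* Normal-API readers see NULL at @index while it is only reserved. */
		if (!item) {
			/* Nothing to store after all: drop the unused reservation. */
			xa_release(&my_xa, index);
			return 0;
		}

		/*
		 * xa_insert() on the reserved entry would fail (the behaviour the
		 * hunk above documents), so fill the slot with xa_store() instead.
		 */
		xa_store(&my_xa, index, item, GFP_KERNEL);
		return 0;
	}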
index 27784b6edfeddfada4cd156615fbd0911cfa7823..fd2bed23e0e3ef616c992c20de4dd87ced9120c8 100644 (file)
@@ -58,7 +58,11 @@ This binding for the SCU power domain providers uses the generic power
 domain binding[2].
 
 Required properties:
-- compatible:          Should be "fsl,imx8qxp-scu-pd".
+- compatible:          Should be one of:
+                         "fsl,imx8qm-scu-pd",
+                         "fsl,imx8qxp-scu-pd"
+                       followed by "fsl,scu-pd"
+
 - #power-domain-cells: Must be 1. Contains the Resource ID used by
                        SCU commands.
                        See detailed Resource ID list from:
@@ -154,7 +158,7 @@ firmware {
                };
 
                pd: imx8qx-pd {
-                       compatible = "fsl,imx8qxp-scu-pd";
+                       compatible = "fsl,imx8qxp-scu-pd", "fsl,scu-pd";
                        #power-domain-cells = <1>;
                };
 
index 683eaf3aed795c74a72348c6eee9ed924553c1e8..dda7d6d6647987fc0ef46ad38dd6b1de4c44c9d5 100644 (file)
@@ -47,9 +47,9 @@ Optional properties:
 Timing property for child nodes. It is mandatory, not optional.
 
  - fsl,weim-cs-timing: The timing array, contains timing values for the
-                       child node. We can get the CS index from the child
-                       node's "reg" property. The number of registers depends
-                       on the selected chip.
+                       child node. We get the CS indexes from the address
+                       ranges in the child node's "reg" property.
+                       The number of registers depends on the selected chip:
                        For i.MX1, i.MX21 ("fsl,imx1-weim") there are two
                        registers: CSxU, CSxL.
                        For i.MX25, i.MX27, i.MX31 and i.MX35 ("fsl,imx27-weim")
@@ -80,3 +80,29 @@ Example for an imx6q-sabreauto board, the NOR flash connected to the WEIM:
                                        0x0000c000 0x1404a38e 0x00000000>;
                };
        };
+
+Example for an imx6q-based board, a multi-chipselect device connected to WEIM:
+
+In this case, both chip select 0 and 1 will be configured with the same timing
+array values.
+
+       weim: weim@21b8000 {
+               compatible = "fsl,imx6q-weim";
+               reg = <0x021b8000 0x4000>;
+               clocks = <&clks 196>;
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0 0 0x08000000 0x02000000
+                         1 0 0x0a000000 0x02000000
+                         2 0 0x0c000000 0x02000000
+                         3 0 0x0e000000 0x02000000>;
+               fsl,weim-cs-gpr = <&gpr>;
+
+               acme@0 {
+                       compatible = "acme,whatever";
+                       reg = <0 0 0x100>, <0 0x400000 0x800>,
+                               <1 0x400000 0x800>;
+                       fsl,weim-cs-timing = <0x024400b1 0x00001010 0x20081100
+                               0x00000000 0xa0000240 0x00000000>;
+               };
+       };
index ac8df3b871f900a0672f266a5b05635a7aca01cf..f8759145ce1a08e57046f4aa63198e642887b4e8 100644 (file)
@@ -27,7 +27,6 @@ Example:
                reg = <0x04300000 0x20000>;
                reg-names = "kgsl_3d0_reg_memory";
                interrupts = <GIC_SPI 80 0>;
-               interrupt-names = "kgsl_3d0_irq";
                clock-names =
                    "core",
                    "iface",
diff --git a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
new file mode 100644 (file)
index 0000000..4881561
--- /dev/null
@@ -0,0 +1,46 @@
+--------------------------------------------------------------------------
+=  Zynq UltraScale+ MPSoC nvmem firmware driver binding =
+--------------------------------------------------------------------------
+The nvmem_firmware node provides access to hardware-related data
+such as the SoC revision, IDCODE, etc., by using the firmware interface.
+
+Required properties:
+- compatible: should be "xlnx,zynqmp-nvmem-fw"
+
+= Data cells =
+These are child nodes of the silicon ID node; their bindings are described
+in bindings/nvmem/nvmem.txt
+
+-------
+ Example
+-------
+firmware {
+       zynqmp_firmware: zynqmp-firmware {
+               compatible = "xlnx,zynqmp-firmware";
+               method = "smc";
+
+               nvmem_firmware {
+                       compatible = "xlnx,zynqmp-nvmem-fw";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       /* Data cells */
+                       soc_revision: soc_revision {
+                               reg = <0x0 0x4>;
+                       };
+               };
+       };
+};
+
+= Data consumers =
+These are device nodes which consume nvmem data cells.
+
+For example:
+       pcap {
+               ...
+
+               nvmem-cells = <&soc_revision>;
+               nvmem-cell-names = "soc_revision";
+
+               ...
+       };
index c396c4c0af92db1faa8d0f7ee7fe44b6a7fac242..76b6c79604a5b6e68814a26bcd88e1b48926d3be 100644 (file)
@@ -129,6 +129,9 @@ Optional properties:
 - opp-microamp-<name>: Named opp-microamp property. Similar to
   opp-microvolt-<name> property, but for microamp instead.
 
+- opp-level: A value representing the performance level of the device,
+  expressed as a 32-bit integer.
+
 - clock-latency-ns: Specifies the maximum possible transition latency (in
   nanoseconds) for switching to this OPP from any other OPP.
 
index 7c947a996df1c4e2f22e2e90df29454098b1b5b7..7c7e972aaa423e18b828cb7baef14ab0462621d9 100644 (file)
@@ -32,6 +32,9 @@ Required properties:
 Optional properties:
 
 - power-supply: Power supply used to power the domain
+- clocks: a number of phandles to clocks that need to be enabled during
+  domain power-up sequencing to ensure reset propagation into devices
+  located inside this power domain
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.txt b/Documentation/devicetree/bindings/power/qcom,rpmpd.txt
new file mode 100644 (file)
index 0000000..980e541
--- /dev/null
@@ -0,0 +1,145 @@
+Qualcomm RPM/RPMh Power domains
+
+For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh
+which then translates it into a corresponding voltage on a rail
+
+Required Properties:
+ - compatible: Should be one of the following
+       * qcom,msm8996-rpmpd: RPM Power domain for the msm8996 family of SoCs
+       * qcom,sdm845-rpmhpd: RPMh Power domain for the sdm845 family of SoCs
+ - #power-domain-cells: number of cells in Power domain specifier
+       must be 1.
+ - operating-points-v2: Phandle to the OPP table for the Power domain.
+       Refer to Documentation/devicetree/bindings/power/power_domain.txt
+       and Documentation/devicetree/bindings/opp/opp.txt for more details
+
+Refer to <dt-bindings/power/qcom-rpmpd.h> for the level values for
+various OPPs for different platforms as well as Power domain indexes
+
+Example: rpmh power domain controller and OPP table
+
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+opp-level values specified in the OPP tables for RPMh power domains
+should use the RPMH_REGULATOR_LEVEL_* constants from
+<dt-bindings/power/qcom-rpmpd.h>
+
+       rpmhpd: power-controller {
+               compatible = "qcom,sdm845-rpmhpd";
+               #power-domain-cells = <1>;
+               operating-points-v2 = <&rpmhpd_opp_table>;
+
+               rpmhpd_opp_table: opp-table {
+                       compatible = "operating-points-v2";
+
+                       rpmhpd_opp_ret: opp1 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+                       };
+
+                       rpmhpd_opp_min_svs: opp2 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+                       };
+
+                       rpmhpd_opp_low_svs: opp3 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+                       };
+
+                       rpmhpd_opp_svs: opp4 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+                       };
+
+                       rpmhpd_opp_svs_l1: opp5 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+                       };
+
+                       rpmhpd_opp_nom: opp6 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+                       };
+
+                       rpmhpd_opp_nom_l1: opp7 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+                       };
+
+                       rpmhpd_opp_nom_l2: opp8 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+                       };
+
+                       rpmhpd_opp_turbo: opp9 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+                       };
+
+                       rpmhpd_opp_turbo_l1: opp10 {
+                               opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+                       };
+               };
+       };
+
+Example: rpm power domain controller and OPP table
+
+       rpmpd: power-controller {
+               compatible = "qcom,msm8996-rpmpd";
+               #power-domain-cells = <1>;
+               operating-points-v2 = <&rpmpd_opp_table>;
+
+               rpmpd_opp_table: opp-table {
+                       compatible = "operating-points-v2";
+
+                       rpmpd_opp_low: opp1 {
+                               opp-level = <1>;
+                       };
+
+                       rpmpd_opp_ret: opp2 {
+                               opp-level = <2>;
+                       };
+
+                       rpmpd_opp_svs: opp3 {
+                               opp-level = <3>;
+                       };
+
+                       rpmpd_opp_normal: opp4 {
+                               opp-level = <4>;
+                       };
+
+                       rpmpd_opp_high: opp5 {
+                               opp-level = <5>;
+                       };
+
+                       rpmpd_opp_turbo: opp6 {
+                               opp-level = <6>;
+                       };
+               };
+       };
+
+Example: Client/Consumer device using OPP table
+
+       leaky-device0@12350000 {
+               compatible = "foo,i-leak-current";
+               reg = <0x12350000 0x1000>;
+               power-domains = <&rpmhpd SDM845_MX>;
+               operating-points-v2 = <&leaky_opp_table>;
+       };
+
+
+       leaky_opp_table: opp-table {
+               compatible = "operating-points-v2";
+
+               opp1 {
+                       opp-hz = /bits/ 64 <144000>;
+                       required-opps = <&rpmhpd_opp_low>;
+               };
+
+               opp2 {
+                       opp-hz = /bits/ 64 <400000>;
+                       required-opps = <&rpmhpd_opp_ret>;
+               };
+
+               opp3 {
+                       opp-hz = /bits/ 64 <20000000>;
+                       required-opps = <&rpmpd_opp_svs>;
+               };
+
+               opp4 {
+                       opp-hz = /bits/ 64 <25000000>;
+                       required-opps = <&rpmpd_opp_normal>;
+               };
+       };
diff --git a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
new file mode 100644 (file)
index 0000000..d366f1e
--- /dev/null
@@ -0,0 +1,25 @@
+--------------------------------------------------------------------
+Device Tree Bindings for the Xilinx Zynq MPSoC Power Management
+--------------------------------------------------------------------
+The zynqmp-power node describes the power management configurations.
+It controls the remote suspend/shutdown interfaces.
+
+Required properties:
+ - compatible:         Must contain:   "xlnx,zynqmp-power"
+ - interrupts:         Interrupt specifier
+
+-------
+Example
+-------
+
+firmware {
+       zynqmp_firmware: zynqmp-firmware {
+               compatible = "xlnx,zynqmp-firmware";
+               method = "smc";
+
+               zynqmp_power: zynqmp-power {
+                       compatible = "xlnx,zynqmp-power";
+                       interrupts = <0 35 4>;
+               };
+       };
+};
diff --git a/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt b/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt
new file mode 100644 (file)
index 0000000..8d1b820
--- /dev/null
@@ -0,0 +1,34 @@
+-----------------------------------------------------------
+Device Tree Bindings for the Xilinx Zynq MPSoC PM domains
+-----------------------------------------------------------
+The binding for zynqmp-power-controller follows the common
+generic PM domain binding[1].
+
+[1] Documentation/devicetree/bindings/power/power_domain.txt
+
+== Zynq MPSoC Generic PM Domain Node ==
+
+Required property:
+ - The property below should be in the zynqmp-firmware node.
+ - #power-domain-cells:        Number of cells in a PM domain specifier. Must be 1.
+
+Power domain ID indexes are mentioned in
+include/dt-bindings/power/xlnx-zynqmp-power.h.
+
+-------
+Example
+-------
+
+firmware {
+       zynqmp_firmware: zynqmp-firmware {
+               ...
+               #power-domain-cells = <1>;
+               ...
+       };
+};
+
+sata {
+       ...
+       power-domains = <&zynqmp_firmware 28>;
+       ...
+};
diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt
new file mode 100644 (file)
index 0000000..6e5341b
--- /dev/null
@@ -0,0 +1,27 @@
+Broadcom STB SW_INIT-style reset controller
+===========================================
+
+Broadcom STB SoCs have a SW_INIT-style reset controller with separate
+SET/CLEAR/STATUS registers and possibly multiple banks, each containing
+32 reset lines.
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required properties:
+- compatible: should be brcm,brcmstb-reset
+- reg: register base and length
+- #reset-cells: must be set to 1
+
+Example:
+
+       reset: reset-controller@8404318 {
+               compatible = "brcm,brcmstb-reset";
+               reg = <0x8404318 0x30>;
+               #reset-cells = <1>;
+       };
+
+       &ethernet_switch {
+               resets = <&reset>;
+               reset-names = "switch";
+       };
index 1ab1d109318e2aa6ca22d01c57268fa310e8b2a7..2ecf33815d1823cb41a2a51bde0c24adcc956b88 100644 (file)
@@ -5,7 +5,9 @@ Please also refer to reset.txt in this directory for common reset
 controller binding usage.
 
 Required properties:
-- compatible: Should be "fsl,imx7d-src", "syscon"
+- compatible:
+       - For i.MX7 SoCs should be "fsl,imx7d-src", "syscon"
+       - For i.MX8MQ SoCs should be "fsl,imx8mq-src", "syscon"
 - reg: should be register base and length as documented in the
   datasheet
 - interrupts: Should contain SRC interrupt
@@ -44,4 +46,5 @@ Example:
 
 
 For a list of all valid reset indices see
-<dt-bindings/reset/imx7-reset.h>
+<dt-bindings/reset/imx7-reset.h> for i.MX7 and
+<dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ
diff --git a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt
new file mode 100644 (file)
index 0000000..27a45fe
--- /dev/null
@@ -0,0 +1,52 @@
+--------------------------------------------------------------------------
+ =  Zynq UltraScale+ MPSoC reset driver binding =
+--------------------------------------------------------------------------
+The Zynq UltraScale+ MPSoC has several different resets.
+
+See Chapter 36 of the Zynq UltraScale+ MPSoC TRM (UG) for more information
+about zynqmp resets.
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required Properties:
+- compatible:  "xlnx,zynqmp-reset"
+- #reset-cells:        Specifies the number of cells needed to encode a reset
+               line, should be 1
+
+-------
+Example
+-------
+
+firmware {
+       zynqmp_firmware: zynqmp-firmware {
+               compatible = "xlnx,zynqmp-firmware";
+               method = "smc";
+
+               zynqmp_reset: reset-controller {
+                       compatible = "xlnx,zynqmp-reset";
+                       #reset-cells = <1>;
+               };
+       };
+};
+
+Specifying reset lines connected to IP modules
+==============================================
+
+Device nodes that need access to reset lines should
+specify them as a reset phandle in their corresponding node as
+specified in reset.txt.
+
+For a list of all valid reset indices see
+<dt-bindings/reset/xlnx-zynqmp-resets.h>
+
+Example:
+
+serdes: zynqmp_phy@fd400000 {
+       ...
+
+       resets = <&zynqmp_reset ZYNQMP_RESET_SATA>;
+       reset-names = "sata_rst";
+
+       ...
+};
index 205a54bcd7c7f3682481ee50fa2cddecabb999d6..6bf6b43f8dd863fb41b9b8b54c837a9be966b562 100644 (file)
@@ -9,6 +9,8 @@ Required properties:
                        "amlogic,meson-gx-clk-measure" for GX SoCs
                        "amlogic,meson8-clk-measure" for Meson8 SoCs
                        "amlogic,meson8b-clk-measure" for Meson8b SoCs
+                       "amlogic,meson-axg-clk-measure" for AXG SoCs
+                       "amlogic,meson-g12a-clk-measure" for G12a SoCs
 - reg: base address and size of the Clock Measurer register space.
 
 Example:
diff --git a/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt
new file mode 100644 (file)
index 0000000..3b7d329
--- /dev/null
@@ -0,0 +1,46 @@
+BCM2835 PM (Power domains, watchdog)
+
+The PM block controls power domains and some reset lines, and includes
+a watchdog timer.  This binding supersedes the brcm,bcm2835-pm-wdt
+binding which covered some of PM's register range and functionality.
+
+Required properties:
+
+- compatible:          Should be "brcm,bcm2835-pm"
+- reg:                 Specifies base physical address and size of the two
+                         register ranges ("PM" and "ASYNC_BRIDGE" in that
+                         order)
+- clocks:              a) v3d: The V3D clock from CPRMAN
+                       b) peri_image: The PERI_IMAGE clock from CPRMAN
+                       c) h264: The H264 clock from CPRMAN
+                       d) isp: The ISP clock from CPRMAN
+- #reset-cells:        Should be 1.  This property follows the reset controller
+                         bindings[1].
+- #power-domain-cells: Should be 1.  This property follows the power domain
+                         bindings[2].
+
+Optional properties:
+
+- timeout-sec:         Contains the watchdog timeout in seconds
+- system-power-controller: Whether the watchdog is controlling the
+    system power.  This node follows the power controller bindings[3].
+
+[1] Documentation/devicetree/bindings/reset/reset.txt
+[2] Documentation/devicetree/bindings/power/power_domain.txt
+[3] Documentation/devicetree/bindings/power/power-controller.txt
+
+Example:
+
+pm {
+       compatible = "brcm,bcm2835-pm", "brcm,bcm2835-pm-wdt";
+       #power-domain-cells = <1>;
+       #reset-cells = <1>;
+       reg = <0x7e100000 0x114>,
+             <0x7e00a000 0x24>;
+       clocks = <&clocks BCM2835_CLOCK_V3D>,
+                <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+                <&clocks BCM2835_CLOCK_H264>,
+                <&clocks BCM2835_CLOCK_ISP>;
+       clock-names = "v3d", "peri_image", "h264", "isp";
+       system-power-controller;
+};
index ec95705ba692fbd7c93454d5392f49501246387d..f3fa313963d5760f49158e81af33ff7265487b4d 100644 (file)
@@ -23,6 +23,7 @@ resources.
                    "qcom,rpm-msm8916"
                    "qcom,rpm-msm8974"
                    "qcom,rpm-msm8998"
+                   "qcom,rpm-sdm660"
                    "qcom,rpm-qcs404"
 
 - qcom,smd-channels:
index dcef7e938f2f8fc0842ab2da728c8bcf6bffa05d..eb25f3f0c503d1cf00fcc97ba5729794b19a7f4c 100644 (file)
@@ -1948,19 +1948,37 @@ M:      David Brown <david.brown@linaro.org>
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/soc/qcom/
+F:     Documentation/devicetree/bindings/*/qcom*
 F:     arch/arm/boot/dts/qcom-*.dts
 F:     arch/arm/boot/dts/qcom-*.dtsi
 F:     arch/arm/mach-qcom/
-F:     arch/arm64/boot/dts/qcom/*
+F:     arch/arm64/boot/dts/qcom/
+F:     drivers/*/qcom/
+F:     drivers/*/qcom*
+F:     drivers/*/*/qcom/
+F:     drivers/*/*/qcom*
+F:     drivers/*/pm8???-*
+F:     drivers/bluetooth/btqcomsmd.c
+F:     drivers/clocksource/timer-qcom.c
+F:     drivers/extcon/extcon-qcom*
+F:     drivers/iommu/msm*
 F:     drivers/i2c/busses/i2c-qup.c
-F:     drivers/clk/qcom/
-F:     drivers/dma/qcom/
-F:     drivers/soc/qcom/
+F:     drivers/i2c/busses/i2c-qcom-geni.c
+F:     drivers/mfd/ssbi.c
+F:     drivers/mmc/host/mmci_qcom*
+F:     drivers/mmc/host/sdhci_msm.c
+F:     drivers/pci/controller/dwc/pcie-qcom.c
+F:     drivers/phy/qualcomm/
+F:     drivers/power/*/msm*
+F:     drivers/reset/reset-qcom-*
+F:     drivers/scsi/ufs/ufs-qcom.*
 F:     drivers/spi/spi-qup.c
+F:     drivers/spi/spi-geni-qcom.c
+F:     drivers/spi/spi-qcom-qspi.c
 F:     drivers/tty/serial/msm_serial.c
-F:     drivers/*/pm8???-*
-F:     drivers/mfd/ssbi.c
-F:     drivers/firmware/qcom_scm*
+F:     drivers/usb/dwc3/dwc3-qcom.c
+F:     include/dt-bindings/*/qcom*
+F:     include/linux/*/qcom*
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux.git
 
 ARM/RADISYS ENP2611 MACHINE SUPPORT
@@ -3052,8 +3070,8 @@ F:        include/linux/bcm963xx_nvram.h
 F:     include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M:     Rasesh Mody <rasesh.mody@cavium.com>
-M:     Dept-GELinuxNICDev@cavium.com
+M:     Rasesh Mody <rmody@marvell.com>
+M:     GR-Linux-NIC-Dev@marvell.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bnx2.*
@@ -3072,9 +3090,9 @@ S:        Supported
 F:     drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M:     Ariel Elior <ariel.elior@cavium.com>
-M:     Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:     everest-linux-l2@cavium.com
+M:     Ariel Elior <aelior@marvell.com>
+M:     Sudarsana Kalluru <skalluru@marvell.com>
+M:     GR-everest-linux-l2@marvell.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
@@ -3249,9 +3267,9 @@ S:        Supported
 F:     drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M:     Rasesh Mody <rasesh.mody@cavium.com>
-M:     Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:     Dept-GELinuxNICDev@cavium.com
+M:     Rasesh Mody <rmody@marvell.com>
+M:     Sudarsana Kalluru <skalluru@marvell.com>
+M:     GR-Linux-NIC-Dev@marvell.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/brocade/bna/
@@ -3978,6 +3996,7 @@ F:        drivers/cpufreq/arm_big_little.c
 CPU POWER MONITORING SUBSYSTEM
 M:     Thomas Renninger <trenn@suse.com>
 M:     Shuah Khan <shuah@kernel.org>
+M:     Shuah Khan <skhan@linuxfoundation.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     tools/power/cpupower/
@@ -8258,6 +8277,7 @@ F:        include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
 M:     Shuah Khan <shuah@kernel.org>
+M:     Shuah Khan <skhan@linuxfoundation.org>
 L:     linux-kselftest@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
 Q:     https://patchwork.kernel.org/project/linux-kselftest/list/
@@ -10688,9 +10708,9 @@ S:      Maintained
 F:     drivers/net/netdevsim/*
 
 NETXEN (1/10) GbE SUPPORT
-M:     Manish Chopra <manish.chopra@cavium.com>
-M:     Rahul Verma <rahul.verma@cavium.com>
-M:     Dept-GELinuxNICDev@cavium.com
+M:     Manish Chopra <manishc@marvell.com>
+M:     Rahul Verma <rahulv@marvell.com>
+M:     GR-Linux-NIC-Dev@marvell.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/qlogic/netxen/
@@ -12479,8 +12499,8 @@ S:      Supported
 F:     drivers/scsi/qedi/
 
 QLOGIC QL4xxx ETHERNET DRIVER
-M:     Ariel Elior <Ariel.Elior@cavium.com>
-M:     everest-linux-l2@cavium.com
+M:     Ariel Elior <aelior@marvell.com>
+M:     GR-everest-linux-l2@marvell.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/qlogic/qed/
@@ -12488,8 +12508,8 @@ F:      include/linux/qed/
 F:     drivers/net/ethernet/qlogic/qede/
 
 QLOGIC QL4xxx RDMA DRIVER
-M:     Michal Kalderon <Michal.Kalderon@cavium.com>
-M:     Ariel Elior <Ariel.Elior@cavium.com>
+M:     Michal Kalderon <mkalderon@marvell.com>
+M:     Ariel Elior <aelior@marvell.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/hw/qedr/
@@ -12509,7 +12529,7 @@ F:      Documentation/scsi/LICENSE.qla2xxx
 F:     drivers/scsi/qla2xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER
-M:     Dept-GELinuxNICDev@cavium.com
+M:     GR-Linux-NIC-Dev@marvell.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
@@ -12523,16 +12543,16 @@ F:    Documentation/scsi/LICENSE.qla4xxx
 F:     drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:     Shahed Shaikh <Shahed.Shaikh@cavium.com>
-M:     Manish Chopra <manish.chopra@cavium.com>
-M:     Dept-GELinuxNICDev@cavium.com
+M:     Shahed Shaikh <shshaikh@marvell.com>
+M:     Manish Chopra <manishc@marvell.com>
+M:     GR-Linux-NIC-Dev@marvell.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:     Manish Chopra <manish.chopra@cavium.com>
-M:     Dept-GELinuxNICDev@cavium.com
+M:     Manish Chopra <manishc@marvell.com>
+M:     GR-Linux-NIC-Dev@marvell.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/qlogic/qlge/
@@ -12961,6 +12981,7 @@ F:      drivers/reset/
 F:     Documentation/devicetree/bindings/reset/
 F:     include/dt-bindings/reset/
 F:     include/linux/reset.h
+F:     include/linux/reset/
 F:     include/linux/reset-controller.h
 
 RESTARTABLE SEQUENCES SUPPORT
@@ -15846,6 +15867,7 @@ F:      drivers/usb/common/usb-otg-fsm.c
 USB OVER IP DRIVER
 M:     Valentina Manea <valentina.manea.m@gmail.com>
 M:     Shuah Khan <shuah@kernel.org>
+M:     Shuah Khan <skhan@linuxfoundation.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/usb/usbip_protocol.txt
index f5b1d0d168e01e90d76b8d26c26c38cc52459787..141653226f3c22ad9385d0b3863f714c8afc8038 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
index feed50ce89fadfc1291080dffb16b969370553d8..caa270261521d45e46759592e9a3007c15fdd80f 100644 (file)
@@ -3,23 +3,19 @@ generic-y += bugs.h
 generic-y += compat.h
 generic-y += device.h
 generic-y += div64.h
-generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += extable.h
-generic-y += fb.h
 generic-y += ftrace.h
 generic-y += hardirq.h
 generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
-generic-y += kmap_types.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += parport.h
-generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += topology.h
index 49bfbd879caa6ffa08553e9b0f49b542739bb95b..f1b86cef09057ace18085cb1cb3f18b3be274852 100644 (file)
@@ -216,6 +216,14 @@ struct bcr_fp_arcv2 {
 #endif
 };
 
+struct bcr_actionpoint {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad:21, min:1, num:2, ver:8;
+#else
+       unsigned int ver:8, num:2, min:1, pad:21;
+#endif
+};
+
 #include <soc/arc/timers.h>
 
 struct bcr_bpu_arcompact {
@@ -283,7 +291,7 @@ struct cpuinfo_arc_cache {
 };
 
 struct cpuinfo_arc_bpu {
-       unsigned int ver, full, num_cache, num_pred;
+       unsigned int ver, full, num_cache, num_pred, ret_stk;
 };
 
 struct cpuinfo_arc_ccm {
@@ -302,7 +310,7 @@ struct cpuinfo_arc {
        struct {
                unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
                             fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
-                            debug:1, ap:1, smart:1, rtt:1, pad3:4,
+                            ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
                             timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
        } extn;
        struct bcr_mpy extn_mpy;
index ee9246184033b3138f8d09878fc7763502b11e5c..202b74c339f0b43c6ca31a9ab0f76ae2240f0934 100644 (file)
@@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
 {
        if (!word)
                return word;
@@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
 {
-       int n;
+       unsigned long n;
 
        asm volatile(
        "       ffs.f   %0, %1          \n"  /* 0:31; 31(Z) if src 0 */
index 9185541035cc3a716b59eb12a159850b18c5a7ea..6958545390f0f847ed3a7745b7325964d7f23f17 100644 (file)
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
 
        /* counts condition */
        [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+       /* All jump instructions that are taken */
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
        [PERF_COUNT_ARC_BPOK]         = "bpok",   /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
        [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
index 8aec462d90fbe8f0aa88847272d02004a863f2db..861a8aea51f9fe0c086665dd84677f7ee80ea838 100644 (file)
@@ -1,15 +1,10 @@
-/*
- * Linux performance counter support for ARC700 series
- *
- * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
- *
- * This code is inspired by the perf support of various other architectures.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Linux performance counter support for ARC CPUs.
+// This code is inspired by the perf support of various other architectures.
+//
+// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
+
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <asm/arcregs.h>
 #include <asm/stacktrace.h>
 
+/* HW holds 8 symbols + one for null terminator */
+#define ARCPMU_EVENT_NAME_LEN  9
+
+enum arc_pmu_attr_groups {
+       ARCPMU_ATTR_GR_EVENTS,
+       ARCPMU_ATTR_GR_FORMATS,
+       ARCPMU_NR_ATTR_GR
+};
+
+struct arc_pmu_raw_event_entry {
+       char name[ARCPMU_EVENT_NAME_LEN];
+};
+
 struct arc_pmu {
        struct pmu      pmu;
        unsigned int    irq;
        int             n_counters;
+       int             n_events;
        u64             max_period;
        int             ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
+
+       struct arc_pmu_raw_event_entry  *raw_entry;
+       struct attribute                **attrs;
+       struct perf_pmu_events_attr     *attr;
+       const struct attribute_group    *attr_groups[ARCPMU_NR_ATTR_GR + 1];
 };
 
 struct arc_pmu_cpu {
@@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data)
 {
        struct arc_callchain_trace *ctrl = data;
        struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
+
        perf_callchain_store(entry, addr);
 
        if (ctrl->depth++ < 3)
@@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data)
        return -1;
 }
 
-void
-perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                          struct pt_regs *regs)
 {
        struct arc_callchain_trace ctrl = {
                .depth = 0,
@@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
        arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
 }
 
-void
-perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                        struct pt_regs *regs)
 {
        /*
         * User stack can't be unwound trivially with kernel dwarf unwinder
@@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu;
 static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
 
 /* read counter #idx; note that counter# != event# on ARC! */
-static uint64_t arc_pmu_read_counter(int idx)
+static u64 arc_pmu_read_counter(int idx)
 {
-       uint32_t tmp;
-       uint64_t result;
+       u32 tmp;
+       u64 result;
 
        /*
         * ARC supports making 'snapshots' of the counters, so we don't
@@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx)
        write_aux_reg(ARC_REG_PCT_INDEX, idx);
        tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
        write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
-       result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
+       result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
        result |= read_aux_reg(ARC_REG_PCT_SNAPL);
 
        return result;
@@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx)
 static void arc_perf_event_update(struct perf_event *event,
                                  struct hw_perf_event *hwc, int idx)
 {
-       uint64_t prev_raw_count = local64_read(&hwc->prev_count);
-       uint64_t new_raw_count = arc_pmu_read_counter(idx);
-       int64_t delta = new_raw_count - prev_raw_count;
+       u64 prev_raw_count = local64_read(&hwc->prev_count);
+       u64 new_raw_count = arc_pmu_read_counter(idx);
+       s64 delta = new_raw_count - prev_raw_count;
 
        /*
         * We aren't afraid of hwc->prev_count changing beneath our feet
@@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event)
        int ret;
 
        if (!is_sampling_event(event)) {
-               hwc->sample_period  = arc_pmu->max_period;
+               hwc->sample_period = arc_pmu->max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }
@@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event)
                pr_debug("init cache event with h/w %08x \'%s\'\n",
                         (int)hwc->config, arc_pmu_ev_hw_map[ret]);
                return 0;
+
+       case PERF_TYPE_RAW:
+               if (event->attr.config >= arc_pmu->n_events)
+                       return -ENOENT;
+
+               hwc->config |= event->attr.config;
+               pr_debug("init raw event with idx %lld \'%s\'\n",
+                        event->attr.config,
+                        arc_pmu->raw_entry[event->attr.config].name);
+
+               return 0;
+
        default:
                return -ENOENT;
        }
@@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event)
 /* starts all counters */
 static void arc_pmu_enable(struct pmu *pmu)
 {
-       uint32_t tmp;
+       u32 tmp;
        tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
        write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
 }
@@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu)
 /* stops all counters */
 static void arc_pmu_disable(struct pmu *pmu)
 {
-       uint32_t tmp;
+       u32 tmp;
        tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
        write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
 }
@@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event)
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
-       } else  if (unlikely(left <= 0)) {
+       } else if (unlikely(left <= 0)) {
                /* left underflowed by less than period. */
                left += period;
                local64_set(&hwc->period_left, left);
@@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event)
        write_aux_reg(ARC_REG_PCT_INDEX, idx);
 
        /* Write value */
-       write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
-       write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));
+       write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
+       write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
 
        perf_event_update_userpage(event);
 
@@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags)
        /* Enable interrupt for this counter */
        if (is_sampling_event(event))
                write_aux_reg(ARC_REG_PCT_INT_CTRL,
-                             read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+                             read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
        /* enable ARC pmu here */
        write_aux_reg(ARC_REG_PCT_INDEX, idx);          /* counter # */
@@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
                 * Reset interrupt flag by writing of 1. This is required
                 * to make sure pending interrupt was not left.
                 */
-               write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+               write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
                write_aux_reg(ARC_REG_PCT_INT_CTRL,
-                             read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
+                             read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
        }
 
        if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags)
 
        if (is_sampling_event(event)) {
                /* Mimic full counter overflow as other arches do */
-               write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
+               write_aux_reg(ARC_REG_PCT_INT_CNTL,
+                             lower_32_bits(arc_pmu->max_period));
                write_aux_reg(ARC_REG_PCT_INT_CNTH,
-                             (arc_pmu->max_period >> 32));
+                             upper_32_bits(arc_pmu->max_period));
        }
 
        write_aux_reg(ARC_REG_PCT_CONFIG, 0);
@@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
                idx = __ffs(active_ints);
 
                /* Reset interrupt flag by writing of 1 */
-               write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+               write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
 
                /*
                 * On reset of "interrupt active" bit corresponding
@@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
                 * Now we need to re-enable interrupt for the counter.
                 */
                write_aux_reg(ARC_REG_PCT_INT_CTRL,
-                       read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+                       read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
                event = pmu_cpu->act_counter[idx];
                hwc = &event->hw;
@@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
                                arc_pmu_stop(event, 0);
                }
 
-               active_ints &= ~(1U << idx);
+               active_ints &= ~BIT(idx);
        } while (active_ints);
 
 done:
@@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data)
        write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
 }
 
+/* Event field occupies the bottom 15 bits of our config field */
+PMU_FORMAT_ATTR(event, "config:0-14");
+static struct attribute *arc_pmu_format_attrs[] = {
+       &format_attr_event.attr,
+       NULL,
+};
+
+static struct attribute_group arc_pmu_format_attr_gr = {
+       .name = "format",
+       .attrs = arc_pmu_format_attrs,
+};
+
+static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *page)
+{
+       struct perf_pmu_events_attr *pmu_attr;
+
+       pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+       return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+/*
+ * We don't add attrs here as we don't have pre-defined list of perf events.
+ * We will generate and add attrs dynamically in probe() after we read HW
+ * configuration.
+ */
+static struct attribute_group arc_pmu_events_attr_gr = {
+       .name = "events",
+};
+
+static void arc_pmu_add_raw_event_attr(int j, char *str)
+{
+       memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
+       arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
+       arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+       arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
+       arc_pmu->attr[j].id = j;
+       arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
+}
+
+static int arc_pmu_raw_alloc(struct device *dev)
+{
+       arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+               sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
+       if (!arc_pmu->attr)
+               return -ENOMEM;
+
+       arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+               sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
+       if (!arc_pmu->attrs)
+               return -ENOMEM;
+
+       arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
+               sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
+       if (!arc_pmu->raw_entry)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static inline bool event_in_hw_event_map(int i, char *name)
+{
+       if (!arc_pmu_ev_hw_map[i])
+               return false;
+
+       if (!strlen(arc_pmu_ev_hw_map[i]))
+               return false;
+
+       if (strcmp(arc_pmu_ev_hw_map[i], name))
+               return false;
+
+       return true;
+}
+
+static void arc_pmu_map_hw_event(int j, char *str)
+{
+       int i;
+
+       /* See if HW condition has been mapped to a perf event_id */
+       for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
+               if (event_in_hw_event_map(i, str)) {
+                       pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+                                i, str, j);
+                       arc_pmu->ev_hw_idx[i] = j;
+               }
+       }
+}
+
 static int arc_pmu_device_probe(struct platform_device *pdev)
 {
        struct arc_reg_pct_build pct_bcr;
        struct arc_reg_cc_build cc_bcr;
-       int i, j, has_interrupts;
+       int i, has_interrupts;
        int counter_size;       /* in bits */
 
        union cc_name {
                struct {
-                       uint32_t word0, word1;
+                       u32 word0, word1;
                        char sentinel;
                } indiv;
-               char str[9];
+               char str[ARCPMU_EVENT_NAME_LEN];
        } cc_name;
 
 
@@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
                return -ENODEV;
        }
        BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
-       BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
+       if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
+               return -EINVAL;
 
        READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
-       BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
+       if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
+               return -EINVAL;
 
        arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
        if (!arc_pmu)
                return -ENOMEM;
 
+       arc_pmu->n_events = cc_bcr.c;
+
+       if (arc_pmu_raw_alloc(&pdev->dev))
+               return -ENOMEM;
+
        has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
 
        arc_pmu->n_counters = pct_bcr.c;
@@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
        pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
                arc_pmu->n_counters, counter_size, cc_bcr.c,
-               has_interrupts ? ", [overflow IRQ support]":"");
+               has_interrupts ? ", [overflow IRQ support]" : "");
 
-       cc_name.str[8] = 0;
+       cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
        for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
                arc_pmu->ev_hw_idx[i] = -1;
 
        /* loop thru all available h/w condition indexes */
-       for (j = 0; j < cc_bcr.c; j++) {
-               write_aux_reg(ARC_REG_CC_INDEX, j);
+       for (i = 0; i < cc_bcr.c; i++) {
+               write_aux_reg(ARC_REG_CC_INDEX, i);
                cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
                cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
 
-               /* See if it has been mapped to a perf event_id */
-               for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
-                       if (arc_pmu_ev_hw_map[i] &&
-                           !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
-                           strlen(arc_pmu_ev_hw_map[i])) {
-                               pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
-                                        i, cc_name.str, j);
-                               arc_pmu->ev_hw_idx[i] = j;
-                       }
-               }
+               arc_pmu_map_hw_event(i, cc_name.str);
+               arc_pmu_add_raw_event_attr(i, cc_name.str);
        }
 
+       arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
+       arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
+       arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
+
        arc_pmu->pmu = (struct pmu) {
                .pmu_enable     = arc_pmu_enable,
                .pmu_disable    = arc_pmu_disable,
@@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
                .start          = arc_pmu_start,
                .stop           = arc_pmu_stop,
                .read           = arc_pmu_read,
+               .attr_groups    = arc_pmu->attr_groups,
        };
 
        if (has_interrupts) {
@@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
        } else
                arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
-       return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
+       /*
+        * The perf parser doesn't really like the '-' symbol in event names,
+        * so let's use '_' in the ARC PCT name as it becomes the kernel PMU
+        * event prefix.
+        */
+       return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id arc_pmu_match[] = {
        { .compatible = "snps,arc700-pct" },
        { .compatible = "snps,archs-pct" },
        {},
 };
 MODULE_DEVICE_TABLE(of, arc_pmu_match);
-#endif
 
 static struct platform_driver arc_pmu_driver = {
        .driver = {
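
A minimal userspace sketch of how the raw events and the "config:0-14" format
exported above could be consumed once the PMU is registered as "arc_pct".
This is not part of the series; the sysfs path follows the generic perf-core
convention for dynamically registered PMUs, and the event index 0x12 is an
illustrative assumption.

/* Sketch only: counts one hypothetical ARC PCT raw event via perf_event_open(). */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type = 0;
	uint64_t count;
	FILE *f;
	int fd;

	/* Registered PMUs expose their dynamic type id here (assumed path). */
	f = fopen("/sys/bus/event_source/devices/arc_pct/type", "r");
	if (!f || fscanf(f, "%u", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x12;	/* hypothetical h/w condition index, bits 0-14 */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

The perf tool equivalent would be something like "perf stat -e arc_pct/event=0x12/",
which relies on the "events" and "format" attribute groups added above.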
index 2e018b8c2e19ce368c43d8eec102fe5334627da7..feb90093e6b1354eaf60064d7d961c736ded3108 100644 (file)
@@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void)
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
        const struct id_to_str *tbl;
        struct bcr_isa_arcv2 isa;
+       struct bcr_actionpoint ap;
 
        FIX_PTR(cpu);
 
@@ -195,6 +196,7 @@ static void read_arc_build_cfg_regs(void)
                cpu->bpu.full = bpu.ft;
                cpu->bpu.num_cache = 256 << bpu.bce;
                cpu->bpu.num_pred = 2048 << bpu.pte;
+               cpu->bpu.ret_stk = 4 << bpu.rse;
 
                if (cpu->core.family >= 0x54) {
                        unsigned int exec_ctrl;
@@ -207,8 +209,11 @@ static void read_arc_build_cfg_regs(void)
                }
        }
 
-       READ_BCR(ARC_REG_AP_BCR, bcr);
-       cpu->extn.ap = bcr.ver ? 1 : 0;
+       READ_BCR(ARC_REG_AP_BCR, ap);
+       if (ap.ver) {
+               cpu->extn.ap_num = 2 << ap.num;
+               cpu->extn.ap_full = !!ap.min;
+       }
 
        READ_BCR(ARC_REG_SMART_BCR, bcr);
        cpu->extn.smart = bcr.ver ? 1 : 0;
@@ -216,8 +221,6 @@ static void read_arc_build_cfg_regs(void)
        READ_BCR(ARC_REG_RTT_BCR, bcr);
        cpu->extn.rtt = bcr.ver ? 1 : 0;
 
-       cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-
        READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
 
        /* some hacks for lack of feature BCR info in old ARC700 cores */
@@ -299,10 +302,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 
        if (cpu->bpu.ver)
                n += scnprintf(buf + n, len - n,
-                             "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
+                             "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
                              IS_AVAIL1(cpu->bpu.full, "full"),
                              IS_AVAIL1(!cpu->bpu.full, "partial"),
-                             cpu->bpu.num_cache, cpu->bpu.num_pred);
+                             cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
 
        if (is_isa_arcv2()) {
                struct bcr_lpb lpb;
@@ -336,11 +339,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
                               IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
                               IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
 
-       if (cpu->extn.debug)
-               n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n",
-                              IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
+       if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
+               n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
                               IS_AVAIL1(cpu->extn.smart, "smaRT "),
                               IS_AVAIL1(cpu->extn.rtt, "RTT "));
+               if (cpu->extn.ap_num) {
+                       n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
+                                      cpu->extn.ap_num,
+                                      cpu->extn.ap_full ? "full":"min");
+               }
+               n += scnprintf(buf + n, len - n, "\n");
+       }
 
        if (cpu->dccm.sz || cpu->iccm.sz)
                n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
index e8d9fb4523462a9807358fea19c4e7668cc1126d..215f515442e03d53ee3a18ade4c62e2a06987b3b 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/arcregs.h>
 #include <asm/irqflags.h>
 
+#define ARC_PATH_MAX   256
+
 /*
  * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
  *   -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
        print_reg_file(&(cregs->r13), 13);
 }
 
-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
 {
        char *path_nm = NULL;
        struct mm_struct *mm;
        struct file *exe_file;
+       char buf[ARC_PATH_MAX];
 
        mm = get_task_mm(tsk);
        if (!mm)
@@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
        mmput(mm);
 
        if (exe_file) {
-               path_nm = file_path(exe_file, buf, 255);
+               path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
                fput(exe_file);
        }
 
@@ -80,10 +83,9 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
        pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
 }
 
-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
 {
        struct vm_area_struct *vma;
-       char *nm = buf;
        struct mm_struct *active_mm = current->active_mm;
 
        /* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
         * if the container VMA is not found
         */
        if (vma && (vma->vm_start <= address)) {
+               char buf[ARC_PATH_MAX];
+               char *nm = "?";
+
                if (vma->vm_file) {
-                       nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+                       nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
                        if (IS_ERR(nm))
                                nm = "?";
                }
@@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs)
 {
        struct task_struct *tsk = current;
        struct callee_regs *cregs;
-       char *buf;
 
-       buf = (char *)__get_free_page(GFP_KERNEL);
-       if (!buf)
-               return;
+       /*
+        * The generic code calls us with preemption disabled, but some calls
+        * here can sleep, so re-enable preemption to avoid a lockdep splat.
+        */
+       preempt_enable();
 
-       print_task_path_n_nm(tsk, buf);
+       print_task_path_n_nm(tsk);
        show_regs_print_info(KERN_INFO);
 
        show_ecr_verbose(regs);
@@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs)
                (void *)regs->blink, (void *)regs->ret);
 
        if (user_mode(regs))
-               show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+               show_faulting_vma(regs->ret); /* faulting code, not data */
 
        pr_info("[STAT32]: 0x%08lx", regs->status32);
 
@@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs)
        if (cregs)
                show_callee_regs(cregs);
 
-       free_page((unsigned long)buf);
+       preempt_disable();
 }
 
 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
index 62ad4bcb841aa70811a637c3621d80190ed0c352..f230bb7092fdb3d7d98883ab7310db1b4bc56654 100644 (file)
@@ -7,11 +7,39 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/cache.h>
 
-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use the prefetchw and
+ * prealloc instructions on CPUs with a 64B L1 data cache line
+ * (L1_CACHE_SHIFT == 6). If you want to implement an optimized memset for the
+ * other possible L1 data cache line lengths (32B and 128B), rewrite the code
+ * carefully so that no prefetchw/prealloc instruction touches an L1 cache
+ * line that does not belong to the memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR  reg, off
+       prealloc        [\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR reg, off
+       prefetchw       [\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR
+.endm
+
+.macro PREFETCHW_INSTR
+.endm
+
+#endif
 
 ENTRY_CFI(memset)
-       prefetchw [r0]          ; Prefetch the write location
+       PREFETCHW_INSTR r0, 0   ; Prefetch the first write location
        mov.f   0, r2
 ;;; if size is zero
        jz.d    [blink]
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
 
        lpnz    @.Lset64bytes
        ;; LOOP START
-#ifdef PREALLOC_NOT_AVAIL
-       prefetchw [r3, 64]      ;Prefetch the next write location
-#else
-       prealloc  [r3, 64]
-#endif
+       PREALLOC_INSTR  r3, 64  ; alloc next line w/o fetching
+
 #ifdef CONFIG_ARC_HAS_LL64
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
        lsr.f   lp_count, r2, 5 ;Last remaining  max 124 bytes
        lpnz    .Lset32bytes
        ;; LOOP START
-       prefetchw   [r3, 32]    ;Prefetch the next write location
 #ifdef CONFIG_ARC_HAS_LL64
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
index a1d7231970848dbc6f1f097fd7536153d63cf38d..8df1638259f3f2daa6b7d3cd3f70f54c3094017a 100644 (file)
@@ -141,12 +141,17 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
         */
        fault = handle_mm_fault(vma, address, flags);
 
-       /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
        if (fatal_signal_pending(current)) {
-               if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
-                       up_read(&mm->mmap_sem);
-               if (user_mode(regs))
+
+               /*
+                * If the fault is to be retried, mmap_sem has already been
+                * relinquished by the core mm, so it is OK to return to user
+                * mode (with the signal handled first).
+                */
+               if (fault & VM_FAULT_RETRY) {
+                       if (!user_mode(regs))
+                               goto no_context;
                        return;
+               }
        }
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
index 43bf4c3a1290d818578bfdcda5fefaf76df27810..e1ab2d7f1d646510ab17a4be31b675e88027745b 100644 (file)
@@ -119,7 +119,8 @@ void __init setup_arch_memory(void)
         */
 
        memblock_add_node(low_mem_start, low_mem_sz, 0);
-       memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+       memblock_reserve(CONFIG_LINUX_LINK_BASE,
+                        __pa(_end) - CONFIG_LINUX_LINK_BASE);
 
 #ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size) {
index 29f970f864dc1d42a7fb9204cae7e649c1c68560..c6e2b2320abc572b0eeb65d03947d1bb69c4be0a 100644 (file)
@@ -87,10 +87,6 @@ &usb {
        power-domains = <&power RPI_POWER_DOMAIN_USB>;
 };
 
-&v3d {
-       power-domains = <&power RPI_POWER_DOMAIN_V3D>;
-};
-
 &hdmi {
        power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
        status = "okay";
index 31b29646b14cf0725cf312002f23ffe2074006e6..9777644c6c2b44d34ca67b7781b524b8d0998ea8 100644 (file)
@@ -3,6 +3,7 @@
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/soc/bcm2835-pm.h>
 
 /* firmware-provided startup stubs live here, where the secondary CPUs are
  * spinning.
@@ -120,9 +121,18 @@ intc: interrupt-controller@7e00b200 {
                        #interrupt-cells = <2>;
                };
 
-               watchdog@7e100000 {
-                       compatible = "brcm,bcm2835-pm-wdt";
-                       reg = <0x7e100000 0x28>;
+               pm: watchdog@7e100000 {
+                       compatible = "brcm,bcm2835-pm", "brcm,bcm2835-pm-wdt";
+                       #power-domain-cells = <1>;
+                       #reset-cells = <1>;
+                       reg = <0x7e100000 0x114>,
+                             <0x7e00a000 0x24>;
+                       clocks = <&clocks BCM2835_CLOCK_V3D>,
+                                <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+                                <&clocks BCM2835_CLOCK_H264>,
+                                <&clocks BCM2835_CLOCK_ISP>;
+                       clock-names = "v3d", "peri_image", "h264", "isp";
+                       system-power-controller;
                };
 
                clocks: cprman@7e101000 {
@@ -629,6 +639,7 @@ v3d: v3d@7ec00000 {
                        compatible = "brcm,bcm2835-v3d";
                        reg = <0x7ec00000 0x1000>;
                        interrupts = <1 10>;
+                       power-domains = <&pm BCM2835_POWER_DOMAIN_GRAFX_V3D>;
                };
 
                vc4: gpu {
index b3ef061d8b7417b90279c94a8706995fc116bd21..2c403e7c782d31f83b956dcbcc9a9a9027092ffe 100644 (file)
@@ -1 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
 #include <xen/arm/page-coherent.h>
+
+static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
+{
+       if (dev && dev->archdata.dev_dma_ops)
+               return dev->archdata.dev_dma_ops;
+       return get_arch_dma_ops(NULL);
+}
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+               dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+       return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+               void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+       xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+            dma_addr_t dev_addr, unsigned long offset, size_t size,
+            enum dma_data_direction dir, unsigned long attrs)
+{
+       unsigned long page_pfn = page_to_xen_pfn(page);
+       unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+       unsigned long compound_pages =
+               (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+       bool local = (page_pfn <= dev_pfn) &&
+               (dev_pfn - page_pfn < compound_pages);
+
+       /*
+        * Dom0 is mapped 1:1.  While the Linux page can span across
+        * multiple Xen pages, it cannot contain a mix of local and
+        * foreign Xen pages.  So if the first xen_pfn == mfn, the page is
+        * local; otherwise it's a foreign page grant-mapped in dom0.  If
+        * the page is local we can safely call the native dma_ops
+        * function, otherwise we call the Xen-specific one.
+        */
+       if (local)
+               xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+       else
+               __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       unsigned long pfn = PFN_DOWN(handle);
+       /*
+        * Dom0 is mapped 1:1.  While the Linux page can span across multiple
+        * Xen pages, it cannot contain a mix of local and foreign Xen pages.
+        * Since Dom0 is mapped 1:1, calling pfn_valid on a foreign mfn will
+        * always return false.  If the page is local we can safely call the
+        * native dma_ops function, otherwise we call the Xen-specific one.
+        */
+       if (pfn_valid(pfn)) {
+               if (xen_get_dma_ops(hwdev)->unmap_page)
+                       xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+       } else
+               __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned long pfn = PFN_DOWN(handle);
+       if (pfn_valid(pfn)) {
+               if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
+                       xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+       } else
+               __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned long pfn = PFN_DOWN(handle);
+       if (pfn_valid(pfn)) {
+               if (xen_get_dma_ops(hwdev)->sync_single_for_device)
+                       xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+       } else
+               __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
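
A self-contained sketch of the local-vs-foreign range check used in
xen_dma_map_page() above. The helper name and sample pfns are made up for
illustration, and XEN_PFN_PER_PAGE is assumed to be 16, as would be the case
with 64K Linux pages over 4K Xen pages.

#include <stdbool.h>
#include <stdio.h>

#define XEN_PFN_PER_PAGE	16UL	/* assumed: 64K Linux page / 4K Xen page */

/* Local iff the device pfn falls inside the 1:1-mapped compound page. */
static bool xen_range_is_local(unsigned long page_pfn, unsigned long dev_pfn,
			       unsigned int compound_order)
{
	unsigned long compound_pages = (1UL << compound_order) * XEN_PFN_PER_PAGE;

	return page_pfn <= dev_pfn && dev_pfn - page_pfn < compound_pages;
}

int main(void)
{
	/* order-0 page starting at Xen pfn 0x1000: pfns 0x1000..0x100f are local */
	printf("%d\n", xen_range_is_local(0x1000, 0x100f, 0));	/* prints 1 */
	printf("%d\n", xen_range_is_local(0x1000, 0x1010, 0));	/* prints 0: foreign */
	return 0;
}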
index a067adf9f1eecf33b859b19e11942165001fe33d..4ef1e55f4a0bffb37f138f6b04020fa1248d64c7 100644 (file)
@@ -167,6 +167,7 @@ config ARCH_BCM2835
        select BCM2835_TIMER
        select PINCTRL
        select PINCTRL_BCM2835
+       select MFD_CORE
        help
          This enables support for the Broadcom BCM2835 and BCM2836 SoCs.
          This SoC is used in the Raspberry Pi and Roku 2 devices.
index afd98971d9034335cf2340b3462df91c756fcdc7..816da0eb66168061953ab1fac8461b76e8db4a09 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/reboot.h>
+#include <linux/reset/socfpga.h>
 
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
@@ -32,8 +33,6 @@ void __iomem *rst_manager_base_addr;
 void __iomem *sdr_ctl_base_addr;
 unsigned long socfpga_cpu1start_addr;
 
-extern void __init socfpga_reset_init(void);
-
 static void __init socfpga_sysmgr_init(void)
 {
        struct device_node *np;
index 8a7f301839c2d9e59156be125f79076d7e531645..933b6930f024f03a3a441b7fa087962993541e36 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/clocksource.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/reset/sunxi.h>
 
 #include <asm/mach/arch.h>
 #include <asm/secure_cntvoff.h>
@@ -37,7 +38,6 @@ static const char * const sun6i_board_dt_compat[] = {
        NULL,
 };
 
-extern void __init sun6i_reset_init(void);
 static void __init sun6i_timer_init(void)
 {
        of_clk_init(NULL);
index 3dd3d664c5c5dfc3ee759af069d248f09eba4e50..4658c937e17304fcf86212294d3cfc9badb40e37 100644 (file)
@@ -20,9 +20,6 @@ struct dev_archdata {
 #ifdef CONFIG_IOMMU_API
        void *iommu;                    /* private IOMMU data */
 #endif
-#ifdef CONFIG_XEN
-       const struct dma_map_ops *dev_dma_ops;
-#endif
 };
 
 struct pdev_archdata {
index b3ef061d8b7417b90279c94a8706995fc116bd21..d88e56b90b93a55fa972fbf22b518c17bdfba1cb 100644 (file)
@@ -1 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
 #include <xen/arm/page-coherent.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+               dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+       return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+               void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+       dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned long pfn = PFN_DOWN(handle);
+
+       if (pfn_valid(pfn))
+               dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
+       else
+               __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned long pfn = PFN_DOWN(handle);
+       if (pfn_valid(pfn))
+               dma_direct_sync_single_for_device(hwdev, handle, size, dir);
+       else
+               __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+            dma_addr_t dev_addr, unsigned long offset, size_t size,
+            enum dma_data_direction dir, unsigned long attrs)
+{
+       unsigned long page_pfn = page_to_xen_pfn(page);
+       unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+       unsigned long compound_pages =
+               (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+       bool local = (page_pfn <= dev_pfn) &&
+               (dev_pfn - page_pfn < compound_pages);
+
+       if (local)
+               dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
+       else
+               __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       unsigned long pfn = PFN_DOWN(handle);
+       /*
+        * Dom0 is mapped 1:1.  While the Linux page can span across multiple
+        * Xen pages, it cannot contain a mix of local and foreign Xen pages.
+        * Since Dom0 is mapped 1:1, calling pfn_valid on a foreign mfn will
+        * always return false.  If the page is local we can safely call the
+        * native dma_ops function, otherwise we call the Xen-specific one.
+        */
+       if (pfn_valid(pfn))
+               dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
+       else
+               __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
index fb0908456a1f99fcb0479d50c33c48c15d2a90a6..78c0a72f822c0a2b0e05b1394e9ca8b982ceb62b 100644 (file)
@@ -466,9 +466,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);
 
 #ifdef CONFIG_XEN
-       if (xen_initial_domain()) {
-               dev->archdata.dev_dma_ops = dev->dma_ops;
+       if (xen_initial_domain())
                dev->dma_ops = xen_dma_ops;
-       }
 #endif
 }
index ccbb53e2202404b85aae86e883d3e64405d2d305..8d04e6f3f79649d460376f09217c9e8fe211a850 100644 (file)
@@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
        atomic_set(&mm->context.flush_count, 0);
        mm->context.gmap_asce = 0;
        mm->context.flush_mm = 0;
-       mm->context.compat_mm = 0;
+       mm->context.compat_mm = test_thread_flag(TIF_31BIT);
 #ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste ||
                test_thread_flag(TIF_PGSTE) ||
@@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        int cpu = smp_processor_id();
 
-       if (prev == next)
-               return;
        S390_lowcore.user_asce = next->context.asce;
        cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
        /* Clear previous user-ASCE from CR1 and CR7 */
@@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                __ctl_load(S390_lowcore.vdso_asce, 7, 7);
                clear_cpu_flag(CIF_ASCE_SECONDARY);
        }
-       cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+       if (prev != next)
+               cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
index af5c2b3f706567f5f9927686ccfd210b50366221..a8c7789b246b49eb5a546360e4491eaf0f9eac7b 100644 (file)
@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
        if (stsi(vmms, 3, 2, 2) || !vmms->count)
                return;
 
-       /* Running under KVM? If not we assume z/VM */
+       /* Detect known hypervisors */
        if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
                S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
-       else
+       else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
                S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }
 
index 72dd23ef771b6bf22f77e71df22a40e05fbee2d5..7ed90a75913572f752652d611a0cd5347279d8e2 100644 (file)
@@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p)
                pr_info("Linux is running under KVM in 64-bit mode\n");
        else if (MACHINE_IS_LPAR)
                pr_info("Linux is running natively in 64-bit mode\n");
+       else
+               pr_info("Linux is running as a guest in 64-bit mode\n");
 
        /* Have one command line that is parsed and saved in /proc/cmdline */
        /* boot_command_line has been already set up in early.c */
index f82b3d3c36e2d5620e4b8e6b363936bcdf38d673..b198ece2aad63d70f58b2acde1c280ce5534cf7a 100644 (file)
@@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
  */
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
+       struct lowcore *lc = pcpu_devices->lowcore;
+
+       if (pcpu_devices[0].address == stap())
+               lc = &S390_lowcore;
+
        pcpu_delegate(&pcpu_devices[0], func, data,
-                     pcpu_devices->lowcore->nodat_stack);
+                     lc->nodat_stack);
 }
 
 int smp_find_processor_id(u16 address)
@@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev,
 {
        int rc;
 
+       rc = lock_device_hotplug_sysfs();
+       if (rc)
+               return rc;
        rc = smp_rescan_cpus();
+       unlock_device_hotplug();
        return rc ? rc : count;
 }
 static DEVICE_ATTR_WO(rescan);
index ebe748a9f472fde63cfccd42921472233f6ea529..4ff354887db412a504bb432956c28b2ba6b8a936 100644 (file)
@@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
        vdso_pages = vdso64_pages;
 #ifdef CONFIG_COMPAT
-       if (is_compat_task()) {
+       mm->context.compat_mm = is_compat_task();
+       if (mm->context.compat_mm)
                vdso_pages = vdso32_pages;
-               mm->context.compat_mm = 1;
-       }
 #endif
        /*
         * vDSO has a problem and was disabled, just don't "enable" it for
index 4b4a7f32b68ed6e28e22793009f33cb36e8e17fa..26387c7bf305bc7d4e14e2d34856802a250c55ba 100644 (file)
@@ -198,7 +198,7 @@ config X86
        select IRQ_FORCED_THREADING
        select NEED_SG_DMA_LENGTH
        select PCI_DOMAINS                      if PCI
-       select PCI_LOCKLESS_CONFIG
+       select PCI_LOCKLESS_CONFIG              if PCI
        select PERF_EVENTS
        select RTC_LIB
        select RTC_MC146818_LIB
index 8eaf8952c408cd619124f9696b4888fae2f529ad..39913770a44d5aeed855c51082a6cf55080457f1 100644 (file)
@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)
 
        /* Need to switch before accessing the thread stack. */
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-       movq    %rsp, %rdi
+       /* In the Xen PV case we already run on the thread stack. */
+       ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        pushq   6*8(%rdi)               /* regs->ss */
@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
        pushq   3*8(%rdi)               /* regs->cs */
        pushq   2*8(%rdi)               /* regs->ip */
        pushq   1*8(%rdi)               /* regs->orig_ax */
-
        pushq   (%rdi)                  /* pt_regs->di */
+.Lint80_keep_stack:
+
        pushq   %rsi                    /* pt_regs->si */
        xorl    %esi, %esi              /* nospec   si */
        pushq   %rdx                    /* pt_regs->dx */
index 0ca50611e8cec0b5af0f29947bee90761bc76ef1..19d18fae6ec660e8119f21d2f80cf0ac3552486e 100644 (file)
@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
 
 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
+/*
+ * Init a new mm.  Used on mm copies, like at fork()
+ * and on mm's that are brand-new, like at execve().
+ */
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
@@ -228,8 +232,22 @@ do {                                               \
 } while (0)
 #endif
 
+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+                                 struct mm_struct *mm)
+{
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+       if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+               return;
+
+       /* Duplicate the oldmm pkey state in mm: */
+       mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+       mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
+#endif
+}
+
 static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
+       arch_dup_pkeys(oldmm, mm);
        paravirt_arch_dup_mmap(oldmm, mm);
        return ldt_dup_context(oldmm, mm);
 }
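
A hypothetical userspace probe, not taken from this series, for the kind of
scenario arch_dup_pkeys() addresses: if the pkey allocation map is not copied
at fork(), a child's pkey_alloc() could hand back a key number that already
guards mappings inherited from the parent. Requires a pkeys-capable CPU and
glibc >= 2.27; the exact failure mode described here is an assumption.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Parent allocates a protection key before forking. */
	int parent_key = pkey_alloc(0, PKEY_DISABLE_WRITE);

	if (parent_key < 0) {
		perror("pkey_alloc");
		return 1;
	}

	if (fork() == 0) {
		/* With the fix, the child sees parent_key as already taken. */
		int child_key = pkey_alloc(0, 0);

		printf("parent key %d, child key %d\n", parent_key, child_key);
		_exit(0);
	}
	wait(NULL);
	return 0;
}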
index c8b07d8ea5a2bba61448b169a0cd0ea3daa086ad..17ffc869cab822d03e85baea56bade232a0e4598 100644 (file)
@@ -470,6 +470,7 @@ int crash_load_segments(struct kimage *image)
 
        kbuf.memsz = kbuf.bufsz;
        kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+       kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
        ret = kexec_add_buffer(&kbuf);
        if (ret) {
                vfree((void *)image->arch.elf_headers);
index b0acb22e5a465096b37f7a67a689852685228087..dfd3aca82c61cbe345f462a62ea6b52fa0b81516 100644 (file)
 
 #define HPET_MASK                      CLOCKSOURCE_MASK(32)
 
-/* FSEC = 10^-15
-   NSEC = 10^-9 */
-#define FSEC_PER_NSEC                  1000000L
-
 #define HPET_DEV_USED_BIT              2
 #define HPET_DEV_USED                  (1 << HPET_DEV_USED_BIT)
 #define HPET_DEV_VALID                 0x8
index 278cd07228dd886778cabf708dd2b1237a318d72..0d5efa34f35966f0b272797f737252ed8bdb23a6 100644 (file)
@@ -434,6 +434,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
        kbuf.memsz = PAGE_ALIGN(header->init_size);
        kbuf.buf_align = header->kernel_alignment;
        kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
+       kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
        ret = kexec_add_buffer(&kbuf);
        if (ret)
                goto out_free_params;
@@ -448,6 +449,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
                kbuf.bufsz = kbuf.memsz = initrd_len;
                kbuf.buf_align = PAGE_SIZE;
                kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
+               kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
                ret = kexec_add_buffer(&kbuf);
                if (ret)
                        goto out_free_params;
index ba4bfb7f6a36996a806a634bf3c4a86bd594129f..5c93a65ee1e5c2ec56e83eda147bc1bc31e159cd 100644 (file)
@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
 #else
        u64 ipi_bitmap = 0;
 #endif
+       long ret;
 
        if (cpumask_empty(mask))
                return;
@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
                } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
                        max = apic_id < max ? max : apic_id;
                } else {
-                       kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+                       ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                                (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+                       WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
                        min = max = apic_id;
                        ipi_bitmap = 0;
                }
@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
        }
 
        if (ipi_bitmap) {
-               kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+               ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                        (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+               WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
        }
 
        local_irq_restore(flags);
index e9f777bfed404340a36de19898fffd129610fc3c..3fae238340699376ef2b5f84d4b6d4c7f60c48c5 100644 (file)
@@ -297,15 +297,16 @@ static int __init tsc_setup(char *str)
 
 __setup("tsc=", tsc_setup);
 
-#define MAX_RETRIES     5
-#define SMI_TRESHOLD    50000
+#define MAX_RETRIES            5
+#define TSC_DEFAULT_THRESHOLD  0x20000
 
 /*
- * Read TSC and the reference counters. Take care of SMI disturbance
+ * Read TSC and the reference counters. Take care of any disturbances
  */
 static u64 tsc_read_refs(u64 *p, int hpet)
 {
        u64 t1, t2;
+       u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
        int i;
 
        for (i = 0; i < MAX_RETRIES; i++) {
@@ -315,7 +316,7 @@ static u64 tsc_read_refs(u64 *p, int hpet)
                else
                        *p = acpi_pm_read_early();
                t2 = get_cycles();
-               if ((t2 - t1) < SMI_TRESHOLD)
+               if ((t2 - t1) < thresh)
                        return t2;
        }
        return ULLONG_MAX;
@@ -703,15 +704,15 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
         * zero. In each wait loop iteration we read the TSC and check
         * the delta to the previous read. We keep track of the min
         * and max values of that delta. The delta is mostly defined
-        * by the IO time of the PIT access, so we can detect when a
-        * SMI/SMM disturbance happened between the two reads. If the
+        * by the IO time of the PIT access, so we can detect when
+        * any disturbance happened between the two reads. If the
         * maximum time is significantly larger than the minimum time,
         * then we discard the result and have another try.
         *
         * 2) Reference counter. If available we use the HPET or the
         * PMTIMER as a reference to check the sanity of that value.
         * We use separate TSC readouts and check inside of the
-        * reference read for a SMI/SMM disturbance. We dicard
+        * reference read for any possible disturbance. We dicard
         * disturbed values here as well. We do that around the PIT
         * calibration delay loop as we have to wait for a certain
         * amount of time anyway.
@@ -744,7 +745,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
                if (ref1 == ref2)
                        continue;
 
-               /* Check, whether the sampling was disturbed by an SMI */
+               /* Check, whether the sampling was disturbed */
                if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
                        continue;
 
@@ -1268,7 +1269,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
  */
 static void tsc_refine_calibration_work(struct work_struct *work)
 {
-       static u64 tsc_start = -1, ref_start;
+       static u64 tsc_start = ULLONG_MAX, ref_start;
        static int hpet;
        u64 tsc_stop, ref_stop, delta;
        unsigned long freq;
@@ -1283,14 +1284,15 @@ static void tsc_refine_calibration_work(struct work_struct *work)
         * delayed the first time we expire. So set the workqueue
         * again once we know timers are working.
         */
-       if (tsc_start == -1) {
+       if (tsc_start == ULLONG_MAX) {
+restart:
                /*
                 * Only set hpet once, to avoid mixing hardware
                 * if the hpet becomes enabled later.
                 */
                hpet = is_hpet_enabled();
-               schedule_delayed_work(&tsc_irqwork, HZ);
                tsc_start = tsc_read_refs(&ref_start, hpet);
+               schedule_delayed_work(&tsc_irqwork, HZ);
                return;
        }
 
@@ -1300,9 +1302,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
        if (ref_start == ref_stop)
                goto out;
 
-       /* Check, whether the sampling was disturbed by an SMI */
-       if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
-               goto out;
+       /* Check, whether the sampling was disturbed */
+       if (tsc_stop == ULLONG_MAX)
+               goto restart;
 
        delta = tsc_stop - tsc_start;
        delta *= 1000000LL;
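
A quick arithmetic sketch, not part of the patch, of the new dynamic threshold
in tsc_read_refs(): tsc_khz >> 5 is roughly tsc_khz / 32, i.e. about 1/32 of a
millisecond worth of TSC cycles (~31 us), instead of the old fixed SMI_TRESHOLD
of 50000 cycles regardless of TSC frequency. The 2.1 GHz figure is an assumed
example value.

#include <stdio.h>

int main(void)
{
	unsigned long tsc_khz = 2100000;	/* assumed 2.1 GHz TSC */
	unsigned long thresh = tsc_khz ? tsc_khz >> 5 : 0x20000;

	/* cycles -> microseconds: cycles * 1000 / kHz */
	printf("thresh = %lu cycles (~%lu us)\n", thresh, thresh * 1000 / tsc_khz);
	return 0;
}

For tsc_khz = 2100000 this gives 65625 cycles, about 31 us, while the fallback
TSC_DEFAULT_THRESHOLD of 0x20000 (131072) cycles applies before tsc_khz is known.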
index 69b3a7c3001397f65f9cfde541b042347827ee98..31ecf7a76d5a40474e2bc833f9834797e10f295f 100644 (file)
@@ -2,10 +2,6 @@
 
 ccflags-y += -Iarch/x86/kvm
 
-CFLAGS_x86.o := -I.
-CFLAGS_svm.o := -I.
-CFLAGS_vmx.o := -I.
-
 KVM := ../../../virt/kvm
 
 kvm-y                  += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
index c90a5352d158fa29ff72b42cd11efba0163225e2..89d20ed1d2e8bf7abe753adba301ef2b31ae8398 100644 (file)
@@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
                ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
                if (ret != HV_STATUS_INVALID_PORT_ID)
                        break;
-               /* maybe userspace knows this conn_id: fall through */
+               /* fall through - maybe userspace knows this conn_id. */
        case HVCALL_POST_MESSAGE:
                /* don't bother userspace if it has no way to handle it */
                if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
@@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
                        ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
                        ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
                        ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
-                       ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
                        ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
                        ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
 
@@ -1848,11 +1847,11 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
                case HYPERV_CPUID_ENLIGHTMENT_INFO:
                        ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
                        ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
-                       ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
                        ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
                        ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
                        ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
-                       ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
+                       if (evmcs_ver)
+                               ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
 
                        /*
                         * Default number of spinlock retry attempts, matches
index 9f089e2e09d02b7585a9b9c1203549c62576112f..4b6c2da7265c88f8f530eb026ba6b0e950eac51e 100644 (file)
@@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
        switch (delivery_mode) {
        case APIC_DM_LOWEST:
                vcpu->arch.apic_arb_prio++;
+               /* fall through */
        case APIC_DM_FIXED:
                if (unlikely(trig_mode && !level))
                        break;
@@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
        case APIC_LVT0:
                apic_manage_nmi_watchdog(apic, val);
+               /* fall through */
        case APIC_LVTTHMR:
        case APIC_LVTPC:
        case APIC_LVT1:
index ce770b446238592e985bba826aefdf3bd4e6efcc..da9c42349b1f800ec06ad3441be7ade3f852248f 100644 (file)
@@ -4371,6 +4371,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                        rsvd_bits(maxphyaddr, 51);
                rsvd_check->rsvd_bits_mask[1][4] =
                        rsvd_check->rsvd_bits_mask[0][4];
+               /* fall through */
        case PT64_ROOT_4LEVEL:
                rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
                        nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
index a157ca5b686955bbf9942f6a6f3b68247f7bb5cb..f13a3a24d3609e03b2bd849c0058ff41a670f2ca 100644 (file)
@@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        kvm_mmu_reset_context(&svm->vcpu);
        kvm_mmu_load(&svm->vcpu);
 
+       /*
+        * Drop what we picked up for L2 via svm_complete_interrupts() so it
+        * doesn't end up in L1.
+        */
+       svm->vcpu.arch.nmi_injected = false;
+       kvm_clear_exception_queue(&svm->vcpu);
+       kvm_clear_interrupt_queue(&svm->vcpu);
+
        return 0;
 }
 
@@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_IA32_APICBASE:
                if (kvm_vcpu_apicv_active(vcpu))
                        avic_update_vapic_bar(to_svm(vcpu), data);
-               /* Follow through */
+               /* Fall through */
        default:
                return kvm_set_msr_common(vcpu, msr);
        }
@@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
                kvm_lapic_reg_write(apic, APIC_ICR, icrl);
                break;
        case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
-               int i;
-               struct kvm_vcpu *vcpu;
-               struct kvm *kvm = svm->vcpu.kvm;
                struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
                /*
-                * At this point, we expect that the AVIC HW has already
-                * set the appropriate IRR bits on the valid target
-                * vcpus. So, we just need to kick the appropriate vcpu.
+                * Update ICR high and low, then emulate sending IPI,
+                * which is handled when writing APIC_ICR.
                 */
-               kvm_for_each_vcpu(i, vcpu, kvm) {
-                       bool m = kvm_apic_match_dest(vcpu, apic,
-                                                    icrl & KVM_APIC_SHORT_MASK,
-                                                    GET_APIC_DEST_FIELD(icrh),
-                                                    icrl & KVM_APIC_DEST_MASK);
-
-                       if (m && !avic_vcpu_is_running(vcpu))
-                               kvm_vcpu_wake_up(vcpu);
-               }
+               kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+               kvm_lapic_reg_write(apic, APIC_ICR, icrl);
                break;
        }
        case AVIC_IPI_FAILURE_INVALID_TARGET:
+               WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
+                         index, svm->vcpu.vcpu_id, icrh, icrl);
                break;
        case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
                WARN_ONCE(1, "Invalid backing page\n");
index 705f40ae25329ca74fee07191a9104e0945dba92..6432d08c7de79ccbde654b7ab17c9649b75a25c2 100644 (file)
@@ -1465,7 +1465,7 @@ TRACE_EVENT(kvm_hv_send_ipi_ex,
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/x86/kvm
+#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace
 
index 95bc2247478d90bda1d9305f11986f85db908e5b..5466c6d85cf3ef07388e47012b88fb478ac0d5c2 100644 (file)
@@ -332,16 +332,17 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
                        uint16_t *vmcs_version)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
+
+       vmx->nested.enlightened_vmcs_enabled = true;
 
        if (vmcs_version)
                *vmcs_version = nested_get_evmcs_version(vcpu);
 
        /* We don't support disabling the feature for simplicity. */
-       if (vmx->nested.enlightened_vmcs_enabled)
+       if (evmcs_already_enabled)
                return 0;
 
-       vmx->nested.enlightened_vmcs_enabled = true;
-
        vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
        vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
        vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
index 2616bd2c7f2c78b25339eff719d37a22b9070404..8ff20523661b87c5e68ee263cef47debb53f1d1f 100644 (file)
@@ -55,7 +55,7 @@ static u16 shadow_read_write_fields[] = {
 static int max_shadow_read_write_fields =
        ARRAY_SIZE(shadow_read_write_fields);
 
-void init_vmcs_shadow_fields(void)
+static void init_vmcs_shadow_fields(void)
 {
        int i, j;
 
@@ -4140,11 +4140,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
        if (r < 0)
                goto out_vmcs02;
 
-       vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+       vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
        if (!vmx->nested.cached_vmcs12)
                goto out_cached_vmcs12;
 
-       vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+       vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
        if (!vmx->nested.cached_shadow_vmcs12)
                goto out_cached_shadow_vmcs12;
 
@@ -5263,13 +5263,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
                        copy_shadow_to_vmcs12(vmx);
        }
 
-       if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+       /*
+        * Copy over the full allocated size of vmcs12 rather than just the size
+        * of the struct.
+        */
+       if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
                return -EFAULT;
 
        if (nested_cpu_has_shadow_vmcs(vmcs12) &&
            vmcs12->vmcs_link_pointer != -1ull) {
                if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
-                                get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+                                get_shadow_vmcs12(vcpu), VMCS12_SIZE))
                        return -EFAULT;
        }
 
index f6915f10e584a5f568e2e6a38123ac70c4d3881c..4341175339f32a84bb96d9ec18031e4fadd9baa8 100644 (file)
@@ -423,7 +423,7 @@ static void check_ept_pointer_match(struct kvm *kvm)
        to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }
 
-int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
                void *data)
 {
        struct kvm_tlb_range *range = data;
@@ -1773,7 +1773,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
                        return 1;
-               /* Otherwise falls through */
+               /* Else, falls through */
        default:
                msr = find_msr_entry(vmx, msr_info->index);
                if (msr) {
@@ -2014,7 +2014,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                /* Check reserved bit, higher 32 bits should be zero */
                if ((data >> 32) != 0)
                        return 1;
-               /* Otherwise falls through */
+               /* Else, falls through */
        default:
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
@@ -2344,7 +2344,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                case 37: /* AAT100 */
                case 44: /* BC86,AAY89,BD102 */
                case 46: /* BA97 */
-                       _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+                       _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
                        _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
                        pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
                                        "does not work properly. Using workaround\n");
@@ -6362,72 +6362,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
        vmx->loaded_vmcs->hv_timer_armed = false;
 }
 
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 {
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       unsigned long cr3, cr4, evmcs_rsp;
-
-       /* Record the guest's net vcpu time for enforced NMI injections. */
-       if (unlikely(!enable_vnmi &&
-                    vmx->loaded_vmcs->soft_vnmi_blocked))
-               vmx->loaded_vmcs->entry_time = ktime_get();
-
-       /* Don't enter VMX if guest state is invalid, let the exit handler
-          start emulation until we arrive back to a valid state */
-       if (vmx->emulation_required)
-               return;
-
-       if (vmx->ple_window_dirty) {
-               vmx->ple_window_dirty = false;
-               vmcs_write32(PLE_WINDOW, vmx->ple_window);
-       }
-
-       if (vmx->nested.need_vmcs12_sync)
-               nested_sync_from_vmcs12(vcpu);
-
-       if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
-               vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
-       if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
-               vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
-       cr3 = __get_current_cr3_fast();
-       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
-               vmcs_writel(HOST_CR3, cr3);
-               vmx->loaded_vmcs->host_state.cr3 = cr3;
-       }
-
-       cr4 = cr4_read_shadow();
-       if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
-               vmcs_writel(HOST_CR4, cr4);
-               vmx->loaded_vmcs->host_state.cr4 = cr4;
-       }
-
-       /* When single-stepping over STI and MOV SS, we must clear the
-        * corresponding interruptibility bits in the guest state. Otherwise
-        * vmentry fails as it then expects bit 14 (BS) in pending debug
-        * exceptions being set, but that's not correct for the guest debugging
-        * case. */
-       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-               vmx_set_interrupt_shadow(vcpu, 0);
-
-       if (static_cpu_has(X86_FEATURE_PKU) &&
-           kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
-           vcpu->arch.pkru != vmx->host_pkru)
-               __write_pkru(vcpu->arch.pkru);
-
-       pt_guest_enter(vmx);
-
-       atomic_switch_perf_msrs(vmx);
-
-       vmx_update_hv_timer(vcpu);
-
-       /*
-        * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-        * it's non-zero. Since vmentry is serialising on affected CPUs, there
-        * is no need to worry about the conditional branch over the wrmsr
-        * being speculatively taken.
-        */
-       x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+       unsigned long evmcs_rsp;
 
        vmx->__launched = vmx->loaded_vmcs->launched;
 
@@ -6567,6 +6504,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                , "eax", "ebx", "edi"
 #endif
              );
+}
+STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
+
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       unsigned long cr3, cr4;
+
+       /* Record the guest's net vcpu time for enforced NMI injections. */
+       if (unlikely(!enable_vnmi &&
+                    vmx->loaded_vmcs->soft_vnmi_blocked))
+               vmx->loaded_vmcs->entry_time = ktime_get();
+
+       /* Don't enter VMX if guest state is invalid, let the exit handler
+          start emulation until we arrive back to a valid state */
+       if (vmx->emulation_required)
+               return;
+
+       if (vmx->ple_window_dirty) {
+               vmx->ple_window_dirty = false;
+               vmcs_write32(PLE_WINDOW, vmx->ple_window);
+       }
+
+       if (vmx->nested.need_vmcs12_sync)
+               nested_sync_from_vmcs12(vcpu);
+
+       if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+               vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+       if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+               vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
+       cr3 = __get_current_cr3_fast();
+       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+               vmcs_writel(HOST_CR3, cr3);
+               vmx->loaded_vmcs->host_state.cr3 = cr3;
+       }
+
+       cr4 = cr4_read_shadow();
+       if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
+               vmcs_writel(HOST_CR4, cr4);
+               vmx->loaded_vmcs->host_state.cr4 = cr4;
+       }
+
+       /* When single-stepping over STI and MOV SS, we must clear the
+        * corresponding interruptibility bits in the guest state. Otherwise
+        * vmentry fails as it then expects bit 14 (BS) in pending debug
+        * exceptions being set, but that's not correct for the guest debugging
+        * case. */
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+               vmx_set_interrupt_shadow(vcpu, 0);
+
+       if (static_cpu_has(X86_FEATURE_PKU) &&
+           kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+           vcpu->arch.pkru != vmx->host_pkru)
+               __write_pkru(vcpu->arch.pkru);
+
+       pt_guest_enter(vmx);
+
+       atomic_switch_perf_msrs(vmx);
+
+       vmx_update_hv_timer(vcpu);
+
+       /*
+        * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+        * it's non-zero. Since vmentry is serialising on affected CPUs, there
+        * is no need to worry about the conditional branch over the wrmsr
+        * being speculatively taken.
+        */
+       x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+       __vmx_vcpu_run(vcpu, vmx);
 
        /*
         * We do not use IBRS in the kernel. If this vCPU has used the
@@ -6648,7 +6656,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
        vmx_recover_nmi_blocking(vmx);
        vmx_complete_interrupts(vmx);
 }
-STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static struct kvm *vmx_vm_alloc(void)
 {
index 02c8e095a23907f821f2ae3f1dc958ec66af0a7b..3d27206f6c010b13d408d45a4eb647430bf2e50d 100644 (file)
@@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
        case KVM_CAP_HYPERV_SYNIC2:
                if (cap->args[0])
                        return -EINVAL;
+               /* fall through */
+
        case KVM_CAP_HYPERV_SYNIC:
                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;
@@ -6480,8 +6482,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                toggle_interruptibility(vcpu, ctxt->interruptibility);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
                kvm_rip_write(vcpu, ctxt->eip);
-               if (r == EMULATE_DONE &&
-                   (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+               if (r == EMULATE_DONE && ctxt->tf)
                        kvm_vcpu_do_singlestep(vcpu, &r);
                if (!ctxt->have_exception ||
                    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -7093,10 +7094,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        case KVM_HC_CLOCK_PAIRING:
                ret = kvm_pv_clock_pairing(vcpu, a0, a1);
                break;
+#endif
        case KVM_HC_SEND_IPI:
                ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
                break;
-#endif
        default:
                ret = -KVM_ENOSYS;
                break;
@@ -7937,6 +7938,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
                vcpu->arch.pv.pv_unhalted = false;
                vcpu->arch.mp_state =
                        KVM_MP_STATE_RUNNABLE;
+               /* fall through */
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.apf.halted = false;
                break;
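
Two of the x86.c hunks above add "/* fall through */" comments where one switch case is meant to continue into the next (KVM_CAP_HYPERV_SYNIC2 into KVM_CAP_HYPERV_SYNIC, and the unhalted-vCPU case into KVM_MP_STATE_RUNNABLE). Placed as the last thing in the case body, this is the comment form that GCC's -Wimplicit-fallthrough warning and most static checkers treat as an explicit annotation. A minimal sketch of the pattern, with hypothetical names rather than the patched code:

	enum run_state { STATE_HALTED, STATE_RUNNABLE };

	static void wake_up_target(enum run_state *state, int *halted)
	{
		switch (*state) {
		case STATE_HALTED:
			*state = STATE_RUNNABLE;
			/* fall through */
		case STATE_RUNNABLE:
			*halted = 0;
			break;
		}
	}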
index 79778ab200e494c16cac7441cdb5da9507940715..a536651164584c96a19b1455f743834291a355c6 100644 (file)
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
        u16 status, timer;
 
        do {
-               outb(I8254_PORT_CONTROL,
-                    I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+               outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+                    I8254_PORT_CONTROL);
                status = inb(I8254_PORT_COUNTER0);
                timer  = inb(I8254_PORT_COUNTER0);
                timer |= inb(I8254_PORT_COUNTER0) << 8;
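
The kaslr.c hunk above swaps the two outb() arguments: Linux's port I/O helpers take the value first and the port second, so the original call had the i8254 read-back command and the control-port number reversed, and the latch command never reached the control port. A hedged, self-contained sketch of the corrected sequence (the port and command values are the conventional i8254 ones, not taken from this patch):

	#include <asm/io.h>

	#define I8254_PORT_CONTROL	0x43	/* i8254 control port */
	#define I8254_PORT_COUNTER0	0x40	/* counter 0 data port */
	#define I8254_CMD_READBACK	0xc0	/* read-back command */
	#define I8254_SELECT_COUNTER0	0x02	/* read-back: counter 0 */

	static u16 i8254_read_counter0(void)
	{
		u16 timer;

		/* value first, port second, the order the fix restores */
		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);
		inb(I8254_PORT_COUNTER0);		/* status byte, discarded */
		timer  = inb(I8254_PORT_COUNTER0);	/* count, low byte */
		timer |= inb(I8254_PORT_COUNTER0) << 8;	/* count, high byte */
		return timer;
	}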
index a19ef1a416ff66f0f1c70928644815dc385aa1e0..4aa9b1480866b70d16dbeafdc711a3479de1d670 100644 (file)
@@ -158,8 +158,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
        pmd = pmd_offset(pud, ppd->vaddr);
        if (pmd_none(*pmd)) {
                pte = ppd->pgtable_area;
-               memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
-               ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
+               memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+               ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
                set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
        }
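
The sme_populate_pgd() hunk above changes sizeof(pte) to sizeof(*pte): the former is the size of the pointer variable, the latter the size of a page-table entry, which is what the memset() and the pgtable_area advance actually mean. On x86-64 the two happen to be the same 8 bytes, so this reads as a correctness-of-intent fix, but the distinction is easy to get wrong in general. A standalone illustration in plain userspace C, unrelated to the kernel types:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t *entry = NULL;
		char *byte = NULL;

		/* sizeof(ptr) is the pointer's size; sizeof(*ptr) is the pointee's. */
		printf("entry: %zu vs %zu\n", sizeof(entry), sizeof(*entry)); /* 8 vs 8 on LP64 */
		printf("byte:  %zu vs %zu\n", sizeof(byte), sizeof(*byte));   /* 8 vs 1 on LP64 */
		return 0;
	}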
 
index 90d68760af086bd6de78c71dc4a1f6954ceb7a5b..f8120832ca7b8ea44cf872c3810dd373a662656b 100644 (file)
@@ -308,8 +308,9 @@ static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(PREFLUSH),
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
-       CMD_FLAG_NAME(NOUNMAP),
        CMD_FLAG_NAME(NOWAIT),
+       CMD_FLAG_NAME(NOUNMAP),
+       CMD_FLAG_NAME(HIPRI),
 };
 #undef CMD_FLAG_NAME
 
index f0c56649775fcb35ae978b9dac27bcd4c4001fb3..fd166fbb0f6587c494e6095b8bf6e58de0c67360 100644 (file)
@@ -597,7 +597,7 @@ static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
        rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
 }
 
-void wbt_issue(struct rq_qos *rqos, struct request *rq)
+static void wbt_issue(struct rq_qos *rqos, struct request *rq)
 {
        struct rq_wb *rwb = RQWB(rqos);
 
@@ -617,7 +617,7 @@ void wbt_issue(struct rq_qos *rqos, struct request *rq)
        }
 }
 
-void wbt_requeue(struct rq_qos *rqos, struct request *rq)
+static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
 {
        struct rq_wb *rwb = RQWB(rqos);
        if (!rwb_enabled(rwb))
index 5143e11e3b0fa5378a5f08ee094706b92ba41962..e18ade5d74e9ecf38126bce5301f027073779bc3 100644 (file)
@@ -409,6 +409,32 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
        return true;
 }
 
+static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
+               struct nd_cmd_pkg *call_pkg)
+{
+       if (call_pkg) {
+               int i;
+
+               if (nfit_mem->family != call_pkg->nd_family)
+                       return -ENOTTY;
+
+               for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
+                       if (call_pkg->nd_reserved2[i])
+                               return -EINVAL;
+               return call_pkg->nd_command;
+       }
+
+       /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
+       if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+               return cmd;
+
+       /*
+        * Force function number validation to fail since 0 is never
+        * published as a valid function in dsm_mask.
+        */
+       return 0;
+}
+
 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
 {
@@ -422,30 +448,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        unsigned long cmd_mask, dsm_mask;
        u32 offset, fw_status = 0;
        acpi_handle handle;
-       unsigned int func;
        const guid_t *guid;
-       int rc, i;
+       int func, rc, i;
 
        if (cmd_rc)
                *cmd_rc = -EINVAL;
-       func = cmd;
-       if (cmd == ND_CMD_CALL) {
-               call_pkg = buf;
-               func = call_pkg->nd_command;
-
-               for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
-                       if (call_pkg->nd_reserved2[i])
-                               return -EINVAL;
-       }
 
        if (nvdimm) {
                struct acpi_device *adev = nfit_mem->adev;
 
                if (!adev)
                        return -ENOTTY;
-               if (call_pkg && nfit_mem->family != call_pkg->nd_family)
-                       return -ENOTTY;
 
+               if (cmd == ND_CMD_CALL)
+                       call_pkg = buf;
+               func = cmd_to_func(nfit_mem, cmd, call_pkg);
+               if (func < 0)
+                       return func;
                dimm_name = nvdimm_name(nvdimm);
                cmd_name = nvdimm_cmd_name(cmd);
                cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -456,6 +475,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        } else {
                struct acpi_device *adev = to_acpi_dev(acpi_desc);
 
+               func = cmd;
                cmd_name = nvdimm_bus_cmd_name(cmd);
                cmd_mask = nd_desc->cmd_mask;
                dsm_mask = cmd_mask;
@@ -470,7 +490,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
                return -ENOTTY;
 
-       if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
+       /*
+        * Check for a valid command.  For ND_CMD_CALL, we also have to
+        * make sure that the DSM function is supported.
+        */
+       if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+               return -ENOTTY;
+       else if (!test_bit(cmd, &cmd_mask))
                return -ENOTTY;
 
        in_obj.type = ACPI_TYPE_PACKAGE;
@@ -1867,6 +1893,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                return 0;
        }
 
+       /*
+        * Function 0 is the command interrogation function, don't
+        * export it to potential userspace use, and enable it to be
+        * used as an error value in acpi_nfit_ctl().
+        */
+       dsm_mask &= ~1UL;
+
        guid = to_nfit_uuid(nfit_mem->family);
        for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
                if (acpi_check_dsm(adev_dimm->handle, guid,
@@ -2042,11 +2075,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
                if (!nvdimm)
                        continue;
 
-               rc = nvdimm_security_setup_events(nvdimm);
-               if (rc < 0)
-                       dev_warn(acpi_desc->dev,
-                               "security event setup failed: %d\n", rc);
-
                nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
                if (nfit_kernfs)
                        nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
index 7496b10532aafde54d68c95f264c2853c6f67f1c..6a2185eb66c59761e5d4f0d0f09120806c12dc8b 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kdev_t.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/namei.h>
 #include <linux/magic.h>
 #include <linux/major.h>
 #include <linux/miscdevice.h>
@@ -20,6 +21,7 @@
 #include <linux/parser.h>
 #include <linux/radix-tree.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock_types.h>
 #include <linux/stddef.h>
@@ -30,7 +32,7 @@
 #include <linux/xarray.h>
 #include <uapi/asm-generic/errno-base.h>
 #include <uapi/linux/android/binder.h>
-#include <uapi/linux/android/binder_ctl.h>
+#include <uapi/linux/android/binderfs.h>
 
 #include "binder_internal.h"
 
 #define INODE_OFFSET 3
 #define INTSTRLEN 21
 #define BINDERFS_MAX_MINOR (1U << MINORBITS)
-
-static struct vfsmount *binderfs_mnt;
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
 
 static dev_t binderfs_dev;
 static DEFINE_MUTEX(binderfs_minors_mutex);
 static DEFINE_IDA(binderfs_minors);
 
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ */
+struct binderfs_mount_opts {
+       int max;
+};
+
+enum {
+       Opt_max,
+       Opt_err
+};
+
+static const match_table_t tokens = {
+       { Opt_max, "max=%d" },
+       { Opt_err, NULL     }
+};
+
 /**
  * binderfs_info - information about a binderfs mount
  * @ipc_ns:         The ipc namespace the binderfs mount belongs to.
@@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors);
  *                  created.
  * @root_gid:       gid that needs to be used when a new binder device is
  *                  created.
+ * @mount_opts:     The mount options in use.
+ * @device_count:   The current number of allocated binder devices.
  */
 struct binderfs_info {
        struct ipc_namespace *ipc_ns;
        struct dentry *control_dentry;
        kuid_t root_uid;
        kgid_t root_gid;
-
+       struct binderfs_mount_opts mount_opts;
+       int device_count;
 };
 
 static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
@@ -84,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode)
  * @userp:     buffer to copy information about new device for userspace to
  * @req:       struct binderfs_device as copied from userspace
  *
- * This function allocated a new binder_device and reserves a new minor
+ * This function allocates a new binder_device and reserves a new minor
  * number for it.
  * Minor numbers are limited and tracked globally in binderfs_minors. The
  * function will stash a struct binder_device for the specific binder
@@ -100,20 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
                                         struct binderfs_device *req)
 {
        int minor, ret;
-       struct dentry *dentry, *dup, *root;
+       struct dentry *dentry, *root;
        struct binder_device *device;
-       size_t name_len = BINDERFS_MAX_NAME + 1;
        char *name = NULL;
+       size_t name_len;
        struct inode *inode = NULL;
        struct super_block *sb = ref_inode->i_sb;
        struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+       bool use_reserve = true;
+#endif
 
        /* Reserve new minor number for the new device. */
        mutex_lock(&binderfs_minors_mutex);
-       minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
-       mutex_unlock(&binderfs_minors_mutex);
-       if (minor < 0)
+       if (++info->device_count <= info->mount_opts.max)
+               minor = ida_alloc_max(&binderfs_minors,
+                                     use_reserve ? BINDERFS_MAX_MINOR :
+                                                   BINDERFS_MAX_MINOR_CAPPED,
+                                     GFP_KERNEL);
+       else
+               minor = -ENOSPC;
+       if (minor < 0) {
+               --info->device_count;
+               mutex_unlock(&binderfs_minors_mutex);
                return minor;
+       }
+       mutex_unlock(&binderfs_minors_mutex);
 
        ret = -ENOMEM;
        device = kzalloc(sizeof(*device), GFP_KERNEL);
@@ -132,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
        inode->i_uid = info->root_uid;
        inode->i_gid = info->root_gid;
 
-       name = kmalloc(name_len, GFP_KERNEL);
+       req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+       name_len = strlen(req->name);
+       /* Make sure to include terminating NUL byte */
+       name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
        if (!name)
                goto err;
 
-       strscpy(name, req->name, name_len);
-
        device->binderfs_inode = inode;
        device->context.binder_context_mgr_uid = INVALID_UID;
        device->context.name = name;
@@ -156,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 
        root = sb->s_root;
        inode_lock(d_inode(root));
-       dentry = d_alloc_name(root, name);
-       if (!dentry) {
+
+       /* look it up */
+       dentry = lookup_one_len(name, root, name_len);
+       if (IS_ERR(dentry)) {
                inode_unlock(d_inode(root));
-               ret = -ENOMEM;
+               ret = PTR_ERR(dentry);
                goto err;
        }
 
-       /* Verify that the name userspace gave us is not already in use. */
-       dup = d_lookup(root, &dentry->d_name);
-       if (dup) {
-               if (d_really_is_positive(dup)) {
-                       dput(dup);
-                       dput(dentry);
-                       inode_unlock(d_inode(root));
-                       ret = -EEXIST;
-                       goto err;
-               }
-               dput(dup);
+       if (d_really_is_positive(dentry)) {
+               /* already exists */
+               dput(dentry);
+               inode_unlock(d_inode(root));
+               ret = -EEXIST;
+               goto err;
        }
 
        inode->i_private = device;
-       d_add(dentry, inode);
+       d_instantiate(dentry, inode);
        fsnotify_create(root->d_inode, dentry);
        inode_unlock(d_inode(root));
 
@@ -187,6 +222,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
        kfree(name);
        kfree(device);
        mutex_lock(&binderfs_minors_mutex);
+       --info->device_count;
        ida_free(&binderfs_minors, minor);
        mutex_unlock(&binderfs_minors_mutex);
        iput(inode);
@@ -232,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
 static void binderfs_evict_inode(struct inode *inode)
 {
        struct binder_device *device = inode->i_private;
+       struct binderfs_info *info = BINDERFS_I(inode);
 
        clear_inode(inode);
 
@@ -239,6 +276,7 @@ static void binderfs_evict_inode(struct inode *inode)
                return;
 
        mutex_lock(&binderfs_minors_mutex);
+       --info->device_count;
        ida_free(&binderfs_minors, device->miscdev.minor);
        mutex_unlock(&binderfs_minors_mutex);
 
@@ -246,43 +284,87 @@ static void binderfs_evict_inode(struct inode *inode)
        kfree(device);
 }
 
+/**
+ * binderfs_parse_mount_opts - parse binderfs mount options
+ * @data: options to set (can be NULL in which case defaults are used)
+ */
+static int binderfs_parse_mount_opts(char *data,
+                                    struct binderfs_mount_opts *opts)
+{
+       char *p;
+       opts->max = BINDERFS_MAX_MINOR;
+
+       while ((p = strsep(&data, ",")) != NULL) {
+               substring_t args[MAX_OPT_ARGS];
+               int token;
+               int max_devices;
+
+               if (!*p)
+                       continue;
+
+               token = match_token(p, tokens, args);
+               switch (token) {
+               case Opt_max:
+                       if (match_int(&args[0], &max_devices) ||
+                           (max_devices < 0 ||
+                            (max_devices > BINDERFS_MAX_MINOR)))
+                               return -EINVAL;
+
+                       opts->max = max_devices;
+                       break;
+               default:
+                       pr_err("Invalid mount options\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int binderfs_remount(struct super_block *sb, int *flags, char *data)
+{
+       struct binderfs_info *info = sb->s_fs_info;
+       return binderfs_parse_mount_opts(data, &info->mount_opts);
+}
+
+static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
+{
+       struct binderfs_info *info;
+
+       info = root->d_sb->s_fs_info;
+       if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+               seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+       return 0;
+}
+
 static const struct super_operations binderfs_super_ops = {
-       .statfs = simple_statfs,
-       .evict_inode = binderfs_evict_inode,
+       .evict_inode    = binderfs_evict_inode,
+       .remount_fs     = binderfs_remount,
+       .show_options   = binderfs_show_mount_opts,
+       .statfs         = simple_statfs,
 };
 
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+       struct binderfs_info *info = dentry->d_sb->s_fs_info;
+       return info->control_dentry == dentry;
+}
+
 static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                           struct inode *new_dir, struct dentry *new_dentry,
                           unsigned int flags)
 {
-       struct inode *inode = d_inode(old_dentry);
-
-       /* binderfs doesn't support directories. */
-       if (d_is_dir(old_dentry))
+       if (is_binderfs_control_device(old_dentry) ||
+           is_binderfs_control_device(new_dentry))
                return -EPERM;
 
-       if (flags & ~RENAME_NOREPLACE)
-               return -EINVAL;
-
-       if (!simple_empty(new_dentry))
-               return -ENOTEMPTY;
-
-       if (d_really_is_positive(new_dentry))
-               simple_unlink(new_dir, new_dentry);
-
-       old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
-               new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
-
-       return 0;
+       return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
 }
 
 static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
 {
-       /*
-        * The control dentry is only ever touched during mount so checking it
-        * here should not require us to take lock.
-        */
-       if (BINDERFS_I(dir)->control_dentry == dentry)
+       if (is_binderfs_control_device(dentry))
                return -EPERM;
 
        return simple_unlink(dir, dentry);
@@ -318,8 +400,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
        if (!device)
                return -ENOMEM;
 
-       inode_lock(d_inode(root));
-
        /* If we have already created a binder-control node, return. */
        if (info->control_dentry) {
                ret = 0;
@@ -358,12 +438,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
        inode->i_private = device;
        info->control_dentry = dentry;
        d_add(dentry, inode);
-       inode_unlock(d_inode(root));
 
        return 0;
 
 out:
-       inode_unlock(d_inode(root));
        kfree(device);
        iput(inode);
 
@@ -378,12 +456,9 @@ static const struct inode_operations binderfs_dir_inode_operations = {
 
 static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 {
+       int ret;
        struct binderfs_info *info;
-       int ret = -ENOMEM;
        struct inode *inode = NULL;
-       struct ipc_namespace *ipc_ns = sb->s_fs_info;
-
-       get_ipc_ns(ipc_ns);
 
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
@@ -405,11 +480,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_op = &binderfs_super_ops;
        sb->s_time_gran = 1;
 
-       info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
-       if (!info)
-               goto err_without_dentry;
+       sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+       if (!sb->s_fs_info)
+               return -ENOMEM;
+       info = sb->s_fs_info;
+
+       info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+       ret = binderfs_parse_mount_opts(data, &info->mount_opts);
+       if (ret)
+               return ret;
 
-       info->ipc_ns = ipc_ns;
        info->root_gid = make_kgid(sb->s_user_ns, 0);
        if (!gid_valid(info->root_gid))
                info->root_gid = GLOBAL_ROOT_GID;
@@ -417,11 +498,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
        if (!uid_valid(info->root_uid))
                info->root_uid = GLOBAL_ROOT_UID;
 
-       sb->s_fs_info = info;
-
        inode = new_inode(sb);
        if (!inode)
-               goto err_without_dentry;
+               return -ENOMEM;
 
        inode->i_ino = FIRST_INODE;
        inode->i_fop = &simple_dir_operations;
@@ -432,79 +511,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
-               goto err_without_dentry;
-
-       ret = binderfs_binder_ctl_create(sb);
-       if (ret)
-               goto err_with_dentry;
-
-       return 0;
-
-err_with_dentry:
-       dput(sb->s_root);
-       sb->s_root = NULL;
-
-err_without_dentry:
-       put_ipc_ns(ipc_ns);
-       iput(inode);
-       kfree(info);
-
-       return ret;
-}
-
-static int binderfs_test_super(struct super_block *sb, void *data)
-{
-       struct binderfs_info *info = sb->s_fs_info;
-
-       if (info)
-               return info->ipc_ns == data;
-
-       return 0;
-}
+               return -ENOMEM;
 
-static int binderfs_set_super(struct super_block *sb, void *data)
-{
-       sb->s_fs_info = data;
-       return set_anon_super(sb, NULL);
+       return binderfs_binder_ctl_create(sb);
 }
 
 static struct dentry *binderfs_mount(struct file_system_type *fs_type,
                                     int flags, const char *dev_name,
                                     void *data)
 {
-       struct super_block *sb;
-       struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
-
-       if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN))
-               return ERR_PTR(-EPERM);
-
-       sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super,
-                        flags, ipc_ns->user_ns, ipc_ns);
-       if (IS_ERR(sb))
-               return ERR_CAST(sb);
-
-       if (!sb->s_root) {
-               int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
-               if (ret) {
-                       deactivate_locked_super(sb);
-                       return ERR_PTR(ret);
-               }
-
-               sb->s_flags |= SB_ACTIVE;
-       }
-
-       return dget(sb->s_root);
+       return mount_nodev(fs_type, flags, data, binderfs_fill_super);
 }
 
 static void binderfs_kill_super(struct super_block *sb)
 {
        struct binderfs_info *info = sb->s_fs_info;
 
+       kill_litter_super(sb);
+
        if (info && info->ipc_ns)
                put_ipc_ns(info->ipc_ns);
 
        kfree(info);
-       kill_litter_super(sb);
 }
 
 static struct file_system_type binder_fs_type = {
@@ -530,14 +558,6 @@ static int __init init_binderfs(void)
                return ret;
        }
 
-       binderfs_mnt = kern_mount(&binder_fs_type);
-       if (IS_ERR(binderfs_mnt)) {
-               ret = PTR_ERR(binderfs_mnt);
-               binderfs_mnt = NULL;
-               unregister_filesystem(&binder_fs_type);
-               unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
-       }
-
        return ret;
 }
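
Taken together, the binderfs hunks above add a "max=<n>" mount option (parsed with match_token(), enforced against BINDERFS_MAX_MINOR, and reported through ->show_options), tie each superblock to the mounting task's IPC namespace, and switch to mount_nodev() so every mount creates a fresh instance. A hedged userspace sketch of how the option would be used, assuming the filesystem type is registered as "binder"; the mount point and the limit here are made up for illustration:

	#include <err.h>
	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Needs CAP_SYS_ADMIN and a kernel built with CONFIG_ANDROID_BINDERFS. */
		if (mount("binder", "/dev/binderfs", "binder", 0, "max=4096") == -1)
			err(1, "mount binderfs");

		printf("binderfs mounted, at most 4096 binder devices allowed\n");
		return 0;
	}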
 
index 8cc9c429ad9589e54f06db537ff9688f94a28988..9e7fc302430ff2b43d86cf0dded1f81cb0576d88 100644 (file)
@@ -915,6 +915,10 @@ static struct scsi_host_template pata_macio_sht = {
        .sg_tablesize           = MAX_DCMDS,
        /* We may not need that strict one */
        .dma_boundary           = ATA_DMA_BOUNDARY,
+       /* Not sure what the real max is but we know it's less than 64K, let's
+        * use 64K minus 256
+        */
+       .max_segment_size       = MAX_DBDMA_SEG,
        .slave_configure        = pata_macio_slave_config,
 };
 
@@ -1044,11 +1048,6 @@ static int pata_macio_common_init(struct pata_macio_priv *priv,
        /* Make sure we have sane initial timings in the cache */
        pata_macio_default_timings(priv);
 
-       /* Not sure what the real max is but we know it's less than 64K, let's
-        * use 64K minus 256
-        */
-       dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
-
        /* Allocate libata host for 1 port */
        memset(&pinfo, 0, sizeof(struct ata_port_info));
        pmac_macio_calc_timing_masks(priv, &pinfo);
index e0bcf9b2dab040d7342c1eb4f4ead9abef550ba9..174e84ce437950702717bfbf6fc2cd6a734291b0 100644 (file)
@@ -245,8 +245,15 @@ struct inic_port_priv {
 
 static struct scsi_host_template inic_sht = {
        ATA_BASE_SHT(DRV_NAME),
-       .sg_tablesize   = LIBATA_MAX_PRD,       /* maybe it can be larger? */
-       .dma_boundary   = INIC_DMA_BOUNDARY,
+       .sg_tablesize           = LIBATA_MAX_PRD, /* maybe it can be larger? */
+
+       /*
+        * This controller is braindamaged.  dma_boundary is 0xffff like others
+        * but it will lock up the whole machine HARD if 65536 byte PRD entry
+        * is fed.  Reduce maximum segment size.
+        */
+       .dma_boundary           = INIC_DMA_BOUNDARY,
+       .max_segment_size       = 65536 - 512,
 };
 
 static const int scr_map[] = {
@@ -868,17 +875,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                return rc;
        }
 
-       /*
-        * This controller is braindamaged.  dma_boundary is 0xffff
-        * like others but it will lock up the whole machine HARD if
-        * 65536 byte PRD entry is fed. Reduce maximum segment size.
-        */
-       rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
-       if (rc) {
-               dev_err(&pdev->dev, "failed to set the maximum segment size\n");
-               return rc;
-       }
-
        rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
        if (rc) {
                dev_err(&pdev->dev, "failed to initialize controller\n");
index e906ecfe23dd82025cdfc9b412f309a7f04fdfdf..8ad77246f3221fc2e34fb98fa21c95f84099684e 100644 (file)
@@ -295,6 +295,14 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
        if (!mc_adev)
                goto error;
 
+       mc_adev->consumer_link = device_link_add(&mc_dev->dev,
+                                                &mc_adev->dev,
+                                                DL_FLAG_AUTOREMOVE_CONSUMER);
+       if (!mc_adev->consumer_link) {
+               error = -EINVAL;
+               goto error;
+       }
+
        *new_mc_adev = mc_adev;
        return 0;
 error:
@@ -321,6 +329,9 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
                return;
 
        fsl_mc_resource_free(resource);
+
+       device_link_del(mc_adev->consumer_link);
+       mc_adev->consumer_link = NULL;
 }
 EXPORT_SYMBOL_GPL(fsl_mc_object_free);
 
index 7226cfc49b6fd99f04be428ae751105e8f0cd058..3ae574a58ccee5369d249cfb603521b32b1093e8 100644 (file)
@@ -209,9 +209,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
        if (error < 0)
                goto error_cleanup_resource;
 
+       dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+                                                  &dpmcp_dev->dev,
+                                                  DL_FLAG_AUTOREMOVE_CONSUMER);
+       if (!dpmcp_dev->consumer_link) {
+               error = -EINVAL;
+               goto error_cleanup_mc_io;
+       }
+
        *new_mc_io = mc_io;
        return 0;
 
+error_cleanup_mc_io:
+       fsl_destroy_mc_io(mc_io);
 error_cleanup_resource:
        fsl_mc_resource_free(resource);
        return error;
@@ -244,6 +254,9 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
 
        fsl_destroy_mc_io(mc_io);
        fsl_mc_resource_free(resource);
+
+       device_link_del(dpmcp_dev->consumer_link);
+       dpmcp_dev->consumer_link = NULL;
 }
 EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
 
index d5f85455fa6216fc2c7660e5f543f5aab71f50f5..19d7b6ff2f1793b23d1e80507d733b4de3f8d9b8 100644 (file)
@@ -522,10 +522,9 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
 
                if (!found) {
                        dev_warn(hostdev,
-                                "could not find cell for child device (%s)\n",
+                                "could not find cell for child device (%s), discarding\n",
                                 hid);
-                       ret = -ENODEV;
-                       goto fail;
+                       continue;
                }
 
                pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
index d84996a4528ead1c3d38ecdf74922b78c12bc2c1..db74334ca5ef003ed587c5cccb63363abb30750d 100644 (file)
@@ -46,6 +46,17 @@ static const struct imx_weim_devtype imx51_weim_devtype = {
 };
 
 #define MAX_CS_REGS_COUNT      6
+#define MAX_CS_COUNT           6
+#define OF_REG_SIZE            3
+
+struct cs_timing {
+       bool is_applied;
+       u32 regs[MAX_CS_REGS_COUNT];
+};
+
+struct cs_timing_state {
+       struct cs_timing cs[MAX_CS_COUNT];
+};
 
 static const struct of_device_id weim_id_table[] = {
        /* i.MX1/21 */
@@ -111,21 +122,19 @@ static int __init imx_weim_gpr_setup(struct platform_device *pdev)
 }
 
 /* Parse and set the timing for this device. */
-static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
-                                   const struct imx_weim_devtype *devtype)
+static int __init weim_timing_setup(struct device *dev,
+                                   struct device_node *np, void __iomem *base,
+                                   const struct imx_weim_devtype *devtype,
+                                   struct cs_timing_state *ts)
 {
        u32 cs_idx, value[MAX_CS_REGS_COUNT];
        int i, ret;
+       int reg_idx, num_regs;
+       struct cs_timing *cst;
 
        if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
                return -EINVAL;
-
-       /* get the CS index from this child node's "reg" property. */
-       ret = of_property_read_u32(np, "reg", &cs_idx);
-       if (ret)
-               return ret;
-
-       if (cs_idx >= devtype->cs_count)
+       if (WARN_ON(devtype->cs_count > MAX_CS_COUNT))
                return -EINVAL;
 
        ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
@@ -133,9 +142,43 @@ static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
        if (ret)
                return ret;
 
-       /* set the timing for WEIM */
-       for (i = 0; i < devtype->cs_regs_count; i++)
-               writel(value[i], base + cs_idx * devtype->cs_stride + i * 4);
+       /*
+        * the child node's "reg" property may contain multiple address ranges,
+        * extract the chip select for each.
+        */
+       num_regs = of_property_count_elems_of_size(np, "reg", OF_REG_SIZE);
+       if (num_regs < 0)
+               return num_regs;
+       if (!num_regs)
+               return -EINVAL;
+       for (reg_idx = 0; reg_idx < num_regs; reg_idx++) {
+               /* get the CS index from this child node's "reg" property. */
+               ret = of_property_read_u32_index(np, "reg",
+                                       reg_idx * OF_REG_SIZE, &cs_idx);
+               if (ret)
+                       break;
+
+               if (cs_idx >= devtype->cs_count)
+                       return -EINVAL;
+
+               /* prevent re-configuring a CS that's already been configured */
+               cst = &ts->cs[cs_idx];
+               if (cst->is_applied && memcmp(value, cst->regs,
+                                       devtype->cs_regs_count * sizeof(u32))) {
+                       dev_err(dev, "fsl,weim-cs-timing conflict on %pOF", np);
+                       return -EINVAL;
+               }
+
+               /* set the timing for WEIM */
+               for (i = 0; i < devtype->cs_regs_count; i++)
+                       writel(value[i],
+                               base + cs_idx * devtype->cs_stride + i * 4);
+               if (!cst->is_applied) {
+                       cst->is_applied = true;
+                       memcpy(cst->regs, value,
+                               devtype->cs_regs_count * sizeof(u32));
+               }
+       }
 
        return 0;
 }
@@ -148,6 +191,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
        const struct imx_weim_devtype *devtype = of_id->data;
        struct device_node *child;
        int ret, have_child = 0;
+       struct cs_timing_state ts = {};
 
        if (devtype == &imx50_weim_devtype) {
                ret = imx_weim_gpr_setup(pdev);
@@ -156,7 +200,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
        }
 
        for_each_available_child_of_node(pdev->dev.of_node, child) {
-               ret = weim_timing_setup(child, base, devtype);
+               ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts);
                if (ret)
                        dev_warn(&pdev->dev, "%pOF set timing failed.\n",
                                child);
index 2b9fc8ac55009c66ec3e9e86a1cc50e23d34437d..ddfbabaa5f8f61517eeaa8e187259ab1111410fd 100644 (file)
@@ -73,11 +73,14 @@ struct optee_rng_private {
 static size_t get_optee_rng_data(struct optee_rng_private *pvt_data,
                                 void *buf, size_t req_size)
 {
-       u32 ret = 0;
+       int ret = 0;
        u8 *rng_data = NULL;
        size_t rng_size = 0;
-       struct tee_ioctl_invoke_arg inv_arg = {0};
-       struct tee_param param[4] = {0};
+       struct tee_ioctl_invoke_arg inv_arg;
+       struct tee_param param[4];
+
+       memset(&inv_arg, 0, sizeof(inv_arg));
+       memset(&param, 0, sizeof(param));
 
        /* Invoke TA_CMD_GET_ENTROPY function of Trusted App */
        inv_arg.func = TA_CMD_GET_ENTROPY;
@@ -172,9 +175,12 @@ static struct optee_rng_private pvt_data = {
 
 static int get_optee_rng_info(struct device *dev)
 {
-       u32 ret = 0;
-       struct tee_ioctl_invoke_arg inv_arg = {0};
-       struct tee_param param[4] = {0};
+       int ret = 0;
+       struct tee_ioctl_invoke_arg inv_arg;
+       struct tee_param param[4];
+
+       memset(&inv_arg, 0, sizeof(inv_arg));
+       memset(&param, 0, sizeof(param));
 
        /* Invoke TA_CMD_GET_RNG_INFO function of Trusted App */
        inv_arg.func = TA_CMD_GET_RNG_INFO;
@@ -209,7 +215,9 @@ static int optee_rng_probe(struct device *dev)
 {
        struct tee_client_device *rng_device = to_tee_client_device(dev);
        int ret = 0, err = -ENODEV;
-       struct tee_ioctl_open_session_arg sess_arg = {0};
+       struct tee_ioctl_open_session_arg sess_arg;
+
+       memset(&sess_arg, 0, sizeof(sess_arg));
 
        /* Open context with TEE driver */
        pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
@@ -262,7 +270,7 @@ static int optee_rng_remove(struct device *dev)
        return 0;
 }
 
-const struct tee_client_device_id optee_rng_id_table[] = {
+static const struct tee_client_device_id optee_rng_id_table[] = {
        {UUID_INIT(0xab7a617c, 0xb8e7, 0x4d8f,
                   0x83, 0x01, 0xd0, 0x9b, 0x61, 0x03, 0x6b, 0x64)},
        {}
index a74ce885b54125b3852cd9f7de66ec5ee2a52ea3..c518659b4d9fe17a39edc9a53651198c08aa2b5f 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/moduleparam.h>
 #include <linux/workqueue.h>
 #include <linux/uuid.h>
+#include <linux/nospec.h>
 
 #define IPMI_DRIVER_VERSION "39.2"
 
@@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
 { }
 #endif
 
-static int initialized;
+static bool initialized;
+static bool drvregistered;
 
 enum ipmi_panic_event_op {
        IPMI_SEND_PANIC_EVENT_NONE,
@@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
 
 static LIST_HEAD(ipmi_interfaces);
 static DEFINE_MUTEX(ipmi_interfaces_mutex);
-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
+struct srcu_struct ipmi_interfaces_srcu;
 
 /*
  * List of watchers that want to know when smi's are added and deleted.
@@ -720,7 +722,15 @@ struct watcher_entry {
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
        struct ipmi_smi *intf;
-       int index;
+       int index, rv;
+
+       /*
+        * Make sure the driver is actually initialized, this handles
+        * problems with initialization order.
+        */
+       rv = ipmi_init_msghandler();
+       if (rv)
+               return rv;
 
        mutex_lock(&smi_watchers_mutex);
 
@@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
 
                if (user) {
                        user->handler->ipmi_recv_hndl(msg, user->handler_data);
-                       release_ipmi_user(msg->user, index);
+                       release_ipmi_user(user, index);
                } else {
                        /* User went away, give up. */
                        ipmi_free_recv_msg(msg);
@@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int          if_num,
 {
        unsigned long flags;
        struct ipmi_user *new_user;
-       int           rv = 0, index;
+       int           rv, index;
        struct ipmi_smi *intf;
 
        /*
@@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int          if_num,
         * Make sure the driver is actually initialized, this handles
         * problems with initialization order.
         */
-       if (!initialized) {
-               rv = ipmi_init_msghandler();
-               if (rv)
-                       return rv;
-
-               /*
-                * The init code doesn't return an error if it was turned
-                * off, but it won't initialize.  Check that.
-                */
-               if (!initialized)
-                       return -ENODEV;
-       }
+       rv = ipmi_init_msghandler();
+       if (rv)
+               return rv;
 
        new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
        if (!new_user)
@@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
 static void free_user(struct kref *ref)
 {
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+       cleanup_srcu_struct(&user->release_barrier);
        kfree(user);
 }
 
@@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
 {
        _ipmi_destroy_user(user);
 
-       cleanup_srcu_struct(&user->release_barrier);
        kref_put(&user->refcount, free_user);
 
        return 0;
@@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
        if (!user)
                return -ENODEV;
 
-       if (channel >= IPMI_MAX_CHANNELS)
+       if (channel >= IPMI_MAX_CHANNELS) {
                rv = -EINVAL;
-       else
+       } else {
+               channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
                user->intf->addrinfo[channel].address = address;
+       }
        release_ipmi_user(user, index);
 
        return rv;
@@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
        if (!user)
                return -ENODEV;
 
-       if (channel >= IPMI_MAX_CHANNELS)
+       if (channel >= IPMI_MAX_CHANNELS) {
                rv = -EINVAL;
-       else
+       } else {
+               channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
                *address = user->intf->addrinfo[channel].address;
+       }
        release_ipmi_user(user, index);
 
        return rv;
@@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
        if (!user)
                return -ENODEV;
 
-       if (channel >= IPMI_MAX_CHANNELS)
+       if (channel >= IPMI_MAX_CHANNELS) {
                rv = -EINVAL;
-       else
+       } else {
+               channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
                user->intf->addrinfo[channel].lun = LUN & 0x3;
+       }
        release_ipmi_user(user, index);
 
        return rv;
@@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
        if (!user)
                return -ENODEV;
 
-       if (channel >= IPMI_MAX_CHANNELS)
+       if (channel >= IPMI_MAX_CHANNELS) {
                rv = -EINVAL;
-       else
+       } else {
+               channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
                *address = user->intf->addrinfo[channel].lun;
+       }
        release_ipmi_user(user, index);
 
        return rv;
@@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi  *intf,
 {
        if (addr->channel >= IPMI_MAX_CHANNELS)
                return -EINVAL;
+       addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
        *lun = intf->addrinfo[addr->channel].lun;
        *saddr = intf->addrinfo[addr->channel].address;
        return 0;
@@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
         * Make sure the driver is actually initialized, this handles
         * problems with initialization order.
         */
-       if (!initialized) {
-               rv = ipmi_init_msghandler();
-               if (rv)
-                       return rv;
-               /*
-                * The init code doesn't return an error if it was turned
-                * off, but it won't initialize.  Check that.
-                */
-               if (!initialized)
-                       return -ENODEV;
-       }
+       rv = ipmi_init_msghandler();
+       if (rv)
+               return rv;
 
        intf = kzalloc(sizeof(*intf), GFP_KERNEL);
        if (!intf)
@@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
+/* Must be called with ipmi_interfaces_mutex held. */
+static int ipmi_register_driver(void)
+{
+       int rv;
+
+       if (drvregistered)
+               return 0;
+
+       rv = driver_register(&ipmidriver.driver);
+       if (rv)
+               pr_err("Could not register IPMI driver\n");
+       else
+               drvregistered = true;
+       return rv;
+}
+
 static struct notifier_block panic_block = {
        .notifier_call  = panic_event,
        .next           = NULL,
@@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void)
 {
        int rv;
 
+       mutex_lock(&ipmi_interfaces_mutex);
+       rv = ipmi_register_driver();
+       if (rv)
+               goto out;
        if (initialized)
-               return 0;
-
-       rv = driver_register(&ipmidriver.driver);
-       if (rv) {
-               pr_err("Could not register IPMI driver\n");
-               return rv;
-       }
+               goto out;
 
-       pr_info("version " IPMI_DRIVER_VERSION "\n");
+       init_srcu_struct(&ipmi_interfaces_srcu);
 
        timer_setup(&ipmi_timer, ipmi_timeout, 0);
        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 
        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
-       initialized = 1;
+       initialized = true;
 
-       return 0;
+out:
+       mutex_unlock(&ipmi_interfaces_mutex);
+       return rv;
 }
 
 static int __init ipmi_init_msghandler_mod(void)
 {
-       ipmi_init_msghandler();
-       return 0;
+       int rv;
+
+       pr_info("version " IPMI_DRIVER_VERSION "\n");
+
+       mutex_lock(&ipmi_interfaces_mutex);
+       rv = ipmi_register_driver();
+       mutex_unlock(&ipmi_interfaces_mutex);
+
+       return rv;
 }
 
 static void __exit cleanup_ipmi(void)
 {
        int count;
 
-       if (!initialized)
-               return;
-
-       atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
+       if (initialized) {
+               atomic_notifier_chain_unregister(&panic_notifier_list,
+                                                &panic_block);
 
-       /*
-        * This can't be called if any interfaces exist, so no worry
-        * about shutting down the interfaces.
-        */
+               /*
+                * This can't be called if any interfaces exist, so no worry
+                * about shutting down the interfaces.
+                */
 
-       /*
-        * Tell the timer to stop, then wait for it to stop.  This
-        * avoids problems with race conditions removing the timer
-        * here.
-        */
-       atomic_inc(&stop_operation);
-       del_timer_sync(&ipmi_timer);
+               /*
+                * Tell the timer to stop, then wait for it to stop.  This
+                * avoids problems with race conditions removing the timer
+                * here.
+                */
+               atomic_inc(&stop_operation);
+               del_timer_sync(&ipmi_timer);
 
-       driver_unregister(&ipmidriver.driver);
+               initialized = false;
 
-       initialized = 0;
+               /* Check for buffer leaks. */
+               count = atomic_read(&smi_msg_inuse_count);
+               if (count != 0)
+                       pr_warn("SMI message count %d at exit\n", count);
+               count = atomic_read(&recv_msg_inuse_count);
+               if (count != 0)
+                       pr_warn("recv message count %d at exit\n", count);
 
-       /* Check for buffer leaks. */
-       count = atomic_read(&smi_msg_inuse_count);
-       if (count != 0)
-               pr_warn("SMI message count %d at exit\n", count);
-       count = atomic_read(&recv_msg_inuse_count);
-       if (count != 0)
-               pr_warn("recv message count %d at exit\n", count);
+               cleanup_srcu_struct(&ipmi_interfaces_srcu);
+       }
+       if (drvregistered)
+               driver_unregister(&ipmidriver.driver);
 }
 module_exit(cleanup_ipmi);
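
Besides the init and teardown rework, several ipmi_msghandler.c hunks above clamp a user-supplied channel index with array_index_nospec() right after the bounds check. This is the usual Spectre-v1 mitigation: the branch alone can be speculated past, while the clamp keeps even speculative array accesses in range. A condensed sketch of the pattern with a hypothetical table, not the driver's own structures:

	#include <linux/errno.h>
	#include <linux/nospec.h>

	#define MAX_CHANNELS 16

	static unsigned char chan_address[MAX_CHANNELS];

	static int set_channel_address(unsigned int channel, unsigned char address)
	{
		if (channel >= MAX_CHANNELS)
			return -EINVAL;

		/* Clamp so speculative execution cannot index past the array. */
		channel = array_index_nospec(channel, MAX_CHANNELS);
		chan_address[channel] = address;
		return 0;
	}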
 
index ca9528c4f183e7ea57cb71805b5a09aaf3bedd69..b7a1ae2afaeac7435410f6c100d7e9941f2cb486 100644 (file)
@@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 
                /* Remove the multi-part read marker. */
                len -= 2;
+               data += 2;
                for (i = 0; i < len; i++)
-                       ssif_info->data[i] = data[i+2];
+                       ssif_info->data[i] = data[i];
                ssif_info->multi_len = len;
                ssif_info->multi_pos = 1;
 
@@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
                }
 
                blocknum = data[0];
+               len--;
+               data++;
+
+               if (blocknum != 0xff && len != 31) {
+                   /* All blocks but the last must have 31 data bytes. */
+                       result = -EIO;
+                       if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+                               pr_info("Received middle message <31\n");
 
-               if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
+                       goto continue_op;
+               }
+
+               if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
                        /* Received message too big, abort the operation. */
                        result = -E2BIG;
                        if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
                        goto continue_op;
                }
 
-               /* Remove the blocknum from the data. */
-               len--;
                for (i = 0; i < len; i++)
-                       ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
+                       ssif_info->data[i + ssif_info->multi_len] = data[i];
                ssif_info->multi_len += len;
                if (blocknum == 0xff) {
                        /* End of read */
                        len = ssif_info->multi_len;
                        data = ssif_info->data;
-               } else if (blocknum + 1 != ssif_info->multi_pos) {
+               } else if (blocknum != ssif_info->multi_pos) {
                        /*
                         * Out of sequence block, just abort.  Block
                         * numbers start at zero for the second block,
@@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
                }
        }
 
+ continue_op:
        if (result < 0) {
                ssif_inc_stat(ssif_info, receive_errors);
        } else {
@@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
                ssif_inc_stat(ssif_info, received_message_parts);
        }
 
-
- continue_op:
        if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
                pr_info("DONE 1: state = %d, result=%d\n",
                        ssif_info->ssif_state, result);
index b5e3103c1175575a0e16d3f4168b4d343aca7b6f..e43c876a92232d9fc0e8a18269c5c4fd7342a3ff 100644 (file)
@@ -59,6 +59,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/serial_8250.h>
+#include <linux/nospec.h>
 #include "smapi.h"
 #include "mwavedd.h"
 #include "3780i.h"
@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
                                                ipcnum);
                                return -EINVAL;
                        }
+                       ipcnum = array_index_nospec(ipcnum,
+                                                   ARRAY_SIZE(pDrvData->IPCs));
                        PRINTK_3(TRACE_MWAVE,
                                "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
                                " ipcnum %x entry usIntCount %x\n",
@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
                                                " Invalid ipcnum %x\n", ipcnum);
                                return -EINVAL;
                        }
+                       ipcnum = array_index_nospec(ipcnum,
+                                                   ARRAY_SIZE(pDrvData->IPCs));
                        PRINTK_3(TRACE_MWAVE,
                                "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
                                " ipcnum %x, usIntCount %x\n",
@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
                                                ipcnum);
                                return -EINVAL;
                        }
+                       ipcnum = array_index_nospec(ipcnum,
+                                                   ARRAY_SIZE(pDrvData->IPCs));
                        mutex_lock(&mwave_mutex);
                        if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
                                pDrvData->IPCs[ipcnum].bIsEnabled = false;
index e5b2fe80eab432c699242cd3146e5c97e2928f7a..d2f0bb5ba47eabd3702a9c249f51f81f8d25a318 100644 (file)
@@ -293,7 +293,6 @@ config COMMON_CLK_BD718XX
 source "drivers/clk/actions/Kconfig"
 source "drivers/clk/bcm/Kconfig"
 source "drivers/clk/hisilicon/Kconfig"
-source "drivers/clk/imx/Kconfig"
 source "drivers/clk/imgtec/Kconfig"
 source "drivers/clk/imx/Kconfig"
 source "drivers/clk/ingenic/Kconfig"
index 5b393e711e94b28559a23b215810c5bdbf3d0666..7d16ab0784ecf4258963a2429ef6ea95e3ecc777 100644 (file)
@@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
 
                if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
                        src = VC5_PRIM_SRC_SHDN_EN_XTAL;
-               if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
+               else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
                        src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
+               else /* Invalid; should have been caught by vc5_probe() */
+                       return -EINVAL;
        }
 
        return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
index 75d13c0eff1243ebc29bbab45470e34f127e538c..6ccdbedb02f374a0d8ef9aac22dea5a80b27996c 100644 (file)
@@ -2779,7 +2779,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
        seq_printf(s, "\"protect_count\": %d,", c->protect_count);
        seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
        seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
-       seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+       seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
        seq_printf(s, "\"duty_cycle\": %u",
                   clk_core_get_scaled_duty_cycle(c, 100000));
 }
index 99c2508de8e56777ea4ab6814470e7584fb3d700..fb6edf1b8aa2688bc1ce7e2e2555698d8c01a583 100644 (file)
@@ -169,6 +169,8 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
                return -ENODEV;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -EINVAL;
        base = devm_ioremap(dev, res->start, resource_size(res));
        if (!base)
                return -ENOMEM;
index 1b1ba54e33dde801bc380280507ec933f65cddea..1c04575c118f722c5d94ad28c281bdb1a76ec611 100644 (file)
@@ -215,6 +215,7 @@ config MSM_MMCC_8996
 
 config MSM_GCC_8998
        tristate "MSM8998 Global Clock Controller"
+       select QCOM_GDSC
        help
          Support for the global clock controller on msm8998 devices.
          Say Y if you want to use peripheral devices such as UART, SPI,
index 2d5d8b43727e95a5bd8f9af70c4452fedc1c9c19..c4d0b6f6abf2e1bb1a6027cd19bf0c22dc1b5cfe 100644 (file)
@@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
        /* Read mdiv and fdiv from the fdbck register */
        reg = readl(socfpgaclk->hw.reg + 0x4);
        mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
-       vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
+       vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
 
        return (unsigned long)vco_freq;
 }
index 5b238fc314ac65c83f6bbc44af222ad1f0037110..8281dfbf38c2f8c6e88db7489df6482b70f2a232 100644 (file)
 
 #include "stratix10-clk.h"
 
-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
-                                       "f2s_free_clk",};
+static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
+                                       "f2s-free-clk",};
 static const char * const cntr_mux[] = { "main_pll", "periph_pll",
-                                        "osc1", "cb_intosc_hs_div2_clk",
-                                        "f2s_free_clk"};
-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
+                                        "osc1", "cb-intosc-hs-div2-clk",
+                                        "f2s-free-clk"};
+static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
 
 static const char * const noc_free_mux[] = {"main_noc_base_clk",
                                            "peri_noc_base_clk",
-                                           "osc1", "cb_intosc_hs_div2_clk",
-                                           "f2s_free_clk"};
+                                           "osc1", "cb-intosc-hs-div2-clk",
+                                           "f2s-free-clk"};
 
 static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
 static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
@@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
 static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
 static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
 
-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
+static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
 static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
 static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
 
 static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
                                            "peri_mpu_base_clk",
-                                           "osc1", "cb_intosc_hs_div2_clk",
-                                           "f2s_free_clk"};
+                                           "osc1", "cb-intosc-hs-div2-clk",
+                                           "f2s-free-clk"};
 
 /* clocks in AO (always on) controller */
 static const struct stratix10_pll_clock s10_pll_clks[] = {
index 7ddacae5d0b1718c48d400774d12069426c922e4..1adcccfa7829961c129760b9e8c27887568a2b1a 100644 (file)
@@ -5,3 +5,8 @@ config TEGRA_CLK_EMC
 config CLK_TEGRA_BPMP
        def_bool y
        depends on TEGRA_BPMP
+
+config TEGRA_CLK_DFLL
+       depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
+       select PM_OPP
+       def_bool y
index 6507acc843c7fd2440117c94c48878641dce12fc..4812e45c22142670158747c173bf0383f8206a8c 100644 (file)
@@ -20,7 +20,7 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC)         += clk-tegra20.o
 obj-$(CONFIG_ARCH_TEGRA_3x_SOC)         += clk-tegra30.o
 obj-$(CONFIG_ARCH_TEGRA_114_SOC)       += clk-tegra114.o
 obj-$(CONFIG_ARCH_TEGRA_124_SOC)       += clk-tegra124.o
-obj-$(CONFIG_ARCH_TEGRA_124_SOC)       += clk-tegra124-dfll-fcpu.o
+obj-$(CONFIG_TEGRA_CLK_DFLL)           += clk-tegra124-dfll-fcpu.o
 obj-$(CONFIG_ARCH_TEGRA_132_SOC)       += clk-tegra124.o
 obj-y                                  += cvb.o
 obj-$(CONFIG_ARCH_TEGRA_210_SOC)       += clk-tegra210.o
index 609e363dabf81400378c84e4ce16e5a209095d8b..0400e5b1d627f6823a567791e9379b00e6945c81 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * clk-dfll.c - Tegra DFLL clock source common code
  *
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
  *
  * Aleksandr Frid <afrid@nvidia.com>
  * Paul Walmsley <pwalmsley@nvidia.com>
@@ -47,6 +47,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/pm_opp.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
@@ -243,6 +244,12 @@ enum dfll_tune_range {
        DFLL_TUNE_LOW = 1,
 };
 
+
+enum tegra_dfll_pmu_if {
+       TEGRA_DFLL_PMU_I2C = 0,
+       TEGRA_DFLL_PMU_PWM = 1,
+};
+
 /**
  * struct dfll_rate_req - target DFLL rate request data
  * @rate: target frequency, after the postscaling
@@ -300,10 +307,19 @@ struct tegra_dfll {
        u32                             i2c_reg;
        u32                             i2c_slave_addr;
 
-       /* i2c_lut array entries are regulator framework selectors */
-       unsigned                        i2c_lut[MAX_DFLL_VOLTAGES];
-       int                             i2c_lut_size;
-       u8                              lut_min, lut_max, lut_safe;
+       /* lut array entries are regulator framework selectors or PWM values */
+       unsigned                        lut[MAX_DFLL_VOLTAGES];
+       unsigned long                   lut_uv[MAX_DFLL_VOLTAGES];
+       int                             lut_size;
+       u8                              lut_bottom, lut_min, lut_max, lut_safe;
+
+       /* PWM interface */
+       enum tegra_dfll_pmu_if          pmu_if;
+       unsigned long                   pwm_rate;
+       struct pinctrl                  *pwm_pin;
+       struct pinctrl_state            *pwm_enable_state;
+       struct pinctrl_state            *pwm_disable_state;
+       u32                             reg_init_uV;
 };
 
 #define clk_hw_to_dfll(_hw) container_of(_hw, struct tegra_dfll, dfll_clk_hw)
@@ -489,6 +505,34 @@ static void dfll_set_mode(struct tegra_dfll *td,
        dfll_wmb(td);
 }
 
+/*
+ * DVCO rate control
+ */
+
+static unsigned long get_dvco_rate_below(struct tegra_dfll *td, u8 out_min)
+{
+       struct dev_pm_opp *opp;
+       unsigned long rate, prev_rate;
+       unsigned long uv, min_uv;
+
+       min_uv = td->lut_uv[out_min];
+       for (rate = 0, prev_rate = 0; ; rate++) {
+               opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
+               if (IS_ERR(opp))
+                       break;
+
+               uv = dev_pm_opp_get_voltage(opp);
+               dev_pm_opp_put(opp);
+
+               if (uv && uv > min_uv)
+                       return prev_rate;
+
+               prev_rate = rate;
+       }
+
+       return prev_rate;
+}
+
 /*
  * DFLL-to-I2C controller interface
  */
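As an aside, a stand-alone sketch (not kernel code) of the search that the new get_dvco_rate_below() helper performs: it walks an OPP-style table in ascending frequency order and returns the highest rate whose voltage still fits under the given minimum. The table entries below are made-up examples rather than real Tegra OPPs.

/* User-space model of get_dvco_rate_below(); values are hypothetical. */
#include <stdio.h>

struct opp { unsigned long rate; unsigned long uv; };

static unsigned long dvco_rate_below(const struct opp *t, int n,
                                     unsigned long min_uv)
{
        unsigned long prev_rate = 0;
        int i;

        for (i = 0; i < n; i++) {               /* mimics find_freq_ceil loop */
                if (t[i].uv && t[i].uv > min_uv)
                        return prev_rate;       /* first OPP above min_uv */
                prev_rate = t[i].rate;
        }
        return prev_rate;
}

int main(void)
{
        const struct opp table[] = {            /* hypothetical OPPs */
                {  204000000, 800000 },
                {  306000000, 840000 },
                {  408000000, 900000 },
        };

        /* prints 306000000: the largest rate at or below 840000 uV */
        printf("%lu\n", dvco_rate_below(table, 3, 840000));
        return 0;
}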
@@ -518,6 +562,118 @@ static int dfll_i2c_set_output_enabled(struct tegra_dfll *td, bool enable)
        return 0;
 }
 
+
+/*
+ * DFLL-to-PWM controller interface
+ */
+
+/**
+ * dfll_pwm_set_output_enabled - enable/disable PWM voltage requests
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the PWM voltage requests
+ *
+ * Set the master enable control for PWM control value updates. If disabled,
+ * then the PWM signal is not driven. Also configure the PWM output pad
+ * to the appropriate state.
+ */
+static int dfll_pwm_set_output_enabled(struct tegra_dfll *td, bool enable)
+{
+       int ret;
+       u32 val, div;
+
+       if (enable) {
+               ret = pinctrl_select_state(td->pwm_pin, td->pwm_enable_state);
+               if (ret < 0) {
+                       dev_err(td->dev, "setting enable state failed\n");
+                       return -EINVAL;
+               }
+               val = dfll_readl(td, DFLL_OUTPUT_CFG);
+               val &= ~DFLL_OUTPUT_CFG_PWM_DIV_MASK;
+               div = DIV_ROUND_UP(td->ref_rate, td->pwm_rate);
+               val |= (div << DFLL_OUTPUT_CFG_PWM_DIV_SHIFT)
+                               & DFLL_OUTPUT_CFG_PWM_DIV_MASK;
+               dfll_writel(td, val, DFLL_OUTPUT_CFG);
+               dfll_wmb(td);
+
+               val |= DFLL_OUTPUT_CFG_PWM_ENABLE;
+               dfll_writel(td, val, DFLL_OUTPUT_CFG);
+               dfll_wmb(td);
+       } else {
+               ret = pinctrl_select_state(td->pwm_pin, td->pwm_disable_state);
+               if (ret < 0)
+                       dev_warn(td->dev, "setting disable state failed\n");
+
+               val = dfll_readl(td, DFLL_OUTPUT_CFG);
+               val &= ~DFLL_OUTPUT_CFG_PWM_ENABLE;
+               dfll_writel(td, val, DFLL_OUTPUT_CFG);
+               dfll_wmb(td);
+       }
+
+       return 0;
+}
+
+/**
+ * dfll_set_force_output_value - set fixed value for force output
+ * @td: DFLL instance
+ * @out_val: value to force output
+ *
+ * Set the fixed value for force output; the DFLL will output this value when
+ * force output is enabled.
+ */
+static u32 dfll_set_force_output_value(struct tegra_dfll *td, u8 out_val)
+{
+       u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE);
+
+       val = (val & DFLL_OUTPUT_FORCE_ENABLE) | (out_val & OUT_MASK);
+       dfll_writel(td, val, DFLL_OUTPUT_FORCE);
+       dfll_wmb(td);
+
+       return dfll_readl(td, DFLL_OUTPUT_FORCE);
+}
+
+/**
+ * dfll_set_force_output_enabled - enable/disable force output
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the force output
+ *
+ * Set the enable control for force output with a fixed value.
+ */
+static void dfll_set_force_output_enabled(struct tegra_dfll *td, bool enable)
+{
+       u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE);
+
+       if (enable)
+               val |= DFLL_OUTPUT_FORCE_ENABLE;
+       else
+               val &= ~DFLL_OUTPUT_FORCE_ENABLE;
+
+       dfll_writel(td, val, DFLL_OUTPUT_FORCE);
+       dfll_wmb(td);
+}
+
+/**
+ * dfll_force_output - force output a fixed value
+ * @td: DFLL instance
+ * @out_sel: value to force output
+ *
+ * Set the fixed value for force output; the DFLL will output this value.
+ */
+static int dfll_force_output(struct tegra_dfll *td, unsigned int out_sel)
+{
+       u32 val;
+
+       if (out_sel > OUT_MASK)
+               return -EINVAL;
+
+       val = dfll_set_force_output_value(td, out_sel);
+       if ((td->mode < DFLL_CLOSED_LOOP) &&
+           !(val & DFLL_OUTPUT_FORCE_ENABLE)) {
+               dfll_set_force_output_enabled(td, true);
+       }
+
+       return 0;
+}
+
 /**
  * dfll_load_lut - load the voltage lookup table
  * @td: struct tegra_dfll *
@@ -539,7 +695,7 @@ static void dfll_load_i2c_lut(struct tegra_dfll *td)
                        lut_index = i;
 
                val = regulator_list_hardware_vsel(td->vdd_reg,
-                                                    td->i2c_lut[lut_index]);
+                                                    td->lut[lut_index]);
                __raw_writel(val, td->lut_base + i * 4);
        }
 
@@ -594,24 +750,41 @@ static void dfll_init_out_if(struct tegra_dfll *td)
 {
        u32 val;
 
-       td->lut_min = 0;
-       td->lut_max = td->i2c_lut_size - 1;
-       td->lut_safe = td->lut_min + 1;
+       td->lut_min = td->lut_bottom;
+       td->lut_max = td->lut_size - 1;
+       td->lut_safe = td->lut_min + (td->lut_min < td->lut_max ? 1 : 0);
+
+       /* clear DFLL_OUTPUT_CFG before setting new value */
+       dfll_writel(td, 0, DFLL_OUTPUT_CFG);
+       dfll_wmb(td);
 
-       dfll_i2c_writel(td, 0, DFLL_OUTPUT_CFG);
        val = (td->lut_safe << DFLL_OUTPUT_CFG_SAFE_SHIFT) |
-               (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
-               (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
-       dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG);
-       dfll_i2c_wmb(td);
+             (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
+             (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
+       dfll_writel(td, val, DFLL_OUTPUT_CFG);
+       dfll_wmb(td);
 
        dfll_writel(td, 0, DFLL_OUTPUT_FORCE);
        dfll_i2c_writel(td, 0, DFLL_INTR_EN);
        dfll_i2c_writel(td, DFLL_INTR_MAX_MASK | DFLL_INTR_MIN_MASK,
                        DFLL_INTR_STS);
 
-       dfll_load_i2c_lut(td);
-       dfll_init_i2c_if(td);
+       if (td->pmu_if == TEGRA_DFLL_PMU_PWM) {
+               u32 vinit = td->reg_init_uV;
+               int vstep = td->soc->alignment.step_uv;
+               unsigned long vmin = td->lut_uv[0];
+
+               /* set initial voltage */
+               if ((vinit >= vmin) && vstep) {
+                       unsigned int vsel;
+
+                       vsel = DIV_ROUND_UP((vinit - vmin), vstep);
+                       dfll_force_output(td, vsel);
+               }
+       } else {
+               dfll_load_i2c_lut(td);
+               dfll_init_i2c_if(td);
+       }
 }
 
 /*
@@ -631,17 +804,17 @@ static void dfll_init_out_if(struct tegra_dfll *td)
 static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate)
 {
        struct dev_pm_opp *opp;
-       int i, uv;
+       int i, align_step;
 
        opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
        if (IS_ERR(opp))
                return PTR_ERR(opp);
 
-       uv = dev_pm_opp_get_voltage(opp);
+       align_step = dev_pm_opp_get_voltage(opp) / td->soc->alignment.step_uv;
        dev_pm_opp_put(opp);
 
-       for (i = 0; i < td->i2c_lut_size; i++) {
-               if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
+       for (i = td->lut_bottom; i < td->lut_size; i++) {
+               if ((td->lut_uv[i] / td->soc->alignment.step_uv) >= align_step)
                        return i;
        }
 
@@ -863,9 +1036,14 @@ static int dfll_lock(struct tegra_dfll *td)
                        return -EINVAL;
                }
 
-               dfll_i2c_set_output_enabled(td, true);
+               if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+                       dfll_pwm_set_output_enabled(td, true);
+               else
+                       dfll_i2c_set_output_enabled(td, true);
+
                dfll_set_mode(td, DFLL_CLOSED_LOOP);
                dfll_set_frequency_request(td, req);
+               dfll_set_force_output_enabled(td, false);
                return 0;
 
        default:
@@ -889,7 +1067,10 @@ static int dfll_unlock(struct tegra_dfll *td)
        case DFLL_CLOSED_LOOP:
                dfll_set_open_loop_config(td);
                dfll_set_mode(td, DFLL_OPEN_LOOP);
-               dfll_i2c_set_output_enabled(td, false);
+               if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+                       dfll_pwm_set_output_enabled(td, false);
+               else
+                       dfll_i2c_set_output_enabled(td, false);
                return 0;
 
        case DFLL_OPEN_LOOP:
@@ -1171,15 +1352,17 @@ static int attr_registers_show(struct seq_file *s, void *data)
                seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
                           dfll_i2c_readl(td, offs));
 
-       seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
-       offs = DFLL_I2C_CLK_DIVISOR;
-       seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
-                  __raw_readl(td->i2c_controller_base + offs));
-
-       seq_puts(s, "\nLUT:\n");
-       for (offs = 0; offs <  4 * MAX_DFLL_VOLTAGES; offs += 4)
+       if (td->pmu_if == TEGRA_DFLL_PMU_I2C) {
+               seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
+               offs = DFLL_I2C_CLK_DIVISOR;
                seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
-                          __raw_readl(td->lut_base + offs));
+                          __raw_readl(td->i2c_controller_base + offs));
+
+               seq_puts(s, "\nLUT:\n");
+               for (offs = 0; offs <  4 * MAX_DFLL_VOLTAGES; offs += 4)
+                       seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+                                  __raw_readl(td->lut_base + offs));
+       }
 
        return 0;
 }
@@ -1349,15 +1532,21 @@ static int dfll_init(struct tegra_dfll *td)
  */
 static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
 {
-       int i, n_voltages, reg_uV;
+       int i, n_voltages, reg_uV,reg_volt_id, align_step;
+
+       if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM))
+               return -EINVAL;
 
+       align_step = uV / td->soc->alignment.step_uv;
        n_voltages = regulator_count_voltages(td->vdd_reg);
        for (i = 0; i < n_voltages; i++) {
                reg_uV = regulator_list_voltage(td->vdd_reg, i);
                if (reg_uV < 0)
                        break;
 
-               if (uV == reg_uV)
+               reg_volt_id = reg_uV / td->soc->alignment.step_uv;
+
+               if (align_step == reg_volt_id)
                        return i;
        }
 
@@ -1371,15 +1560,21 @@ static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
  * */
 static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
 {
-       int i, n_voltages, reg_uV;
+       int i, n_voltages, reg_uV, reg_volt_id, align_step;
 
+       if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM))
+               return -EINVAL;
+
+       align_step = uV / td->soc->alignment.step_uv;
        n_voltages = regulator_count_voltages(td->vdd_reg);
        for (i = 0; i < n_voltages; i++) {
                reg_uV = regulator_list_voltage(td->vdd_reg, i);
                if (reg_uV < 0)
                        break;
 
-               if (uV <= reg_uV)
+               reg_volt_id = reg_uV / td->soc->alignment.step_uv;
+
+               if (align_step <= reg_volt_id)
                        return i;
        }
 
@@ -1387,9 +1582,61 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
        return -EINVAL;
 }
 
+/*
+ * dfll_build_pwm_lut - build the PWM regulator lookup table
+ * @td: DFLL instance
+ * @v_max: Vmax from OPP table
+ *
+ * The h/w look-up table is ignored when PWM is used as the DFLL interface to
+ * the PMIC; in that case the closed-loop output drives the duty cycle
+ * directly. The s/w look-up table that maps PWM duty cycle to voltage is
+ * still built by this function.
+ */
+static int dfll_build_pwm_lut(struct tegra_dfll *td, unsigned long v_max)
+{
+       int i;
+       unsigned long rate, reg_volt;
+       u8 lut_bottom = MAX_DFLL_VOLTAGES;
+       int v_min = td->soc->cvb->min_millivolts * 1000;
+
+       for (i = 0; i < MAX_DFLL_VOLTAGES; i++) {
+               reg_volt = td->lut_uv[i];
+
+               /* since opp voltage is exact mv */
+               reg_volt = (reg_volt / 1000) * 1000;
+               if (reg_volt > v_max)
+                       break;
+
+               td->lut[i] = i;
+               if ((lut_bottom == MAX_DFLL_VOLTAGES) && (reg_volt >= v_min))
+                       lut_bottom = i;
+       }
+
+       /* determine voltage boundaries */
+       td->lut_size = i;
+       if ((lut_bottom == MAX_DFLL_VOLTAGES) ||
+           (lut_bottom + 1 >= td->lut_size)) {
+               dev_err(td->dev, "no voltage above DFLL minimum %d mV\n",
+                       td->soc->cvb->min_millivolts);
+               return -EINVAL;
+       }
+       td->lut_bottom = lut_bottom;
+
+       /* determine rate boundaries */
+       rate = get_dvco_rate_below(td, td->lut_bottom);
+       if (!rate) {
+               dev_err(td->dev, "no opp below DFLL minimum voltage %d mV\n",
+                       td->soc->cvb->min_millivolts);
+               return -EINVAL;
+       }
+       td->dvco_rate_min = rate;
+
+       return 0;
+}
+
 /**
  * dfll_build_i2c_lut - build the I2C voltage register lookup table
  * @td: DFLL instance
+ * @v_max: Vmax from OPP table
  *
  * The DFLL hardware has 33 bytes of look-up table RAM that must be filled with
  * PMIC voltage register values that span the entire DFLL operating range.
@@ -1397,33 +1644,24 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
  * the soc-specific platform driver (td->soc->opp_dev) and the PMIC
  * register-to-voltage mapping queried from the regulator framework.
  *
- * On success, fills in td->i2c_lut and returns 0, or -err on failure.
+ * On success, fills in td->lut and returns 0, or -err on failure.
  */
-static int dfll_build_i2c_lut(struct tegra_dfll *td)
+static int dfll_build_i2c_lut(struct tegra_dfll *td, unsigned long v_max)
 {
+       unsigned long rate, v, v_opp;
        int ret = -EINVAL;
-       int j, v, v_max, v_opp;
-       int selector;
-       unsigned long rate;
-       struct dev_pm_opp *opp;
-       int lut;
-
-       rate = ULONG_MAX;
-       opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
-       if (IS_ERR(opp)) {
-               dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
-               goto out;
-       }
-       v_max = dev_pm_opp_get_voltage(opp);
-       dev_pm_opp_put(opp);
+       int j, selector, lut;
 
        v = td->soc->cvb->min_millivolts * 1000;
        lut = find_vdd_map_entry_exact(td, v);
        if (lut < 0)
                goto out;
-       td->i2c_lut[0] = lut;
+       td->lut[0] = lut;
+       td->lut_bottom = 0;
 
        for (j = 1, rate = 0; ; rate++) {
+               struct dev_pm_opp *opp;
+
                opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
                if (IS_ERR(opp))
                        break;
@@ -1435,39 +1673,64 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
                dev_pm_opp_put(opp);
 
                for (;;) {
-                       v += max(1, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
+                       v += max(1UL, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
                        if (v >= v_opp)
                                break;
 
                        selector = find_vdd_map_entry_min(td, v);
                        if (selector < 0)
                                goto out;
-                       if (selector != td->i2c_lut[j - 1])
-                               td->i2c_lut[j++] = selector;
+                       if (selector != td->lut[j - 1])
+                               td->lut[j++] = selector;
                }
 
                v = (j == MAX_DFLL_VOLTAGES - 1) ? v_max : v_opp;
                selector = find_vdd_map_entry_exact(td, v);
                if (selector < 0)
                        goto out;
-               if (selector != td->i2c_lut[j - 1])
-                       td->i2c_lut[j++] = selector;
+               if (selector != td->lut[j - 1])
+                       td->lut[j++] = selector;
 
                if (v >= v_max)
                        break;
        }
-       td->i2c_lut_size = j;
+       td->lut_size = j;
 
        if (!td->dvco_rate_min)
                dev_err(td->dev, "no opp above DFLL minimum voltage %d mV\n",
                        td->soc->cvb->min_millivolts);
-       else
+       else {
                ret = 0;
+               for (j = 0; j < td->lut_size; j++)
+                       td->lut_uv[j] =
+                               regulator_list_voltage(td->vdd_reg,
+                                                      td->lut[j]);
+       }
 
 out:
        return ret;
 }
 
+static int dfll_build_lut(struct tegra_dfll *td)
+{
+       unsigned long rate, v_max;
+       struct dev_pm_opp *opp;
+
+       rate = ULONG_MAX;
+       opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
+       if (IS_ERR(opp)) {
+               dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
+               return -EINVAL;
+       }
+       v_max = dev_pm_opp_get_voltage(opp);
+       dev_pm_opp_put(opp);
+
+       if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+               return dfll_build_pwm_lut(td, v_max);
+       else
+               return dfll_build_i2c_lut(td, v_max);
+}
+
 /**
  * read_dt_param - helper function for reading required parameters from the DT
  * @td: DFLL instance
@@ -1526,11 +1789,56 @@ static int dfll_fetch_i2c_params(struct tegra_dfll *td)
        }
        td->i2c_reg = vsel_reg;
 
-       ret = dfll_build_i2c_lut(td);
-       if (ret) {
-               dev_err(td->dev, "couldn't build I2C LUT\n");
+       return 0;
+}
+
+static int dfll_fetch_pwm_params(struct tegra_dfll *td)
+{
+       int ret, i;
+       u32 pwm_period;
+
+       if (!td->soc->alignment.step_uv || !td->soc->alignment.offset_uv) {
+               dev_err(td->dev,
+                       "Missing step or alignment info for PWM regulator");
+               return -EINVAL;
+       }
+       for (i = 0; i < MAX_DFLL_VOLTAGES; i++)
+               td->lut_uv[i] = td->soc->alignment.offset_uv +
+                               i * td->soc->alignment.step_uv;
+
+       ret = read_dt_param(td, "nvidia,pwm-tristate-microvolts",
+                           &td->reg_init_uV);
+       if (!ret) {
+               dev_err(td->dev, "couldn't get initialized voltage\n");
+               return ret;
+       }
+
+       ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period);
+       if (!ret) {
+               dev_err(td->dev, "couldn't get PWM period\n");
                return ret;
        }
+       td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);
+
+       td->pwm_pin = devm_pinctrl_get(td->dev);
+       if (IS_ERR(td->pwm_pin)) {
+               dev_err(td->dev, "DT: missing pinctrl device\n");
+               return PTR_ERR(td->pwm_pin);
+       }
+
+       td->pwm_enable_state = pinctrl_lookup_state(td->pwm_pin,
+                                                   "dvfs_pwm_enable");
+       if (IS_ERR(td->pwm_enable_state)) {
+               dev_err(td->dev, "DT: missing pwm enabled state\n");
+               return PTR_ERR(td->pwm_enable_state);
+       }
+
+       td->pwm_disable_state = pinctrl_lookup_state(td->pwm_pin,
+                                                    "dvfs_pwm_disable");
+       if (IS_ERR(td->pwm_disable_state)) {
+               dev_err(td->dev, "DT: missing pwm disabled state\n");
+               return PTR_ERR(td->pwm_disable_state);
+       }
 
        return 0;
 }
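To make the PWM numbers concrete, a stand-alone sketch of the arithmetic used by dfll_fetch_pwm_params() and dfll_pwm_set_output_enabled() above. The 2.5 us period and 51 MHz reference rate are hypothetical illustration values, not taken from this commit; MAX_DFLL_VOLTAGES follows the 33-entry LUT mentioned in the dfll_load_lut() kernel-doc.

/* Illustrative arithmetic only; period and reference rate are hypothetical. */
#include <stdio.h>

#define NSEC_PER_SEC            1000000000UL
#define MAX_DFLL_VOLTAGES       33
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long pwm_period = 2500;        /* ns, hypothetical DT value */
        unsigned long ref_rate = 51000000;      /* Hz, hypothetical DFLL ref */
        unsigned long pwm_rate, div;

        /* same formula as dfll_fetch_pwm_params() */
        pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);

        /* same divider computation as dfll_pwm_set_output_enabled() */
        div = DIV_ROUND_UP(ref_rate, pwm_rate);

        /* prints pwm_rate=12800000 Hz, divider=4 for these inputs */
        printf("pwm_rate=%lu Hz, divider=%lu\n", pwm_rate, div);
        return 0;
}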
@@ -1597,16 +1905,6 @@ int tegra_dfll_register(struct platform_device *pdev,
 
        td->soc = soc;
 
-       td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
-       if (IS_ERR(td->vdd_reg)) {
-               ret = PTR_ERR(td->vdd_reg);
-               if (ret != -EPROBE_DEFER)
-                       dev_err(td->dev, "couldn't get vdd_cpu regulator: %d\n",
-                               ret);
-
-               return ret;
-       }
-
        td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
        if (IS_ERR(td->dvco_rst)) {
                dev_err(td->dev, "couldn't get dvco reset\n");
@@ -1619,10 +1917,27 @@ int tegra_dfll_register(struct platform_device *pdev,
                return ret;
        }
 
-       ret = dfll_fetch_i2c_params(td);
+       if (of_property_read_bool(td->dev->of_node, "nvidia,pwm-to-pmic")) {
+               td->pmu_if = TEGRA_DFLL_PMU_PWM;
+               ret = dfll_fetch_pwm_params(td);
+       } else  {
+               td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
+               if (IS_ERR(td->vdd_reg)) {
+                       dev_err(td->dev, "couldn't get vdd_cpu regulator\n");
+                       return PTR_ERR(td->vdd_reg);
+               }
+               td->pmu_if = TEGRA_DFLL_PMU_I2C;
+               ret = dfll_fetch_i2c_params(td);
+       }
        if (ret)
                return ret;
 
+       ret = dfll_build_lut(td);
+       if (ret) {
+               dev_err(td->dev, "couldn't build LUT\n");
+               return ret;
+       }
+
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem) {
                dev_err(td->dev, "no control register resource\n");
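Summing up the PWM path added to this file, a stand-alone sketch of the voltage ladder built in dfll_fetch_pwm_params() and of the initial selector chosen in dfll_init_out_if(). The step, offset and tristate voltages are hypothetical stand-ins for values that would normally come from the device tree or regulator.

/* User-space model of the PWM voltage ladder; all inputs are hypothetical. */
#include <stdio.h>

#define MAX_DFLL_VOLTAGES       33
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long offset_uv = 708000;       /* hypothetical ladder base */
        unsigned long step_uv = 19200;          /* hypothetical step */
        unsigned long reg_init_uv = 900000;     /* hypothetical tristate level */
        unsigned long lut_uv[MAX_DFLL_VOLTAGES];
        unsigned int i, vsel;

        /* lut_uv[i] = offset + i * step, as in dfll_fetch_pwm_params() */
        for (i = 0; i < MAX_DFLL_VOLTAGES; i++)
                lut_uv[i] = offset_uv + i * step_uv;

        /* initial forced selector, as in dfll_init_out_if() */
        vsel = DIV_ROUND_UP(reg_init_uv - lut_uv[0], step_uv);

        /* prints lut_uv[0]=708000 lut_uv[32]=1322400 vsel=10 */
        printf("lut_uv[0]=%lu lut_uv[%d]=%lu vsel=%u\n",
               lut_uv[0], MAX_DFLL_VOLTAGES - 1,
               lut_uv[MAX_DFLL_VOLTAGES - 1], vsel);
        return 0;
}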
index 83352c8078f27a9644d7d5bb165432d1bcddef37..85d0d95223f354361b57a910c755ebb1a2098946 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * clk-dfll.h - prototypes and macros for the Tegra DFLL clocksource driver
- * Copyright (C) 2013 NVIDIA Corporation.  All rights reserved.
+ * Copyright (C) 2013-2019 NVIDIA Corporation.  All rights reserved.
  *
  * Aleksandr Frid <afrid@nvidia.com>
  * Paul Walmsley <pwalmsley@nvidia.com>
 #include <linux/reset.h>
 #include <linux/types.h>
 
+#include "cvb.h"
+
 /**
  * struct tegra_dfll_soc_data - SoC-specific hooks/integration for the DFLL driver
  * @dev: struct device * that holds the OPP table for the DFLL
  * @max_freq: maximum frequency supported on this SoC
  * @cvb: CPU frequency table for this SoC
+ * @alignment: parameters of the regulator step and offset
  * @init_clock_trimmers: callback to initialize clock trimmers
  * @set_clock_trimmers_high: callback to tune clock trimmers for high voltage
  * @set_clock_trimmers_low: callback to tune clock trimmers for low voltage
@@ -35,6 +38,7 @@ struct tegra_dfll_soc_data {
        struct device *dev;
        unsigned long max_freq;
        const struct cvb_table *cvb;
+       struct rail_alignment alignment;
 
        void (*init_clock_trimmers)(void);
        void (*set_clock_trimmers_high)(void);
index 269d3595758bebabf0f72d6448ba6633e9cd3c8f..e8ec42bf863869f1d7b73689871ed4e4dadfe21c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Tegra124 DFLL FCPU clock source driver
  *
- * Copyright (C) 2012-2014 NVIDIA Corporation.  All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation.  All rights reserved.
  *
  * Aleksandr Frid <afrid@nvidia.com>
  * Paul Walmsley <pwalmsley@nvidia.com>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 #include <soc/tegra/fuse.h>
 
 #include "clk.h"
 #include "clk-dfll.h"
 #include "cvb.h"
 
+struct dfll_fcpu_data {
+       const unsigned long *cpu_max_freq_table;
+       unsigned int cpu_max_freq_table_size;
+       const struct cvb_table *cpu_cvb_tables;
+       unsigned int cpu_cvb_tables_size;
+};
+
 /* Maximum CPU frequency, indexed by CPU speedo id */
-static const unsigned long cpu_max_freq_table[] = {
+static const unsigned long tegra124_cpu_max_freq_table[] = {
        [0] = 2014500000UL,
        [1] = 2320500000UL,
        [2] = 2116500000UL,
@@ -42,9 +51,6 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
                .process_id = -1,
                .min_millivolts = 900,
                .max_millivolts = 1260,
-               .alignment = {
-                       .step_uv = 10000, /* 10mV */
-               },
                .speedo_scale = 100,
                .voltage_scale = 1000,
                .entries = {
@@ -82,16 +88,493 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
        },
 };
 
+static const unsigned long tegra210_cpu_max_freq_table[] = {
+       [0] = 1912500000UL,
+       [1] = 1912500000UL,
+       [2] = 2218500000UL,
+       [3] = 1785000000UL,
+       [4] = 1632000000UL,
+       [5] = 1912500000UL,
+       [6] = 2014500000UL,
+       [7] = 1734000000UL,
+       [8] = 1683000000UL,
+       [9] = 1555500000UL,
+       [10] = 1504500000UL,
+};
+
+#define CPU_CVB_TABLE \
+       .speedo_scale = 100,    \
+       .voltage_scale = 1000,  \
+       .entries = {            \
+               {  204000000UL, { 1007452, -23865, 370 } }, \
+               {  306000000UL, { 1052709, -24875, 370 } }, \
+               {  408000000UL, { 1099069, -25895, 370 } }, \
+               {  510000000UL, { 1146534, -26905, 370 } }, \
+               {  612000000UL, { 1195102, -27915, 370 } }, \
+               {  714000000UL, { 1244773, -28925, 370 } }, \
+               {  816000000UL, { 1295549, -29935, 370 } }, \
+               {  918000000UL, { 1347428, -30955, 370 } }, \
+               { 1020000000UL, { 1400411, -31965, 370 } }, \
+               { 1122000000UL, { 1454497, -32975, 370 } }, \
+               { 1224000000UL, { 1509687, -33985, 370 } }, \
+               { 1326000000UL, { 1565981, -35005, 370 } }, \
+               { 1428000000UL, { 1623379, -36015, 370 } }, \
+               { 1530000000UL, { 1681880, -37025, 370 } }, \
+               { 1632000000UL, { 1741485, -38035, 370 } }, \
+               { 1734000000UL, { 1802194, -39055, 370 } }, \
+               { 1836000000UL, { 1864006, -40065, 370 } }, \
+               { 1912500000UL, { 1910780, -40815, 370 } }, \
+               { 2014500000UL, { 1227000,      0,   0 } }, \
+               { 2218500000UL, { 1227000,      0,   0 } }, \
+               {          0UL, {       0,      0,   0 } }, \
+       }
+
+#define CPU_CVB_TABLE_XA \
+       .speedo_scale = 100,    \
+       .voltage_scale = 1000,  \
+       .entries = {            \
+               {  204000000UL, { 1250024, -39785, 565 } }, \
+               {  306000000UL, { 1297556, -41145, 565 } }, \
+               {  408000000UL, { 1346718, -42505, 565 } }, \
+               {  510000000UL, { 1397511, -43855, 565 } }, \
+               {  612000000UL, { 1449933, -45215, 565 } }, \
+               {  714000000UL, { 1503986, -46575, 565 } }, \
+               {  816000000UL, { 1559669, -47935, 565 } }, \
+               {  918000000UL, { 1616982, -49295, 565 } }, \
+               { 1020000000UL, { 1675926, -50645, 565 } }, \
+               { 1122000000UL, { 1736500, -52005, 565 } }, \
+               { 1224000000UL, { 1798704, -53365, 565 } }, \
+               { 1326000000UL, { 1862538, -54725, 565 } }, \
+               { 1428000000UL, { 1928003, -56085, 565 } }, \
+               { 1530000000UL, { 1995097, -57435, 565 } }, \
+               { 1606500000UL, { 2046149, -58445, 565 } }, \
+               { 1632000000UL, { 2063822, -58795, 565 } }, \
+               {          0UL, {       0,      0,   0 } }, \
+       }
+
+#define CPU_CVB_TABLE_EUCM1 \
+       .speedo_scale = 100,    \
+       .voltage_scale = 1000,  \
+       .entries = {            \
+               {  204000000UL, {  734429, 0, 0 } }, \
+               {  306000000UL, {  768191, 0, 0 } }, \
+               {  408000000UL, {  801953, 0, 0 } }, \
+               {  510000000UL, {  835715, 0, 0 } }, \
+               {  612000000UL, {  869477, 0, 0 } }, \
+               {  714000000UL, {  903239, 0, 0 } }, \
+               {  816000000UL, {  937001, 0, 0 } }, \
+               {  918000000UL, {  970763, 0, 0 } }, \
+               { 1020000000UL, { 1004525, 0, 0 } }, \
+               { 1122000000UL, { 1038287, 0, 0 } }, \
+               { 1224000000UL, { 1072049, 0, 0 } }, \
+               { 1326000000UL, { 1105811, 0, 0 } }, \
+               { 1428000000UL, { 1130000, 0, 0 } }, \
+               { 1555500000UL, { 1130000, 0, 0 } }, \
+               { 1632000000UL, { 1170000, 0, 0 } }, \
+               { 1734000000UL, { 1227500, 0, 0 } }, \
+               {          0UL, {       0, 0, 0 } }, \
+       }
+
+#define CPU_CVB_TABLE_EUCM2 \
+       .speedo_scale = 100,    \
+       .voltage_scale = 1000,  \
+       .entries = {            \
+               {  204000000UL, {  742283, 0, 0 } }, \
+               {  306000000UL, {  776249, 0, 0 } }, \
+               {  408000000UL, {  810215, 0, 0 } }, \
+               {  510000000UL, {  844181, 0, 0 } }, \
+               {  612000000UL, {  878147, 0, 0 } }, \
+               {  714000000UL, {  912113, 0, 0 } }, \
+               {  816000000UL, {  946079, 0, 0 } }, \
+               {  918000000UL, {  980045, 0, 0 } }, \
+               { 1020000000UL, { 1014011, 0, 0 } }, \
+               { 1122000000UL, { 1047977, 0, 0 } }, \
+               { 1224000000UL, { 1081943, 0, 0 } }, \
+               { 1326000000UL, { 1090000, 0, 0 } }, \
+               { 1479000000UL, { 1090000, 0, 0 } }, \
+               { 1555500000UL, { 1162000, 0, 0 } }, \
+               { 1683000000UL, { 1195000, 0, 0 } }, \
+               {          0UL, {       0, 0, 0 } }, \
+       }
+
+#define CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
+       .speedo_scale = 100,    \
+       .voltage_scale = 1000,  \
+       .entries = {            \
+               {  204000000UL, {  742283, 0, 0 } }, \
+               {  306000000UL, {  776249, 0, 0 } }, \
+               {  408000000UL, {  810215, 0, 0 } }, \
+               {  510000000UL, {  844181, 0, 0 } }, \
+               {  612000000UL, {  878147, 0, 0 } }, \
+               {  714000000UL, {  912113, 0, 0 } }, \
+               {  816000000UL, {  946079, 0, 0 } }, \
+               {  918000000UL, {  980045, 0, 0 } }, \
+               { 1020000000UL, { 1014011, 0, 0 } }, \
+               { 1122000000UL, { 1047977, 0, 0 } }, \
+               { 1224000000UL, { 1081943, 0, 0 } }, \
+               { 1326000000UL, { 1090000, 0, 0 } }, \
+               { 1479000000UL, { 1090000, 0, 0 } }, \
+               { 1504500000UL, { 1120000, 0, 0 } }, \
+               {          0UL, {       0, 0, 0 } }, \
+       }
+
+#define CPU_CVB_TABLE_ODN \
+       .speedo_scale = 100,    \
+       .voltage_scale = 1000,  \
+       .entries = {            \
+               {  204000000UL, {  721094, 0, 0 } }, \
+               {  306000000UL, {  754040, 0, 0 } }, \
+               {  408000000UL, {  786986, 0, 0 } }, \
+               {  510000000UL, {  819932, 0, 0 } }, \
+               {  612000000UL, {  852878, 0, 0 } }, \
+               {  714000000UL, {  885824, 0, 0 } }, \
+               {  816000000UL, {  918770, 0, 0 } }, \
+               {  918000000UL, {  915716, 0, 0 } }, \
+               { 1020000000UL, {  984662, 0, 0 } }, \
+               { 1122000000UL, { 1017608, 0, 0 } }, \
+               { 1224000000UL, { 1050554, 0, 0 } }, \
+               { 1326000000UL, { 1083500, 0, 0 } }, \
+               { 1428000000UL, { 1116446, 0, 0 } }, \
+               { 1581000000UL, { 1130000, 0, 0 } }, \
+               { 1683000000UL, { 1168000, 0, 0 } }, \
+               { 1785000000UL, { 1227500, 0, 0 } }, \
+               {          0UL, {       0, 0, 0 } }, \
+       }
+
+static struct cvb_table tegra210_cpu_cvb_tables[] = {
+       {
+               .speedo_id = 10,
+               .process_id = 0,
+               .min_millivolts = 840,
+               .max_millivolts = 1120,
+               CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 10,
+               .process_id = 1,
+               .min_millivolts = 840,
+               .max_millivolts = 1120,
+               CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 9,
+               .process_id = 0,
+               .min_millivolts = 900,
+               .max_millivolts = 1162,
+               CPU_CVB_TABLE_EUCM2,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+               }
+       },
+       {
+               .speedo_id = 9,
+               .process_id = 1,
+               .min_millivolts = 900,
+               .max_millivolts = 1162,
+               CPU_CVB_TABLE_EUCM2,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+               }
+       },
+       {
+               .speedo_id = 8,
+               .process_id = 0,
+               .min_millivolts = 900,
+               .max_millivolts = 1195,
+               CPU_CVB_TABLE_EUCM2,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+               }
+       },
+       {
+               .speedo_id = 8,
+               .process_id = 1,
+               .min_millivolts = 900,
+               .max_millivolts = 1195,
+               CPU_CVB_TABLE_EUCM2,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+               }
+       },
+       {
+               .speedo_id = 7,
+               .process_id = 0,
+               .min_millivolts = 841,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE_EUCM1,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 7,
+               .process_id = 1,
+               .min_millivolts = 841,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE_EUCM1,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 6,
+               .process_id = 0,
+               .min_millivolts = 870,
+               .max_millivolts = 1150,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+               }
+       },
+       {
+               .speedo_id = 6,
+               .process_id = 1,
+               .min_millivolts = 870,
+               .max_millivolts = 1150,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune1 = 0x25501d0,
+               }
+       },
+       {
+               .speedo_id = 5,
+               .process_id = 0,
+               .min_millivolts = 818,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 5,
+               .process_id = 1,
+               .min_millivolts = 818,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x25501d0,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 4,
+               .process_id = -1,
+               .min_millivolts = 918,
+               .max_millivolts = 1113,
+               CPU_CVB_TABLE_XA,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune1 = 0x17711BD,
+               }
+       },
+       {
+               .speedo_id = 3,
+               .process_id = 0,
+               .min_millivolts = 825,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE_ODN,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 3,
+               .process_id = 1,
+               .min_millivolts = 825,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE_ODN,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x25501d0,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 2,
+               .process_id = 0,
+               .min_millivolts = 870,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+               }
+       },
+       {
+               .speedo_id = 2,
+               .process_id = 1,
+               .min_millivolts = 870,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune1 = 0x25501d0,
+               }
+       },
+       {
+               .speedo_id = 1,
+               .process_id = 0,
+               .min_millivolts = 837,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 1,
+               .process_id = 1,
+               .min_millivolts = 837,
+               .max_millivolts = 1227,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x25501d0,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 0,
+               .process_id = 0,
+               .min_millivolts = 850,
+               .max_millivolts = 1170,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x20091d9,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+       {
+               .speedo_id = 0,
+               .process_id = 1,
+               .min_millivolts = 850,
+               .max_millivolts = 1170,
+               CPU_CVB_TABLE,
+               .cpu_dfll_data = {
+                       .tune0_low = 0xffead0ff,
+                       .tune0_high = 0xffead0ff,
+                       .tune1 = 0x25501d0,
+                       .tune_high_min_millivolts = 864,
+               }
+       },
+};
+
+static const struct dfll_fcpu_data tegra124_dfll_fcpu_data = {
+       .cpu_max_freq_table = tegra124_cpu_max_freq_table,
+       .cpu_max_freq_table_size = ARRAY_SIZE(tegra124_cpu_max_freq_table),
+       .cpu_cvb_tables = tegra124_cpu_cvb_tables,
+       .cpu_cvb_tables_size = ARRAY_SIZE(tegra124_cpu_cvb_tables)
+};
+
+static const struct dfll_fcpu_data tegra210_dfll_fcpu_data = {
+       .cpu_max_freq_table = tegra210_cpu_max_freq_table,
+       .cpu_max_freq_table_size = ARRAY_SIZE(tegra210_cpu_max_freq_table),
+       .cpu_cvb_tables = tegra210_cpu_cvb_tables,
+       .cpu_cvb_tables_size = ARRAY_SIZE(tegra210_cpu_cvb_tables),
+};
+
+static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
+       {
+               .compatible = "nvidia,tegra124-dfll",
+               .data = &tegra124_dfll_fcpu_data,
+       },
+       {
+               .compatible = "nvidia,tegra210-dfll",
+               .data = &tegra210_dfll_fcpu_data
+       },
+       { },
+};
+
+static void get_alignment_from_dt(struct device *dev,
+                                 struct rail_alignment *align)
+{
+       if (of_property_read_u32(dev->of_node,
+                                "nvidia,pwm-voltage-step-microvolts",
+                                &align->step_uv))
+               align->step_uv = 0;
+
+       if (of_property_read_u32(dev->of_node,
+                                "nvidia,pwm-min-microvolts",
+                                &align->offset_uv))
+               align->offset_uv = 0;
+}
+
+static int get_alignment_from_regulator(struct device *dev,
+                                        struct rail_alignment *align)
+{
+       struct regulator *reg = devm_regulator_get(dev, "vdd-cpu");
+
+       if (IS_ERR(reg))
+               return PTR_ERR(reg);
+
+       align->offset_uv = regulator_list_voltage(reg, 0);
+       align->step_uv = regulator_get_linear_step(reg);
+
+       devm_regulator_put(reg);
+
+       return 0;
+}
+
 static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
 {
        int process_id, speedo_id, speedo_value, err;
        struct tegra_dfll_soc_data *soc;
+       const struct dfll_fcpu_data *fcpu_data;
+       struct rail_alignment align;
+
+       fcpu_data = of_device_get_match_data(&pdev->dev);
+       if (!fcpu_data)
+               return -ENODEV;
 
        process_id = tegra_sku_info.cpu_process_id;
        speedo_id = tegra_sku_info.cpu_speedo_id;
        speedo_value = tegra_sku_info.cpu_speedo_value;
 
-       if (speedo_id >= ARRAY_SIZE(cpu_max_freq_table)) {
+       if (speedo_id >= fcpu_data->cpu_max_freq_table_size) {
                dev_err(&pdev->dev, "unknown max CPU freq for speedo_id=%d\n",
                        speedo_id);
                return -ENODEV;
@@ -107,12 +590,22 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       soc->max_freq = cpu_max_freq_table[speedo_id];
+       if (of_property_read_bool(pdev->dev.of_node, "nvidia,pwm-to-pmic")) {
+               get_alignment_from_dt(&pdev->dev, &align);
+       } else {
+               err = get_alignment_from_regulator(&pdev->dev, &align);
+               if (err)
+                       return err;
+       }
+
+       soc->max_freq = fcpu_data->cpu_max_freq_table[speedo_id];
+
+       soc->cvb = tegra_cvb_add_opp_table(soc->dev, fcpu_data->cpu_cvb_tables,
+                                          fcpu_data->cpu_cvb_tables_size,
+                                          &align, process_id, speedo_id,
+                                          speedo_value, soc->max_freq);
+       soc->alignment = align;
 
-       soc->cvb = tegra_cvb_add_opp_table(soc->dev, tegra124_cpu_cvb_tables,
-                                          ARRAY_SIZE(tegra124_cpu_cvb_tables),
-                                          process_id, speedo_id, speedo_value,
-                                          soc->max_freq);
        if (IS_ERR(soc->cvb)) {
                dev_err(&pdev->dev, "couldn't add OPP table: %ld\n",
                        PTR_ERR(soc->cvb));
@@ -133,20 +626,17 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
        struct tegra_dfll_soc_data *soc;
 
        soc = tegra_dfll_unregister(pdev);
-       if (IS_ERR(soc))
+       if (IS_ERR(soc)) {
                dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
                        PTR_ERR(soc));
+               return PTR_ERR(soc);
+       }
 
        tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
 
        return 0;
 }
 
-static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
-       { .compatible = "nvidia,tegra124-dfll", },
-       { },
-};
-
 static const struct dev_pm_ops tegra124_dfll_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
                           tegra_dfll_runtime_resume, NULL)
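The probe path above now selects per-SoC limits through the .data pointers of the OF match table via of_device_get_match_data(). A stand-alone model of that selection, with simplified stand-in structures and table sizes rather than the real kernel API:

/* User-space model of match-data lookup; names and sizes are stand-ins. */
#include <stdio.h>
#include <string.h>

struct fcpu_data { const char *name; unsigned int table_size; };

static const struct fcpu_data tegra124_data = { "tegra124", 3 };
static const struct fcpu_data tegra210_data = { "tegra210", 11 };

struct of_match { const char *compatible; const void *data; };

static const struct of_match match_table[] = {
        { "nvidia,tegra124-dfll", &tegra124_data },
        { "nvidia,tegra210-dfll", &tegra210_data },
        { NULL, NULL },
};

/* rough analogue of of_device_get_match_data() */
static const void *get_match_data(const char *compatible)
{
        const struct of_match *m;

        for (m = match_table; m->compatible; m++)
                if (!strcmp(m->compatible, compatible))
                        return m->data;
        return NULL;
}

int main(void)
{
        const struct fcpu_data *d = get_match_data("nvidia,tegra210-dfll");

        if (d)
                printf("%s: %u max-freq entries\n", d->name, d->table_size);
        return 0;
}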
index da9e8e7b5ce5c967daf791372be66f07e4ebdb82..35eeb6adc68edead9d2321fea051f4c9cc904f6b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Utility functions for parsing Tegra CVB voltage tables
  *
- * Copyright (C) 2012-2014 NVIDIA Corporation.  All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -62,9 +62,9 @@ static int round_voltage(int mv, const struct rail_alignment *align, int up)
 }
 
 static int build_opp_table(struct device *dev, const struct cvb_table *table,
+                          struct rail_alignment *align,
                           int speedo_value, unsigned long max_freq)
 {
-       const struct rail_alignment *align = &table->alignment;
        int i, ret, dfll_mv, min_mv, max_mv;
 
        min_mv = round_voltage(table->min_millivolts, align, UP);
@@ -109,8 +109,9 @@ static int build_opp_table(struct device *dev, const struct cvb_table *table,
  */
 const struct cvb_table *
 tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables,
-                       size_t count, int process_id, int speedo_id,
-                       int speedo_value, unsigned long max_freq)
+                       size_t count, struct rail_alignment *align,
+                       int process_id, int speedo_id, int speedo_value,
+                       unsigned long max_freq)
 {
        size_t i;
        int ret;
@@ -124,7 +125,8 @@ tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables,
                if (table->process_id != -1 && table->process_id != process_id)
                        continue;
 
-               ret = build_opp_table(dev, table, speedo_value, max_freq);
+               ret = build_opp_table(dev, table, align, speedo_value,
+                                     max_freq);
                return ret ? ERR_PTR(ret) : table;
        }
 
index c1f077993b2aacbea3a89f0c3557432e776a5115..91a1941c21ef030617da7e72833b3826cd03a826 100644 (file)
@@ -41,6 +41,7 @@ struct cvb_cpu_dfll_data {
        u32 tune0_low;
        u32 tune0_high;
        u32 tune1;
+       unsigned int tune_high_min_millivolts;
 };
 
 struct cvb_table {
@@ -49,7 +50,6 @@ struct cvb_table {
 
        int min_millivolts;
        int max_millivolts;
-       struct rail_alignment alignment;
 
        int speedo_scale;
        int voltage_scale;
@@ -59,8 +59,9 @@ struct cvb_table {
 
 const struct cvb_table *
 tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *cvb_tables,
-                       size_t count, int process_id, int speedo_id,
-                       int speedo_value, unsigned long max_freq);
+                       size_t count, struct rail_alignment *align,
+                       int process_id, int speedo_id, int speedo_value,
+                       unsigned long max_freq);
 void tegra_cvb_remove_opp_table(struct device *dev,
                                const struct cvb_table *table,
                                unsigned long max_freq);
index f65cc0ff76abdb630b0694eb7c3babcc2c46ff71..b0908ec62f73b057828ec0965a80bd9582f8acb7 100644 (file)
@@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np)
        if (ret)
                return ret;
 
-       zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) *
-                                               clock_max_idx, GFP_KERNEL);
+       zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx),
+                             GFP_KERNEL);
        if (!zynqmp_data)
                return -ENOMEM;
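The removed expression sized each array element as the whole container (sizeof(*zynqmp_data)) instead of one hws[] entry; struct_size() computes container-plus-flexible-array in a single, overflow-checked expression. A stand-alone sketch of the equivalent arithmetic, using a simplified stand-in structure and without the overflow checking the kernel helper adds:

/* User-space sketch of the struct_size() arithmetic; struct is a stand-in. */
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for a container ending in a flexible array */
struct clk_data {
        unsigned int num;
        void *hws[];            /* flexible array member */
};

/* unchecked user-space equivalent of the kernel's struct_size() helper */
#define STRUCT_SIZE(p, member, n) \
        (sizeof(*(p)) + sizeof((p)->member[0]) * (n))

int main(void)
{
        struct clk_data *d = NULL;      /* only used inside sizeof */
        size_t n = 100;

        printf("correct: %zu bytes, old formula: %zu bytes\n",
               STRUCT_SIZE(d, hws, n),
               sizeof(*d) + sizeof(*d) * n);    /* what the old code computed */

        d = calloc(1, STRUCT_SIZE(d, hws, n));
        free(d);
        return 0;
}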
 
index 688f10227793d73a687416ad2a3aa69fab53f3a6..1a6778e81f903d1f14733e86916f25054d8b3b89 100644 (file)
@@ -272,8 +272,8 @@ config ARM_TEGRA20_CPUFREQ
          This adds the CPUFreq driver support for Tegra20 SOCs.
 
 config ARM_TEGRA124_CPUFREQ
-       tristate "Tegra124 CPUFreq support"
-       depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR
+       bool "Tegra124 CPUFreq support"
+       depends on ARCH_TEGRA && CPUFREQ_DT
        default y
        help
          This adds the CPUFreq driver support for Tegra124 SOCs.
index b1c5468dca16b3da220e69832fa6c83b20bc0808..47729a22c159dfbfde80a47245d40eb83262a9d3 100644 (file)
@@ -119,6 +119,7 @@ static const struct of_device_id blacklist[] __initconst = {
        { .compatible = "mediatek,mt8176", },
 
        { .compatible = "nvidia,tegra124", },
+       { .compatible = "nvidia,tegra210", },
 
        { .compatible = "qcom,apq8096", },
        { .compatible = "qcom,msm8996", },
index 43530254201a8b3a5f98fdcb032ea6c3b635bb96..ba3795e13ac6f5904a197572c22f908beae64760 100644 (file)
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
-#include <linux/regulator/consumer.h>
 #include <linux/types.h>
 
 struct tegra124_cpufreq_priv {
-       struct regulator *vdd_cpu_reg;
        struct clk *cpu_clk;
        struct clk *pllp_clk;
        struct clk *pllx_clk;
@@ -60,14 +58,6 @@ static int tegra124_cpu_switch_to_dfll(struct tegra124_cpufreq_priv *priv)
        return ret;
 }
 
-static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv)
-{
-       clk_set_parent(priv->cpu_clk, priv->pllp_clk);
-       clk_disable_unprepare(priv->dfll_clk);
-       regulator_sync_voltage(priv->vdd_cpu_reg);
-       clk_set_parent(priv->cpu_clk, priv->pllx_clk);
-}
-
 static int tegra124_cpufreq_probe(struct platform_device *pdev)
 {
        struct tegra124_cpufreq_priv *priv;
@@ -88,16 +78,10 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
        if (!np)
                return -ENODEV;
 
-       priv->vdd_cpu_reg = regulator_get(cpu_dev, "vdd-cpu");
-       if (IS_ERR(priv->vdd_cpu_reg)) {
-               ret = PTR_ERR(priv->vdd_cpu_reg);
-               goto out_put_np;
-       }
-
        priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
        if (IS_ERR(priv->cpu_clk)) {
                ret = PTR_ERR(priv->cpu_clk);
-               goto out_put_vdd_cpu_reg;
+               goto out_put_np;
        }
 
        priv->dfll_clk = of_clk_get_by_name(np, "dfll");
@@ -129,15 +113,13 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
                platform_device_register_full(&cpufreq_dt_devinfo);
        if (IS_ERR(priv->cpufreq_dt_pdev)) {
                ret = PTR_ERR(priv->cpufreq_dt_pdev);
-               goto out_switch_to_pllx;
+               goto out_put_pllp_clk;
        }
 
        platform_set_drvdata(pdev, priv);
 
        return 0;
 
-out_switch_to_pllx:
-       tegra124_cpu_switch_to_pllx(priv);
 out_put_pllp_clk:
        clk_put(priv->pllp_clk);
 out_put_pllx_clk:
@@ -146,34 +128,15 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
        clk_put(priv->dfll_clk);
 out_put_cpu_clk:
        clk_put(priv->cpu_clk);
-out_put_vdd_cpu_reg:
-       regulator_put(priv->vdd_cpu_reg);
 out_put_np:
        of_node_put(np);
 
        return ret;
 }
 
-static int tegra124_cpufreq_remove(struct platform_device *pdev)
-{
-       struct tegra124_cpufreq_priv *priv = platform_get_drvdata(pdev);
-
-       platform_device_unregister(priv->cpufreq_dt_pdev);
-       tegra124_cpu_switch_to_pllx(priv);
-
-       clk_put(priv->pllp_clk);
-       clk_put(priv->pllx_clk);
-       clk_put(priv->dfll_clk);
-       clk_put(priv->cpu_clk);
-       regulator_put(priv->vdd_cpu_reg);
-
-       return 0;
-}
-
 static struct platform_driver tegra124_cpufreq_platdrv = {
        .driver.name    = "cpufreq-tegra124",
        .probe          = tegra124_cpufreq_probe,
-       .remove         = tegra124_cpufreq_remove,
 };
 
 static int __init tegra_cpufreq_init(void)
@@ -181,7 +144,8 @@ static int __init tegra_cpufreq_init(void)
        int ret;
        struct platform_device *pdev;
 
-       if (!of_machine_is_compatible("nvidia,tegra124"))
+       if (!(of_machine_is_compatible("nvidia,tegra124") ||
+               of_machine_is_compatible("nvidia,tegra210")))
                return -ENODEV;
 
        /*
index 425d5d97461311277d925585d7f2eec3ccbcd967..77f4c0045de2972f3eb010f4f81022cf908b5ee2 100644 (file)
@@ -4503,7 +4503,7 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
                nctx->cb = dpaa2_caam_fqdan_cb;
 
                /* Register notification callbacks */
-               err = dpaa2_io_service_register(NULL, nctx);
+               err = dpaa2_io_service_register(NULL, nctx, dev);
                if (unlikely(err)) {
                        dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
                        nctx->cb = NULL;
@@ -4536,7 +4536,7 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
                ppriv = per_cpu_ptr(priv->ppriv, cpu);
                if (!ppriv->nctx.cb)
                        break;
-               dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+               dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
        }
 
        for_each_online_cpu(cpu) {
@@ -4556,7 +4556,7 @@ static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
 
        for_each_online_cpu(cpu) {
                ppriv = per_cpu_ptr(priv->ppriv, cpu);
-               dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+               dpaa2_io_service_deregister(NULL, &ppriv->nctx, priv->dev);
                dpaa2_io_store_destroy(ppriv->store);
 
                if (++i == priv->num_pairs)
index 4213cb0bb2a792cc6bdb2da5e2cc2ff621687654..f8664bac9fa82bae6d1bf7be616c3a3a83b562bb 100644 (file)
@@ -295,8 +295,8 @@ struct altr_sdram_mc_data {
 #define S10_SYSMGR_ECC_INTSTAT_DERR_OFST  0xA0
 
 /* Sticky registers for Uncorrected Errors */
-#define S10_SYSMGR_UE_VAL_OFST            0x120
-#define S10_SYSMGR_UE_ADDR_OFST           0x124
+#define S10_SYSMGR_UE_VAL_OFST            0x220
+#define S10_SYSMGR_UE_ADDR_OFST           0x224
 
 #define S10_DDR0_IRQ_MASK                 BIT(16)
 
index 09b845e901140b9e1bcb79e4a7e773337a07d85d..a785ffd5af891abe95938da021afb5358ec6d84b 100644 (file)
@@ -1144,10 +1144,6 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
        if (device->is_local)
                return -ENODEV;
 
-       if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
-               WARN_ON(dma_set_max_seg_size(device->card->device,
-                                            SBP2_MAX_SEG_SIZE));
-
        shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
        if (shost == NULL)
                return -ENOMEM;
@@ -1610,6 +1606,7 @@ static struct scsi_host_template scsi_driver_template = {
        .eh_abort_handler       = sbp2_scsi_abort,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
+       .max_segment_size       = SBP2_MAX_SEG_SIZE,
        .can_queue              = 1,
        .sdev_attrs             = sbp2_scsi_sysfs_attrs,
 };
index 97f5424dbac9872da032fdb221a8a932cfc47a26..4b56a587dacd4b5b65d6d74e83eaec177ba0b7cb 100644 (file)
@@ -18,6 +18,14 @@ struct imx_sc_msg_req_misc_set_ctrl {
        u16 resource;
 } __packed;
 
+struct imx_sc_msg_req_cpu_start {
+       struct imx_sc_rpc_msg hdr;
+       u32 address_hi;
+       u32 address_lo;
+       u16 resource;
+       u8 enable;
+} __packed;
+
 struct imx_sc_msg_req_misc_get_ctrl {
        struct imx_sc_rpc_msg hdr;
        u32 ctrl;
@@ -97,3 +105,33 @@ int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
        return 0;
 }
 EXPORT_SYMBOL(imx_sc_misc_get_control);
+
+/*
+ * This function starts/stops a CPU identified by @resource
+ *
+ * @param[in]     ipc         IPC handle
+ * @param[in]     resource    resource the control is associated with
+ * @param[in]     enable      true for start, false for stop
+ * @param[in]     phys_addr   initial instruction address to be executed
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+                       bool enable, u64 phys_addr)
+{
+       struct imx_sc_msg_req_cpu_start msg;
+       struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+       hdr->ver = IMX_SC_RPC_VERSION;
+       hdr->svc = IMX_SC_RPC_SVC_PM;
+       hdr->func = IMX_SC_PM_FUNC_CPU_START;
+       hdr->size = 4;
+
+       msg.address_hi = phys_addr >> 32;
+       msg.address_lo = phys_addr;
+       msg.resource = resource;
+       msg.enable = enable;
+
+       return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_pm_cpu_start);
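A hedged usage sketch for the new imx_sc_pm_cpu_start() helper follows; the resource ID, device pointer, and entry address are placeholders, not anything taken from this series.

	/* Hypothetical caller: start a secondary core at entry_pa via the SCU.
	 * IMX_SC_R_A35_1 is only an example resource ID. */
	err = imx_sc_pm_cpu_start(ipc, IMX_SC_R_A35_1, true, entry_pa);
	if (err)
		dev_err(dev, "failed to start CPU: %d\n", err);

	/* A stop request uses the same call with enable == false. */
	err = imx_sc_pm_cpu_start(ipc, IMX_SC_R_A35_1, false, 0);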
index 407245f2efd0d670c8e230538359a9cfb7d7de44..39a94c7177fc2675f685b2d1e2aa62ab6a17e1cb 100644 (file)
@@ -322,6 +322,7 @@ static int imx_sc_pd_probe(struct platform_device *pdev)
 
 static const struct of_device_id imx_sc_pd_match[] = {
        { .compatible = "fsl,imx8qxp-scu-pd", &imx8qxp_scu_pd},
+       { .compatible = "fsl,scu-pd", &imx8qxp_scu_pd},
        { /* sentinel */ }
 };
 
index a13558154ac305fc19e3dee687e07ea26e7c6f25..61be15d9df7dc2737ab4d1841356de4004e75836 100644 (file)
@@ -238,6 +238,16 @@ static int rpi_firmware_probe(struct platform_device *pdev)
        return 0;
 }
 
+static void rpi_firmware_shutdown(struct platform_device *pdev)
+{
+       struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+       if (!fw)
+               return;
+
+       rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
+}
+
 static int rpi_firmware_remove(struct platform_device *pdev)
 {
        struct rpi_firmware *fw = platform_get_drvdata(pdev);
@@ -278,6 +288,7 @@ static struct platform_driver rpi_firmware_driver = {
                .of_match_table = rpi_firmware_of_match,
        },
        .probe          = rpi_firmware_probe,
+       .shutdown       = rpi_firmware_shutdown,
        .remove         = rpi_firmware_remove,
 };
 module_platform_driver(rpi_firmware_driver);
index 1b826dcca719b01e6c36983c4c3e744ea4eb08fe..676b01caff05ce876f383a86978482f5f0d89814 100644 (file)
@@ -1,4 +1,7 @@
 tegra-bpmp-y                   = bpmp.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC)        += bpmp-tegra210.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC)        += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC)        += bpmp-tegra186.o
 tegra-bpmp-$(CONFIG_DEBUG_FS)  += bpmp-debugfs.o
 obj-$(CONFIG_TEGRA_BPMP)       += tegra-bpmp.o
 obj-$(CONFIG_TEGRA_IVC)                += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
new file mode 100644 (file)
index 0000000..54d560c
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#ifndef __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+#define __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+
+#include <soc/tegra/bpmp.h>
+
+struct tegra_bpmp_ops {
+       int (*init)(struct tegra_bpmp *bpmp);
+       void (*deinit)(struct tegra_bpmp *bpmp);
+       bool (*is_response_ready)(struct tegra_bpmp_channel *channel);
+       bool (*is_request_ready)(struct tegra_bpmp_channel *channel);
+       int (*ack_response)(struct tegra_bpmp_channel *channel);
+       int (*ack_request)(struct tegra_bpmp_channel *channel);
+       bool (*is_response_channel_free)(struct tegra_bpmp_channel *channel);
+       bool (*is_request_channel_free)(struct tegra_bpmp_channel *channel);
+       int (*post_response)(struct tegra_bpmp_channel *channel);
+       int (*post_request)(struct tegra_bpmp_channel *channel);
+       int (*ring_doorbell)(struct tegra_bpmp *bpmp);
+       int (*resume)(struct tegra_bpmp *bpmp);
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_bpmp_ops tegra210_bpmp_ops;
+#endif
+
+#endif
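The header above is the pivot of the Tegra BPMP rework in this merge: the common core stops driving the IVC/mailbox transport directly and instead dispatches every channel operation through a per-SoC tegra_bpmp_ops table (IVC plus HSP doorbell on Tegra186/194, atomics plus arbitration semaphores on Tegra210). A minimal sketch of the dispatch pattern, mirroring the channel_to_ops() helpers added to bpmp.c further down in this diff:

	/* Sketch only; the real helpers appear in the
	 * drivers/firmware/tegra/bpmp.c hunks later in this diff. */
	static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
	{
		const struct tegra_bpmp_ops *ops = channel->bpmp->soc->ops;

		return ops->post_request(channel);	/* SoC-specific backend */
	}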
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
new file mode 100644 (file)
index 0000000..ea30875
--- /dev/null
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#include "bpmp-private.h"
+
+struct tegra186_bpmp {
+       struct tegra_bpmp *parent;
+
+       struct {
+               struct gen_pool *pool;
+               dma_addr_t phys;
+               void *virt;
+       } tx, rx;
+
+       struct {
+               struct mbox_client client;
+               struct mbox_chan *channel;
+       } mbox;
+};
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+       struct tegra186_bpmp *priv;
+
+       priv = container_of(client, struct tegra186_bpmp, mbox.client);
+
+       return priv->parent;
+}
+
+static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
+{
+       void *frame;
+
+       frame = tegra_ivc_read_get_next_frame(channel->ivc);
+       if (IS_ERR(frame)) {
+               channel->ib = NULL;
+               return false;
+       }
+
+       channel->ib = frame;
+
+       return true;
+}
+
+static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
+{
+       void *frame;
+
+       frame = tegra_ivc_write_get_next_frame(channel->ivc);
+       if (IS_ERR(frame)) {
+               channel->ob = NULL;
+               return false;
+       }
+
+       channel->ob = frame;
+
+       return true;
+}
+
+static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
+{
+       return tegra_ivc_read_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
+{
+       return tegra_ivc_write_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+       struct tegra186_bpmp *priv = bpmp->priv;
+       int err;
+
+       err = mbox_send_message(priv->mbox.channel, NULL);
+       if (err < 0)
+               return err;
+
+       mbox_client_txdone(priv->mbox.channel, 0);
+
+       return 0;
+}
+
+static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+       struct tegra_bpmp *bpmp = data;
+       struct tegra186_bpmp *priv = bpmp->priv;
+
+       if (WARN_ON(priv->mbox.channel == NULL))
+               return;
+
+       tegra186_bpmp_ring_doorbell(bpmp);
+}
+
+static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+                                     struct tegra_bpmp *bpmp,
+                                     unsigned int index)
+{
+       struct tegra186_bpmp *priv = bpmp->priv;
+       size_t message_size, queue_size;
+       unsigned int offset;
+       int err;
+
+       channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+                                   GFP_KERNEL);
+       if (!channel->ivc)
+               return -ENOMEM;
+
+       message_size = tegra_ivc_align(MSG_MIN_SZ);
+       queue_size = tegra_ivc_total_queue_size(message_size);
+       offset = queue_size * index;
+
+       err = tegra_ivc_init(channel->ivc, NULL,
+                            priv->rx.virt + offset, priv->rx.phys + offset,
+                            priv->tx.virt + offset, priv->tx.phys + offset,
+                            1, message_size, tegra186_bpmp_ivc_notify,
+                            bpmp);
+       if (err < 0) {
+               dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+                       index, err);
+               return err;
+       }
+
+       init_completion(&channel->completion);
+       channel->bpmp = bpmp;
+
+       return 0;
+}
+
+static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+       /* reset the channel state */
+       tegra_ivc_reset(channel->ivc);
+
+       /* sync the channel state with BPMP */
+       while (tegra_ivc_notified(channel->ivc))
+               ;
+}
+
+static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+       tegra_ivc_cleanup(channel->ivc);
+}
+
+static void mbox_handle_rx(struct mbox_client *client, void *data)
+{
+       struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+
+       tegra_bpmp_handle_rx(bpmp);
+}
+
+static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
+{
+       struct tegra186_bpmp *priv;
+       unsigned int i;
+       int err;
+
+       priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       bpmp->priv = priv;
+       priv->parent = bpmp;
+
+       priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
+       if (!priv->tx.pool) {
+               dev_err(bpmp->dev, "TX shmem pool not found\n");
+               return -ENOMEM;
+       }
+
+       priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
+       if (!priv->tx.virt) {
+               dev_err(bpmp->dev, "failed to allocate from TX pool\n");
+               return -ENOMEM;
+       }
+
+       priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
+       if (!priv->rx.pool) {
+               dev_err(bpmp->dev, "RX shmem pool not found\n");
+               err = -ENOMEM;
+               goto free_tx;
+       }
+
+       priv->rx.virt = gen_pool_dma_alloc(priv->rx.pool, 4096, &priv->rx.phys);
+       if (!priv->rx.virt) {
+               dev_err(bpmp->dev, "failed to allocate from RX pool\n");
+               err = -ENOMEM;
+               goto free_tx;
+       }
+
+       err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
+                                        bpmp->soc->channels.cpu_tx.offset);
+       if (err < 0)
+               goto free_rx;
+
+       err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
+                                        bpmp->soc->channels.cpu_rx.offset);
+       if (err < 0)
+               goto cleanup_tx_channel;
+
+       for (i = 0; i < bpmp->threaded.count; i++) {
+               unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+               err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
+                                                bpmp, index);
+               if (err < 0)
+                       goto cleanup_channels;
+       }
+
+       /* mbox registration */
+       priv->mbox.client.dev = bpmp->dev;
+       priv->mbox.client.rx_callback = mbox_handle_rx;
+       priv->mbox.client.tx_block = false;
+       priv->mbox.client.knows_txdone = false;
+
+       priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
+       if (IS_ERR(priv->mbox.channel)) {
+               err = PTR_ERR(priv->mbox.channel);
+               dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
+               goto cleanup_channels;
+       }
+
+       tegra186_bpmp_channel_reset(bpmp->tx_channel);
+       tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+       for (i = 0; i < bpmp->threaded.count; i++)
+               tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+       return 0;
+
+cleanup_channels:
+       for (i = 0; i < bpmp->threaded.count; i++) {
+               if (!bpmp->threaded_channels[i].bpmp)
+                       continue;
+
+               tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+       }
+
+       tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+cleanup_tx_channel:
+       tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+free_rx:
+       gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+free_tx:
+       gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+
+       return err;
+}
+
+static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
+{
+       struct tegra186_bpmp *priv = bpmp->priv;
+       unsigned int i;
+
+       mbox_free_channel(priv->mbox.channel);
+
+       for (i = 0; i < bpmp->threaded.count; i++)
+               tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+
+       tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+       tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+
+       gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+       gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+}
+
+static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
+{
+       unsigned int i;
+
+       /* reset message channels */
+       tegra186_bpmp_channel_reset(bpmp->tx_channel);
+       tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+       for (i = 0; i < bpmp->threaded.count; i++)
+               tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+       return 0;
+}
+
+const struct tegra_bpmp_ops tegra186_bpmp_ops = {
+       .init = tegra186_bpmp_init,
+       .deinit = tegra186_bpmp_deinit,
+       .is_response_ready = tegra186_bpmp_is_message_ready,
+       .is_request_ready = tegra186_bpmp_is_message_ready,
+       .ack_response = tegra186_bpmp_ack_message,
+       .ack_request = tegra186_bpmp_ack_message,
+       .is_response_channel_free = tegra186_bpmp_is_channel_free,
+       .is_request_channel_free = tegra186_bpmp_is_channel_free,
+       .post_response = tegra186_bpmp_post_message,
+       .post_request = tegra186_bpmp_post_message,
+       .ring_doorbell = tegra186_bpmp_ring_doorbell,
+       .resume = tegra186_bpmp_resume,
+};
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
new file mode 100644 (file)
index 0000000..ae15940
--- /dev/null
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+
+#include "bpmp-private.h"
+
+#define TRIGGER_OFFSET         0x000
+#define RESULT_OFFSET(id)      (0xc00 + id * 4)
+#define TRIGGER_ID_SHIFT       16
+#define TRIGGER_CMD_GET                4
+
+#define STA_OFFSET             0
+#define SET_OFFSET             4
+#define CLR_OFFSET             8
+
+#define CH_MASK(ch)    (0x3 << ((ch) * 2))
+#define SL_SIGL(ch)    (0x0 << ((ch) * 2))
+#define SL_QUED(ch)    (0x1 << ((ch) * 2))
+#define MA_FREE(ch)    (0x2 << ((ch) * 2))
+#define MA_ACKD(ch)    (0x3 << ((ch) * 2))
+
+struct tegra210_bpmp {
+       void __iomem *atomics;
+       void __iomem *arb_sema;
+       struct irq_data *tx_irq_data;
+};
+
+static u32 bpmp_channel_status(struct tegra_bpmp *bpmp, unsigned int index)
+{
+       struct tegra210_bpmp *priv = bpmp->priv;
+
+       return __raw_readl(priv->arb_sema + STA_OFFSET) & CH_MASK(index);
+}
+
+static bool tegra210_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
+{
+       unsigned int index = channel->index;
+
+       return bpmp_channel_status(channel->bpmp, index) == MA_ACKD(index);
+}
+
+static bool tegra210_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+       unsigned int index = channel->index;
+
+       return bpmp_channel_status(channel->bpmp, index) == SL_SIGL(index);
+}
+
+static bool
+tegra210_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+       unsigned int index = channel->index;
+
+       return bpmp_channel_status(channel->bpmp, index) == MA_FREE(index);
+}
+
+static bool
+tegra210_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+       unsigned int index = channel->index;
+
+       return bpmp_channel_status(channel->bpmp, index) == SL_QUED(index);
+}
+
+static int tegra210_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+       struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+       __raw_writel(CH_MASK(channel->index), priv->arb_sema + CLR_OFFSET);
+
+       return 0;
+}
+
+static int tegra210_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+       struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+       __raw_writel(MA_ACKD(channel->index), priv->arb_sema + SET_OFFSET);
+
+       return 0;
+}
+
+static int tegra210_bpmp_ack_response(struct tegra_bpmp_channel *channel)
+{
+       struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+       __raw_writel(MA_ACKD(channel->index) ^ MA_FREE(channel->index),
+                    priv->arb_sema + CLR_OFFSET);
+
+       return 0;
+}
+
+static int tegra210_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+       struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+       __raw_writel(SL_QUED(channel->index), priv->arb_sema + SET_OFFSET);
+
+       return 0;
+}
+
+static int tegra210_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+       struct tegra210_bpmp *priv = bpmp->priv;
+       struct irq_data *irq_data = priv->tx_irq_data;
+
+       /*
+        * Tegra Legacy Interrupt Controller (LIC) is used to notify BPMP of
+        * available messages
+        */
+       if (irq_data->chip->irq_retrigger)
+               return irq_data->chip->irq_retrigger(irq_data);
+
+       return -EINVAL;
+}
+
+static irqreturn_t rx_irq(int irq, void *data)
+{
+       struct tegra_bpmp *bpmp = data;
+
+       tegra_bpmp_handle_rx(bpmp);
+
+       return IRQ_HANDLED;
+}
+
+static int tegra210_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+                                     struct tegra_bpmp *bpmp,
+                                     unsigned int index)
+{
+       struct tegra210_bpmp *priv = bpmp->priv;
+       u32 address;
+       void *p;
+
+       /* Retrieve channel base address from BPMP */
+       writel(index << TRIGGER_ID_SHIFT | TRIGGER_CMD_GET,
+              priv->atomics + TRIGGER_OFFSET);
+       address = readl(priv->atomics + RESULT_OFFSET(index));
+
+       p = devm_ioremap(bpmp->dev, address, 0x80);
+       if (!p)
+               return -ENOMEM;
+
+       channel->ib = p;
+       channel->ob = p;
+       channel->index = index;
+       init_completion(&channel->completion);
+       channel->bpmp = bpmp;
+
+       return 0;
+}
+
+static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
+{
+       struct platform_device *pdev = to_platform_device(bpmp->dev);
+       struct tegra210_bpmp *priv;
+       struct resource *res;
+       unsigned int i;
+       int err;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       bpmp->priv = priv;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->atomics = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->atomics))
+               return PTR_ERR(priv->atomics);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       priv->arb_sema = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->arb_sema))
+               return PTR_ERR(priv->arb_sema);
+
+       err = tegra210_bpmp_channel_init(bpmp->tx_channel, bpmp,
+                                        bpmp->soc->channels.cpu_tx.offset);
+       if (err < 0)
+               return err;
+
+       err = tegra210_bpmp_channel_init(bpmp->rx_channel, bpmp,
+                                        bpmp->soc->channels.cpu_rx.offset);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i < bpmp->threaded.count; i++) {
+               unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+               err = tegra210_bpmp_channel_init(&bpmp->threaded_channels[i],
+                                                bpmp, index);
+               if (err < 0)
+                       return err;
+       }
+
+       err = platform_get_irq_byname(pdev, "tx");
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to get TX IRQ: %d\n", err);
+               return err;
+       }
+
+       priv->tx_irq_data = irq_get_irq_data(err);
+       if (!priv->tx_irq_data) {
+               dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
+               return err;
+       }
+
+       err = platform_get_irq_byname(pdev, "rx");
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to get rx IRQ: %d\n", err);
+               return err;
+       }
+
+       err = devm_request_irq(&pdev->dev, err, rx_irq,
+                              IRQF_NO_SUSPEND, dev_name(&pdev->dev), bpmp);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+const struct tegra_bpmp_ops tegra210_bpmp_ops = {
+       .init = tegra210_bpmp_init,
+       .is_response_ready = tegra210_bpmp_is_response_ready,
+       .is_request_ready = tegra210_bpmp_is_request_ready,
+       .ack_response = tegra210_bpmp_ack_response,
+       .ack_request = tegra210_bpmp_ack_request,
+       .is_response_channel_free = tegra210_bpmp_is_response_channel_free,
+       .is_request_channel_free = tegra210_bpmp_is_request_channel_free,
+       .post_response = tegra210_bpmp_post_response,
+       .post_request = tegra210_bpmp_post_request,
+       .ring_doorbell = tegra210_bpmp_ring_doorbell,
+};
index 689478b92bce638ea364485087361a5a378b59dc..dd775e8ba5a02ff930d2153b7c65cd76cd43784c 100644 (file)
@@ -26,6 +26,8 @@
 #include <soc/tegra/bpmp-abi.h>
 #include <soc/tegra/ivc.h>
 
+#include "bpmp-private.h"
+
 #define MSG_ACK                BIT(0)
 #define MSG_RING       BIT(1)
 #define TAG_SZ         32
@@ -36,6 +38,14 @@ mbox_client_to_bpmp(struct mbox_client *client)
        return container_of(client, struct tegra_bpmp, mbox.client);
 }
 
+static inline const struct tegra_bpmp_ops *
+channel_to_ops(struct tegra_bpmp_channel *channel)
+{
+       struct tegra_bpmp *bpmp = channel->bpmp;
+
+       return bpmp->soc->ops;
+}
+
 struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
 {
        struct platform_device *pdev;
@@ -96,22 +106,21 @@ static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
               (msg->rx.size == 0 || msg->rx.data);
 }
 
-static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
 {
-       void *frame;
+       const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
 
-       frame = tegra_ivc_read_get_next_frame(channel->ivc);
-       if (IS_ERR(frame)) {
-               channel->ib = NULL;
-               return false;
-       }
+       return ops->is_response_ready(channel);
+}
 
-       channel->ib = frame;
+static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+       const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
 
-       return true;
+       return ops->is_request_ready(channel);
 }
 
-static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
 {
        unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
        ktime_t end;
@@ -119,29 +128,45 @@ static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
        end = ktime_add_us(ktime_get(), timeout);
 
        do {
-               if (tegra_bpmp_master_acked(channel))
+               if (tegra_bpmp_is_response_ready(channel))
                        return 0;
        } while (ktime_before(ktime_get(), end));
 
        return -ETIMEDOUT;
 }
 
-static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
 {
-       void *frame;
+       const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
 
-       frame = tegra_ivc_write_get_next_frame(channel->ivc);
-       if (IS_ERR(frame)) {
-               channel->ob = NULL;
-               return false;
-       }
+       return ops->ack_response(channel);
+}
 
-       channel->ob = frame;
+static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+       const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
 
-       return true;
+       return ops->ack_request(channel);
 }
 
-static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+static bool
+tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+       const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+       return ops->is_request_channel_free(channel);
+}
+
+static bool
+tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+       const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+       return ops->is_response_channel_free(channel);
+}
+
+static int
+tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
 {
        unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
        ktime_t start, now;
@@ -149,7 +174,7 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
        start = ns_to_ktime(local_clock());
 
        do {
-               if (tegra_bpmp_master_free(channel))
+               if (tegra_bpmp_is_request_channel_free(channel))
                        return 0;
 
                now = ns_to_ktime(local_clock());
@@ -158,6 +183,25 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
        return -ETIMEDOUT;
 }
 
+static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+       const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+       return ops->post_request(channel);
+}
+
+static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+       const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+       return ops->post_response(channel);
+}
+
+static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+       return bpmp->soc->ops->ring_doorbell(bpmp);
+}
+
 static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
                                         void *data, size_t size, int *ret)
 {
@@ -166,7 +210,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
        if (data && size > 0)
                memcpy(data, channel->ib->data, size);
 
-       err = tegra_ivc_read_advance(channel->ivc);
+       err = tegra_bpmp_ack_response(channel);
        if (err < 0)
                return err;
 
@@ -210,7 +254,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
        if (data && size > 0)
                memcpy(channel->ob->data, data, size);
 
-       return tegra_ivc_write_advance(channel->ivc);
+       return tegra_bpmp_post_request(channel);
 }
 
 static struct tegra_bpmp_channel *
@@ -238,7 +282,7 @@ tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
 
        channel = &bpmp->threaded_channels[index];
 
-       if (!tegra_bpmp_master_free(channel)) {
+       if (!tegra_bpmp_is_request_channel_free(channel)) {
                err = -EBUSY;
                goto unlock;
        }
@@ -270,7 +314,7 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
 {
        int err;
 
-       err = tegra_bpmp_wait_master_free(channel);
+       err = tegra_bpmp_wait_request_channel_free(channel);
        if (err < 0)
                return err;
 
@@ -302,13 +346,11 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
 
        spin_unlock(&bpmp->atomic_tx_lock);
 
-       err = mbox_send_message(bpmp->mbox.channel, NULL);
+       err = tegra_bpmp_ring_doorbell(bpmp);
        if (err < 0)
                return err;
 
-       mbox_client_txdone(bpmp->mbox.channel, 0);
-
-       err = tegra_bpmp_wait_ack(channel);
+       err = tegra_bpmp_wait_response(channel);
        if (err < 0)
                return err;
 
@@ -335,12 +377,10 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
        if (IS_ERR(channel))
                return PTR_ERR(channel);
 
-       err = mbox_send_message(bpmp->mbox.channel, NULL);
+       err = tegra_bpmp_ring_doorbell(bpmp);
        if (err < 0)
                return err;
 
-       mbox_client_txdone(bpmp->mbox.channel, 0);
-
        timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
 
        err = wait_for_completion_timeout(&channel->completion, timeout);
@@ -369,38 +409,34 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
 {
        unsigned long flags = channel->ib->flags;
        struct tegra_bpmp *bpmp = channel->bpmp;
-       struct tegra_bpmp_mb_data *frame;
        int err;
 
        if (WARN_ON(size > MSG_DATA_MIN_SZ))
                return;
 
-       err = tegra_ivc_read_advance(channel->ivc);
+       err = tegra_bpmp_ack_request(channel);
        if (WARN_ON(err < 0))
                return;
 
        if ((flags & MSG_ACK) == 0)
                return;
 
-       frame = tegra_ivc_write_get_next_frame(channel->ivc);
-       if (WARN_ON(IS_ERR(frame)))
+       if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
                return;
 
-       frame->code = code;
+       channel->ob->code = code;
 
        if (data && size > 0)
-               memcpy(frame->data, data, size);
+               memcpy(channel->ob->data, data, size);
 
-       err = tegra_ivc_write_advance(channel->ivc);
+       err = tegra_bpmp_post_response(channel);
        if (WARN_ON(err < 0))
                return;
 
        if (flags & MSG_RING) {
-               err = mbox_send_message(bpmp->mbox.channel, NULL);
+               err = tegra_bpmp_ring_doorbell(bpmp);
                if (WARN_ON(err < 0))
                        return;
-
-               mbox_client_txdone(bpmp->mbox.channel, 0);
        }
 }
 EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
@@ -627,9 +663,8 @@ static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
        complete(&channel->completion);
 }
 
-static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
 {
-       struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
        struct tegra_bpmp_channel *channel;
        unsigned int i, count;
        unsigned long *busy;
@@ -638,7 +673,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
        count = bpmp->soc->channels.thread.count;
        busy = bpmp->threaded.busy;
 
-       if (tegra_bpmp_master_acked(channel))
+       if (tegra_bpmp_is_request_ready(channel))
                tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
 
        spin_lock(&bpmp->lock);
@@ -648,7 +683,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
 
                channel = &bpmp->threaded_channels[i];
 
-               if (tegra_bpmp_master_acked(channel)) {
+               if (tegra_bpmp_is_response_ready(channel)) {
                        tegra_bpmp_channel_signal(channel);
                        clear_bit(i, busy);
                }
@@ -657,74 +692,9 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
        spin_unlock(&bpmp->lock);
 }
 
-static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
-{
-       struct tegra_bpmp *bpmp = data;
-       int err;
-
-       if (WARN_ON(bpmp->mbox.channel == NULL))
-               return;
-
-       err = mbox_send_message(bpmp->mbox.channel, NULL);
-       if (err < 0)
-               return;
-
-       mbox_client_txdone(bpmp->mbox.channel, 0);
-}
-
-static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
-                                  struct tegra_bpmp *bpmp,
-                                  unsigned int index)
-{
-       size_t message_size, queue_size;
-       unsigned int offset;
-       int err;
-
-       channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
-                                   GFP_KERNEL);
-       if (!channel->ivc)
-               return -ENOMEM;
-
-       message_size = tegra_ivc_align(MSG_MIN_SZ);
-       queue_size = tegra_ivc_total_queue_size(message_size);
-       offset = queue_size * index;
-
-       err = tegra_ivc_init(channel->ivc, NULL,
-                            bpmp->rx.virt + offset, bpmp->rx.phys + offset,
-                            bpmp->tx.virt + offset, bpmp->tx.phys + offset,
-                            1, message_size, tegra_bpmp_ivc_notify,
-                            bpmp);
-       if (err < 0) {
-               dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
-                       index, err);
-               return err;
-       }
-
-       init_completion(&channel->completion);
-       channel->bpmp = bpmp;
-
-       return 0;
-}
-
-static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
-{
-       /* reset the channel state */
-       tegra_ivc_reset(channel->ivc);
-
-       /* sync the channel state with BPMP */
-       while (tegra_ivc_notified(channel->ivc))
-               ;
-}
-
-static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
-{
-       tegra_ivc_cleanup(channel->ivc);
-}
-
 static int tegra_bpmp_probe(struct platform_device *pdev)
 {
        struct tegra_bpmp *bpmp;
-       unsigned int i;
        char tag[TAG_SZ];
        size_t size;
        int err;
@@ -736,32 +706,6 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
        bpmp->soc = of_device_get_match_data(&pdev->dev);
        bpmp->dev = &pdev->dev;
 
-       bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
-       if (!bpmp->tx.pool) {
-               dev_err(&pdev->dev, "TX shmem pool not found\n");
-               return -ENOMEM;
-       }
-
-       bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
-       if (!bpmp->tx.virt) {
-               dev_err(&pdev->dev, "failed to allocate from TX pool\n");
-               return -ENOMEM;
-       }
-
-       bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
-       if (!bpmp->rx.pool) {
-               dev_err(&pdev->dev, "RX shmem pool not found\n");
-               err = -ENOMEM;
-               goto free_tx;
-       }
-
-       bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
-       if (!bpmp->rx.virt) {
-               dev_err(&pdev->dev, "failed to allocate from RX pool\n");
-               err = -ENOMEM;
-               goto free_tx;
-       }
-
        INIT_LIST_HEAD(&bpmp->mrqs);
        spin_lock_init(&bpmp->lock);
 
@@ -771,81 +715,38 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
        size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
 
        bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-       if (!bpmp->threaded.allocated) {
-               err = -ENOMEM;
-               goto free_rx;
-       }
+       if (!bpmp->threaded.allocated)
+               return -ENOMEM;
 
        bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-       if (!bpmp->threaded.busy) {
-               err = -ENOMEM;
-               goto free_rx;
-       }
+       if (!bpmp->threaded.busy)
+               return -ENOMEM;
 
        spin_lock_init(&bpmp->atomic_tx_lock);
        bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
                                        GFP_KERNEL);
-       if (!bpmp->tx_channel) {
-               err = -ENOMEM;
-               goto free_rx;
-       }
+       if (!bpmp->tx_channel)
+               return -ENOMEM;
 
        bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
                                        GFP_KERNEL);
-       if (!bpmp->rx_channel) {
-               err = -ENOMEM;
-               goto free_rx;
-       }
+       if (!bpmp->rx_channel)
+               return -ENOMEM;
 
        bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
                                               sizeof(*bpmp->threaded_channels),
                                               GFP_KERNEL);
-       if (!bpmp->threaded_channels) {
-               err = -ENOMEM;
-               goto free_rx;
-       }
-
-       err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
-                                     bpmp->soc->channels.cpu_tx.offset);
-       if (err < 0)
-               goto free_rx;
+       if (!bpmp->threaded_channels)
+               return -ENOMEM;
 
-       err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
-                                     bpmp->soc->channels.cpu_rx.offset);
+       err = bpmp->soc->ops->init(bpmp);
        if (err < 0)
-               goto cleanup_tx_channel;
-
-       for (i = 0; i < bpmp->threaded.count; i++) {
-               err = tegra_bpmp_channel_init(
-                       &bpmp->threaded_channels[i], bpmp,
-                       bpmp->soc->channels.thread.offset + i);
-               if (err < 0)
-                       goto cleanup_threaded_channels;
-       }
-
-       /* mbox registration */
-       bpmp->mbox.client.dev = &pdev->dev;
-       bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
-       bpmp->mbox.client.tx_block = false;
-       bpmp->mbox.client.knows_txdone = false;
-
-       bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
-       if (IS_ERR(bpmp->mbox.channel)) {
-               err = PTR_ERR(bpmp->mbox.channel);
-               dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
-               goto cleanup_threaded_channels;
-       }
-
-       /* reset message channels */
-       tegra_bpmp_channel_reset(bpmp->tx_channel);
-       tegra_bpmp_channel_reset(bpmp->rx_channel);
-       for (i = 0; i < bpmp->threaded.count; i++)
-               tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+               return err;
 
        err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
                                     tegra_bpmp_mrq_handle_ping, bpmp);
        if (err < 0)
-               goto free_mbox;
+               goto deinit;
 
        err = tegra_bpmp_ping(bpmp);
        if (err < 0) {
@@ -867,17 +768,23 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
        if (err < 0)
                goto free_mrq;
 
-       err = tegra_bpmp_init_clocks(bpmp);
-       if (err < 0)
-               goto free_mrq;
+       if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
+               err = tegra_bpmp_init_clocks(bpmp);
+               if (err < 0)
+                       goto free_mrq;
+       }
 
-       err = tegra_bpmp_init_resets(bpmp);
-       if (err < 0)
-               goto free_mrq;
+       if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
+               err = tegra_bpmp_init_resets(bpmp);
+               if (err < 0)
+                       goto free_mrq;
+       }
 
-       err = tegra_bpmp_init_powergates(bpmp);
-       if (err < 0)
-               goto free_mrq;
+       if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
+               err = tegra_bpmp_init_powergates(bpmp);
+               if (err < 0)
+                       goto free_mrq;
+       }
 
        err = tegra_bpmp_init_debugfs(bpmp);
        if (err < 0)
@@ -887,41 +794,27 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
 
 free_mrq:
        tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
-free_mbox:
-       mbox_free_channel(bpmp->mbox.channel);
-cleanup_threaded_channels:
-       for (i = 0; i < bpmp->threaded.count; i++) {
-               if (bpmp->threaded_channels[i].bpmp)
-                       tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
-       }
+deinit:
+       if (bpmp->soc->ops->deinit)
+               bpmp->soc->ops->deinit(bpmp);
 
-       tegra_bpmp_channel_cleanup(bpmp->rx_channel);
-cleanup_tx_channel:
-       tegra_bpmp_channel_cleanup(bpmp->tx_channel);
-free_rx:
-       gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
-free_tx:
-       gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
        return err;
 }
 
 static int __maybe_unused tegra_bpmp_resume(struct device *dev)
 {
        struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
-       unsigned int i;
-
-       /* reset message channels */
-       tegra_bpmp_channel_reset(bpmp->tx_channel);
-       tegra_bpmp_channel_reset(bpmp->rx_channel);
-
-       for (i = 0; i < bpmp->threaded.count; i++)
-               tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
 
-       return 0;
+       if (bpmp->soc->ops->resume)
+               return bpmp->soc->ops->resume(bpmp);
+       else
+               return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume);
 
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
 static const struct tegra_bpmp_soc tegra186_soc = {
        .channels = {
                .cpu_tx = {
@@ -938,11 +831,42 @@ static const struct tegra_bpmp_soc tegra186_soc = {
                        .timeout = 0,
                },
        },
+       .ops = &tegra186_bpmp_ops,
        .num_resets = 193,
 };
+#endif
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_bpmp_soc tegra210_soc = {
+       .channels = {
+               .cpu_tx = {
+                       .offset = 0,
+                       .count = 1,
+                       .timeout = 60 * USEC_PER_SEC,
+               },
+               .thread = {
+                       .offset = 4,
+                       .count = 1,
+                       .timeout = 600 * USEC_PER_SEC,
+               },
+               .cpu_rx = {
+                       .offset = 8,
+                       .count = 1,
+                       .timeout = 0,
+               },
+       },
+       .ops = &tegra210_bpmp_ops,
+};
+#endif
 
 static const struct of_device_id tegra_bpmp_match[] = {
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
        { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+       { .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
+#endif
        { }
 };
 
index 69ed1464175c058cfdeda0eb47faf470914477bc..3fbbb61012c4b8db011a95919886e8f57c7f22fc 100644 (file)
@@ -146,25 +146,8 @@ static int ti_sci_debug_show(struct seq_file *s, void *unused)
        return 0;
 }
 
-/**
- * ti_sci_debug_open() - debug file open
- * @inode:     inode pointer
- * @file:      file pointer
- *
- * Return: result of single_open
- */
-static int ti_sci_debug_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ti_sci_debug_show, inode->i_private);
-}
-
-/* log file operations */
-static const struct file_operations ti_sci_debug_fops = {
-       .open = ti_sci_debug_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
+/* Provide the log file operations interface */
+DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
 
 /**
  * ti_sci_debugfs_create() - Create log debug file
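For readers less familiar with the macro: DEFINE_SHOW_ATTRIBUTE(ti_sci_debug), from <linux/seq_file.h>, generates essentially the open/read/llseek/release boilerplate that the hunk above deletes by hand. A simplified sketch of the expansion:

	/* Roughly what DEFINE_SHOW_ATTRIBUTE(ti_sci_debug) expands to; the real
	 * macro is equivalent modulo formatting. */
	static int ti_sci_debug_open(struct inode *inode, struct file *file)
	{
		return single_open(file, ti_sci_debug_show, inode->i_private);
	}

	static const struct file_operations ti_sci_debug_fops = {
		.owner   = THIS_MODULE,
		.open    = ti_sci_debug_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};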
index 8f44b9cd295a3790a71af40a3802faf65c41df7c..bd33bbf70daf061d6609c111c4aafa829b2880c6 100644 (file)
@@ -6,6 +6,7 @@ menu "Zynq MPSoC Firmware Drivers"
 
 config ZYNQMP_FIRMWARE
        bool "Enable Xilinx Zynq MPSoC firmware interface"
+       select MFD_CORE
        help
          Firmware interface driver is used by different
          drivers to communicate with the firmware for
index 9a1c72a9280f1bce66ee5544e39b8b8abf86638c..98f936125643d9194f1aba79191c71823479604e 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/compiler.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/mfd/core.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/firmware/xlnx-zynqmp.h>
 #include "zynqmp-debug.h"
 
+static const struct mfd_cell firmware_devs[] = {
+       {
+               .name = "zynqmp_power_controller",
+       },
+};
+
 /**
  * zynqmp_pm_ret_code() - Convert PMU-FW error codes to Linux error codes
  * @ret_status:                PMUFW return code
@@ -186,6 +193,29 @@ static int zynqmp_pm_get_api_version(u32 *version)
        return ret;
 }
 
+/**
+ * zynqmp_pm_get_chipid - Get silicon ID registers
+ * @idcode:     IDCODE register
+ * @version:    version register
+ *
+ * Return:      Returns the status of the operation and the idcode and version
+ *              registers in @idcode and @version.
+ */
+static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+       u32 ret_payload[PAYLOAD_ARG_CNT];
+       int ret;
+
+       if (!idcode || !version)
+               return -EINVAL;
+
+       ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+       *idcode = ret_payload[1];
+       *version = ret_payload[2];
+
+       return ret;
+}
+
 /**
  * zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
  * @version:   Returned version value
@@ -469,8 +499,129 @@ static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
                                   arg1, arg2, out);
 }
 
+/**
+ * zynqmp_pm_reset_assert - Request setting of reset (1 - assert, 0 - release)
+ * @reset:             Reset to be configured
+ * @assert_flag:       Flag stating whether the reset should be asserted (1)
+ *                     or released (0)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+                                 const enum zynqmp_pm_reset_action assert_flag)
+{
+       return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
+                                  0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_reset_get_status - Get status of the reset
+ * @reset:      Reset whose status should be returned
+ * @status:     Returned status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
+                                     u32 *status)
+{
+       u32 ret_payload[PAYLOAD_ARG_CNT];
+       int ret;
+
+       if (!status)
+               return -EINVAL;
+
+       ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
+                                 0, 0, ret_payload);
+       *status = ret_payload[1];
+
+       return ret;
+}
+
+/**
+ * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
+ *                            master has initialized its own power management
+ *
+ * This API function is used to notify the power management controller
+ * that power management initialization has completed.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_init_finalize(void)
+{
+       return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_suspend_mode()        - Set system suspend mode
+ * @mode:      Mode to set for system suspend
+ *
+ * This API function is used to set mode of system suspend.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+       return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_request_node() - Request a node with specific capabilities
+ * @node:              Node ID of the slave
+ * @capabilities:      Requested capabilities of the slave
+ * @qos:               Quality of service (not supported)
+ * @ack:               Flag to specify whether acknowledge is requested
+ *
+ * This function is used by a master to request a particular node from the
+ * firmware. Every master must request a node before using it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+                                 const u32 qos,
+                                 const enum zynqmp_pm_request_ack ack)
+{
+       return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
+                                  qos, ack, NULL);
+}
+
+/**
+ * zynqmp_pm_release_node() - Release a node
+ * @node:      Node ID of the slave
+ *
+ * This function is used by a master to inform the firmware that it has
+ * released a node. Once released, the master must not use that node again
+ * without re-requesting it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_release_node(const u32 node)
+{
+       return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
+ * @node:              Node ID of the slave
+ * @capabilities:      Requested capabilities of the slave
+ * @qos:               Quality of service (not supported)
+ * @ack:               Flag to specify whether acknowledge is requested
+ *
+ * This API function is used to change the capabilities of a slave that the
+ * PU has already requested.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+                                    const u32 qos,
+                                    const enum zynqmp_pm_request_ack ack)
+{
+       return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
+                                  qos, ack, NULL);
+}
+
 static const struct zynqmp_eemi_ops eemi_ops = {
        .get_api_version = zynqmp_pm_get_api_version,
+       .get_chipid = zynqmp_pm_get_chipid,
        .query_data = zynqmp_pm_query_data,
        .clock_enable = zynqmp_pm_clock_enable,
        .clock_disable = zynqmp_pm_clock_disable,
@@ -482,6 +633,13 @@ static const struct zynqmp_eemi_ops eemi_ops = {
        .clock_setparent = zynqmp_pm_clock_setparent,
        .clock_getparent = zynqmp_pm_clock_getparent,
        .ioctl = zynqmp_pm_ioctl,
+       .reset_assert = zynqmp_pm_reset_assert,
+       .reset_get_status = zynqmp_pm_reset_get_status,
+       .init_finalize = zynqmp_pm_init_finalize,
+       .set_suspend_mode = zynqmp_pm_set_suspend_mode,
+       .request_node = zynqmp_pm_request_node,
+       .release_node = zynqmp_pm_release_node,
+       .set_requirement = zynqmp_pm_set_requirement,
 };
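The new entries extend the EEMI ops table that other drivers fetch through the existing zynqmp_pm_get_eemi_ops() accessor. A hedged consumer sketch; the node ID and capability values are placeholders, and the ack constant name is assumed from the zynqmp firmware header:

	/* Hypothetical consumer: request a PM node before using it. */
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
	int ret;

	if (!eemi_ops || !eemi_ops->request_node)
		return -ENXIO;

	ret = eemi_ops->request_node(MY_NODE_ID, MY_CAPABILITIES, 0,
				     ZYNQMP_PM_REQUEST_ACK_BLOCKING);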
 
 /**
@@ -538,11 +696,19 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 
        zynqmp_pm_api_debugfs_init();
 
+       ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
+                             ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+               return ret;
+       }
+
        return of_platform_populate(dev->of_node, NULL, NULL, dev);
 }
 
 static int zynqmp_firmware_remove(struct platform_device *pdev)
 {
+       mfd_remove_devices(&pdev->dev);
        zynqmp_pm_api_debugfs_exit();
 
        return 0;
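Registering "zynqmp_power_controller" as an MFD cell lets a matching platform driver (presumably the ZynqMP power controller driver added alongside this series) bind to it; with no of_compatible in the cell, platform-bus matching falls back to the name. A hedged sketch of the child side; zynqmp_power_probe() is a placeholder name:

	/* Hypothetical child driver: binds because .name matches the mfd_cell
	 * registered by zynqmp_firmware_probe() above. */
	static struct platform_driver zynqmp_power_platform_driver = {
		.probe = zynqmp_power_probe,
		.driver = {
			.name = "zynqmp_power_controller",
		},
	};
	module_platform_driver(zynqmp_power_platform_driver);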
index a028661d9e2013dd2a6e5611448438c7590fec82..92b11de1958132c28e4ffd68e1fd782a8e2e5771 100644 (file)
@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
        { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+       { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0, 0, 0, 0, 0 },
 };
 
index b8747a5c9204d6341a92ab16e51091616f4715d6..99d596dc0e8976a78fee73261526001146490a76 100644 (file)
@@ -32,6 +32,7 @@
 #include "vega10_pptable.h"
 
 #define NUM_DSPCLK_LEVELS 8
+#define VEGA10_ENGINECLOCK_HARDMAX 198000
 
 static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
                enum phm_platform_caps cap)
@@ -258,7 +259,26 @@ static int init_over_drive_limits(
                struct pp_hwmgr *hwmgr,
                const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
 {
-       hwmgr->platform_descriptor.overdriveLimit.engineClock =
+       const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
+                       (const ATOM_Vega10_GFXCLK_Dependency_Table *)
+                       (((unsigned long) powerplay_table) +
+                       le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
+       bool is_acg_enabled = false;
+       ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
+
+       if (gfxclk_dep_table->ucRevId == 1) {
+               patom_record_v2 =
+                       (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
+               is_acg_enabled =
+                       (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
+       }
+
+       if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
+               !is_acg_enabled)
+               hwmgr->platform_descriptor.overdriveLimit.engineClock =
+                       VEGA10_ENGINECLOCK_HARDMAX;
+       else
+               hwmgr->platform_descriptor.overdriveLimit.engineClock =
                        le32_to_cpu(powerplay_table->ulMaxODEngineClock);
        hwmgr->platform_descriptor.overdriveLimit.memoryClock =
                        le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
index 5567ddc7760f031de674fc5016a17cbd96d3cac0..55bb7885e22880b258ab3b7a108b5b4b0ca1770f 100644 (file)
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+       wa_ctx->indirect_ctx.obj = NULL;
+       wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
 static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -911,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
        list_del_init(&workload->list);
 
-       if (!workload->status) {
-               release_shadow_batch_buffer(workload);
-               release_shadow_wa_ctx(&workload->wa_ctx);
-       }
-
        if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
                /* if workload->status is not successful means HW GPU
                 * has occurred GPU hang or something wrong with i915/GVT,
@@ -1283,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+       release_shadow_batch_buffer(workload);
+       release_shadow_wa_ctx(&workload->wa_ctx);
+
        if (workload->shadow_mm)
                intel_vgpu_mm_put(workload->shadow_mm);
 
index 4796f40a6d4f11a997083eaf0493725942eb3daa..eab9341a5152f90214427aa20cfc7b6414a94858 100644 (file)
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
         */
        if (!(prio & I915_PRIORITY_NEWCLIENT)) {
                prio |= I915_PRIORITY_NEWCLIENT;
+               active->sched.attr.priority = prio;
                list_move_tail(&active->sched.link,
                               i915_sched_lookup_priolist(engine, prio));
        }
@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                int i;
 
                priolist_for_each_request_consume(rq, rn, p, i) {
+                       GEM_BUG_ON(last &&
+                                  need_preempt(engine, last, rq_prio(rq)));
+
                        /*
                         * Can we combine this request with the current port?
                         * It has to be the same context/ringbuffer and not
index 5beb83d1cf87769948575b74957777ce0e5b5d6e..ce1b3cc4bf6d5aea13aa213eccbeea37719f166c 100644 (file)
@@ -944,7 +944,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
        np = dev_pm_opp_get_of_node(opp);
 
        if (np) {
-               of_property_read_u32(np, "qcom,level", &val);
+               of_property_read_u32(np, "opp-level", &val);
                of_node_put(np);
        }
 
index 2e4372ef17a34fd2fc3028c6f8a543477ce54247..2cfee1a4fe0b871ee4cd241ad2b874bc39bd1f94 100644 (file)
@@ -765,7 +765,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        adreno_gpu->rev = config->rev;
 
        adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
-       adreno_gpu_config.irqname = "kgsl_3d0_irq";
 
        adreno_gpu_config.va_start = SZ_16M;
        adreno_gpu_config.va_end = 0xffffffff;
index fd75870eb17f7c7d5e8f8446f526b19715042b03..6aefcd6db46b4d36295f66bae809d99acc2ada85 100644 (file)
@@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
                        &pdpu->pipe_qos_cfg);
 }
 
-static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
-{
-       struct dpu_plane *pdpu = to_dpu_plane(plane);
-       struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
-
-       if (!pdpu->is_rt_pipe)
-               return;
-
-       pm_runtime_get_sync(&dpu_kms->pdev->dev);
-       _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
-       pm_runtime_put_sync(&dpu_kms->pdev->dev);
-}
-
 /**
  * _dpu_plane_set_ot_limit - set OT limit for the given plane
  * @plane:             Pointer to drm plane
@@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane)
 }
 
 #ifdef CONFIG_DEBUG_FS
+static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+       struct dpu_plane *pdpu = to_dpu_plane(plane);
+       struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+       if (!pdpu->is_rt_pipe)
+               return;
+
+       pm_runtime_get_sync(&dpu_kms->pdev->dev);
+       _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+       pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
 static ssize_t _dpu_plane_danger_read(struct file *file,
                        char __user *buff, size_t count, loff_t *ppos)
 {
index 9cd6a96c6bf2a522d413681f20d918753921f554..927e5d86f7c17a77eca29c8836d87efeefd984b7 100644 (file)
@@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
-               struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+               struct msm_gem_vma *vma, int prot,
+               struct sg_table *sgt, int npages);
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma);
 
@@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt);
 
+__printf(2, 3)
 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
 
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
@@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
 int msm_debugfs_late_init(struct drm_device *dev);
 int msm_rd_debugfs_init(struct drm_minor *minor);
 void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
+__printf(3, 4)
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
                const char *fmt, ...);
 int msm_perf_debugfs_init(struct drm_minor *minor);
 void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
 #else
 static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+__printf(3, 4)
 static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
                const char *fmt, ...) {}
 static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
index 51a95da694d8d498dee29bd91ddb880b3478c356..c8886d3071fa35756682d9a27662b61074c27713 100644 (file)
@@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
        struct page **pages;
+       int prot = IOMMU_READ;
+
+       if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
+               prot |= IOMMU_WRITE;
 
        WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
@@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
-       return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-                       obj->size >> PAGE_SHIFT);
+       return msm_gem_map_vma(aspace, vma, prot,
+                       msm_obj->sgt, obj->size >> PAGE_SHIFT);
 }
 
 /* get iova and pin it. Should have a matching put */
index 557360788084eb3db21e0c964722a0ce1923f92b..49c04829cf34412ae0b07f6358f596e3b11eae58 100644 (file)
@@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 
 int
 msm_gem_map_vma(struct msm_gem_address_space *aspace,
-               struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+               struct msm_gem_vma *vma, int prot,
+               struct sg_table *sgt, int npages)
 {
        unsigned size = npages << PAGE_SHIFT;
        int ret = 0;
@@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 
        if (aspace->mmu)
                ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-                               size, IOMMU_READ | IOMMU_WRITE);
+                               size, prot);
 
        if (ret)
                vma->mapped = false;
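
A quick illustration of what the prot plumbing above changes in practice: a buffer object created with MSM_BO_GPU_READONLY is now mapped into the GPU address space with IOMMU_READ only, instead of the former unconditional IOMMU_READ | IOMMU_WRITE. Hypothetical allocation sketch; msm_gem_new() and the MSM_BO_* flags come from the existing msm driver/UAPI, not from this patch:

#include "msm_drv.h"

static struct drm_gem_object *example_alloc_ro_bo(struct drm_device *dev)
{
        /*
         * When this object is later pinned, msm_gem_pin_iova() builds
         * prot = IOMMU_READ (no IOMMU_WRITE) and msm_gem_map_vma()
         * forwards that prot to aspace->mmu->funcs->map().
         */
        return msm_gem_new(dev, SZ_4K, MSM_BO_GPU_READONLY | MSM_BO_WC);
}
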
index 5f3eff3043554491eca50a559599b0e5293e1357..10babd18e28605b1c76faf55c51748c5e85504f1 100644 (file)
@@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        }
 
        /* Get Interrupt: */
-       gpu->irq = platform_get_irq_byname(pdev, config->irqname);
+       gpu->irq = platform_get_irq(pdev, 0);
        if (gpu->irq < 0) {
                ret = gpu->irq;
                DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
index efb49bb64191732a0a8ee683e1f9790389295fa9..ca17086f72c923352328674a9aa50da5c71a6ff1 100644 (file)
@@ -31,7 +31,6 @@ struct msm_gpu_state;
 
 struct msm_gpu_config {
        const char *ioname;
-       const char *irqname;
        uint64_t va_start;
        uint64_t va_end;
        unsigned int nr_rings;
@@ -63,7 +62,7 @@ struct msm_gpu_funcs {
        struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
        void (*recover)(struct msm_gpu *gpu);
        void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
        /* show GPU status in debugfs: */
        void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
                        struct drm_printer *p);
index 90e9d0a48dc0409feca3c8feab838fa83aeed946..d21172933d92804f7847b9a1b4d1ec14001c8570 100644 (file)
@@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
                char *fptr = &fifo->buf[fifo->head];
                int n;
 
-               wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+               wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+               if (!rd->open)
+                       return;
 
                /* Note that smp_load_acquire() is not strictly required
                 * as CIRC_SPACE_TO_END() does not access the tail more
@@ -213,7 +215,10 @@ static int rd_open(struct inode *inode, struct file *file)
 static int rd_release(struct inode *inode, struct file *file)
 {
        struct msm_rd_state *rd = inode->i_private;
+
        rd->open = false;
+       wake_up_all(&rd->fifo_event);
+
        return 0;
 }
 
index 061d2e0d9011ee88991b3f0fb1b4e2dd54925bee..416da53767018ca9acbcef98689c173af0732fca 100644 (file)
@@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
        val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
        val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
        writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
+
+       clk_disable_unprepare(hdmi->tmds_clk);
 }
 
 static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder)
 
        DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
 
+       clk_prepare_enable(hdmi->tmds_clk);
+
        sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
        val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
        val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
index f41d5fe51abe3b812b600c7fa79d789f350ef3ce..9993b692598fb84d1700e26ef7f97856ff842955 100644 (file)
@@ -125,6 +125,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
 {
        struct hid_collection *collection;
        unsigned usage;
+       int collection_index;
 
        usage = parser->local.usage[0];
 
@@ -167,13 +168,13 @@ static int open_collection(struct hid_parser *parser, unsigned type)
        parser->collection_stack[parser->collection_stack_ptr++] =
                parser->device->maxcollection;
 
-       collection = parser->device->collection +
-               parser->device->maxcollection++;
+       collection_index = parser->device->maxcollection++;
+       collection = parser->device->collection + collection_index;
        collection->type = type;
        collection->usage = usage;
        collection->level = parser->collection_stack_ptr - 1;
-       collection->parent = parser->active_collection;
-       parser->active_collection = collection;
+       collection->parent_idx = (collection->level == 0) ? -1 :
+               parser->collection_stack[collection->level - 1];
 
        if (type == HID_COLLECTION_APPLICATION)
                parser->device->maxapplication++;
@@ -192,8 +193,6 @@ static int close_collection(struct hid_parser *parser)
                return -EINVAL;
        }
        parser->collection_stack_ptr--;
-       if (parser->active_collection)
-               parser->active_collection = parser->active_collection->parent;
        return 0;
 }
 
@@ -1006,10 +1005,12 @@ static void hid_apply_multiplier_to_field(struct hid_device *hid,
                usage = &field->usage[i];
 
                collection = &hid->collection[usage->collection_index];
-               while (collection && collection != multiplier_collection)
-                       collection = collection->parent;
+               while (collection->parent_idx != -1 &&
+                      collection != multiplier_collection)
+                       collection = &hid->collection[collection->parent_idx];
 
-               if (collection || multiplier_collection == NULL)
+               if (collection->parent_idx != -1 ||
+                   multiplier_collection == NULL)
                        usage->resolution_multiplier = effective_multiplier;
 
        }
@@ -1044,9 +1045,9 @@ static void hid_apply_multiplier(struct hid_device *hid,
         * applicable fields later.
         */
        multiplier_collection = &hid->collection[multiplier->usage->collection_index];
-       while (multiplier_collection &&
+       while (multiplier_collection->parent_idx != -1 &&
               multiplier_collection->type != HID_COLLECTION_LOGICAL)
-               multiplier_collection = multiplier_collection->parent;
+               multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
 
        effective_multiplier = hid_calculate_multiplier(hid, multiplier);
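
With collections now carrying the index of their parent in hid->collection[] instead of a pointer, ancestry walks become index chases like the loops above. A minimal, hypothetical helper showing the pattern:

#include <linux/hid.h>

/* Return the parent collection, or NULL for a top-level collection. */
static struct hid_collection *
example_collection_parent(struct hid_device *hid, struct hid_collection *col)
{
        if (col->parent_idx == -1)
                return NULL;
        return &hid->collection[col->parent_idx];
}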
 
index 518fa76414f560f8e76d88a2079310cc8b8c4936..24f846d67478cec0d71501569cef2ea5f706cbab 100644 (file)
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
 
+#define I2C_VENDOR_ID_GOODIX           0x27c6
+#define I2C_DEVICE_ID_GOODIX_01F0      0x01f0
+
 #define USB_VENDOR_ID_GOODTOUCH                0x1aad
 #define USB_DEVICE_ID_GOODTOUCH_000f   0x000f
 
index 8555ce7e737b37a78160d930a9ba12ed311e001b..c5edfa966343dc098c63af7d4513ffeed02348de 100644 (file)
@@ -179,6 +179,8 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
        { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
                I2C_HID_QUIRK_NO_RUNTIME_PM },
+       { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
+               I2C_HID_QUIRK_NO_RUNTIME_PM },
        { 0, 0 }
 };
 
index ce0ba20627236dde727d2f6fd486a1b8d281d327..bea4c9850247bcdbdc9a4668719e2f6fa8ad4fe2 100644 (file)
@@ -701,19 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 int vmbus_disconnect_ring(struct vmbus_channel *channel)
 {
        struct vmbus_channel *cur_channel, *tmp;
-       unsigned long flags;
-       LIST_HEAD(list);
        int ret;
 
        if (channel->primary_channel != NULL)
                return -EINVAL;
 
-       /* Snapshot the list of subchannels */
-       spin_lock_irqsave(&channel->lock, flags);
-       list_splice_init(&channel->sc_list, &list);
-       spin_unlock_irqrestore(&channel->lock, flags);
-
-       list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+       list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
                if (cur_channel->rescind)
                        wait_for_completion(&cur_channel->rescind_event);
 
index 5301fef16c31b740fab409ced2f3e7ef6c5bbe23..7c6349a50ef173421cdafea3b289ae9868da711e 100644 (file)
@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
                        pfn_cnt -= pgs_ol;
                        /*
                         * Check if the corresponding memory block is already
-                        * online by checking its last previously backed page.
-                        * In case it is we need to bring rest (which was not
-                        * backed previously) online too.
+                        * online. It is possible to observe struct pages still
+                        * being uninitialized here so check section instead.
+                        * In case the section is online we need to bring the
+                        * rest of pfns (which were not backed previously)
+                        * online too.
                         */
                        if (start_pfn > has->start_pfn &&
-                           !PageReserved(pfn_to_page(start_pfn - 1)))
+                           online_section_nr(pfn_to_section_nr(start_pfn)))
                                hv_bring_pgs_online(has, start_pfn, pgs_ol);
 
                }
index 64d0c85d51611199f9bd98e97efe0b54d992df5f..1f1a55e077338cabf233bba7630d805f84fd96f6 100644 (file)
@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
 }
 
 /* Get various debug metrics for the specified ring buffer. */
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-                                struct hv_ring_buffer_debug_info *debug_info)
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+                               struct hv_ring_buffer_debug_info *debug_info)
 {
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
 
-       if (ring_info->ring_buffer) {
-               hv_get_ringbuffer_availbytes(ring_info,
-                                       &bytes_avail_toread,
-                                       &bytes_avail_towrite);
-
-               debug_info->bytes_avail_toread = bytes_avail_toread;
-               debug_info->bytes_avail_towrite = bytes_avail_towrite;
-               debug_info->current_read_index =
-                       ring_info->ring_buffer->read_index;
-               debug_info->current_write_index =
-                       ring_info->ring_buffer->write_index;
-               debug_info->current_interrupt_mask =
-                       ring_info->ring_buffer->interrupt_mask;
-       }
+       if (!ring_info->ring_buffer)
+               return -EINVAL;
+
+       hv_get_ringbuffer_availbytes(ring_info,
+                                    &bytes_avail_toread,
+                                    &bytes_avail_towrite);
+       debug_info->bytes_avail_toread = bytes_avail_toread;
+       debug_info->bytes_avail_towrite = bytes_avail_towrite;
+       debug_info->current_read_index = ring_info->ring_buffer->read_index;
+       debug_info->current_write_index = ring_info->ring_buffer->write_index;
+       debug_info->current_interrupt_mask
+               = ring_info->ring_buffer->interrupt_mask;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
 
index d0ff65675292bd8b9ac7c6fa99d708e94484a62b..403fee01572c5c93cefadebf0895a7302f777325 100644 (file)
@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+                                         &outbound);
+       if (ret < 0)
+               return ret;
+
        return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(out_intr_mask);
@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+                                         &outbound);
+       if (ret < 0)
+               return ret;
        return sprintf(buf, "%d\n", outbound.current_read_index);
 }
 static DEVICE_ATTR_RO(out_read_index);
@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+                                         &outbound);
+       if (ret < 0)
+               return ret;
        return sprintf(buf, "%d\n", outbound.current_write_index);
 }
 static DEVICE_ATTR_RO(out_write_index);
@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+                                         &outbound);
+       if (ret < 0)
+               return ret;
        return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+                                         &outbound);
+       if (ret < 0)
+               return ret;
        return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       if (ret < 0)
+               return ret;
+
        return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(in_intr_mask);
@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       if (ret < 0)
+               return ret;
+
        return sprintf(buf, "%d\n", inbound.current_read_index);
 }
 static DEVICE_ATTR_RO(in_read_index);
@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       if (ret < 0)
+               return ret;
+
        return sprintf(buf, "%d\n", inbound.current_write_index);
 }
 static DEVICE_ATTR_RO(in_write_index);
@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       if (ret < 0)
+               return ret;
+
        return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
+       int ret;
 
        if (!hv_dev->channel)
                return -ENODEV;
-       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-               return -EINVAL;
-       hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+       ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+       if (ret < 0)
+               return ret;
+
        return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(in_write_bytes_avail);
index 4c8c7a620d08dae851de513eebe2710dee3ca89e..a5dc13576394ffa8d7be2c2ae9298211ea216f47 100644 (file)
@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
                drive->proc = proc_mkdir(drive->name, parent);
                if (drive->proc) {
                        ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
-                       proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
+                       proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
                                        drive->proc, &ide_settings_proc_fops,
                                        drive);
                }
index cfc8b94527b97cda3f4b20782af0fbc2b6260f2b..aa4e431cbcd3543ebd617eb4c7a880868eb3f723 100644 (file)
@@ -252,6 +252,8 @@ static const struct xpad_device {
        { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
        { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
        { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+       { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+       { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
        { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
        { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
        XPAD_XBOXONE_VENDOR(0x0e6f),            /* 0x0e6f X-Box One controllers */
        XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
        XPAD_XBOXONE_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x1038),            /* SteelSeries Controllers */
        XPAD_XBOX360_VENDOR(0x11c9),            /* Nacon GC100XF */
        XPAD_XBOX360_VENDOR(0x12ab),            /* X-Box 360 dance pads */
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
index 8ec483e8688be194078d07f3b47fa40d7f75e9ac..26ec603fe2208522bf562954e452d69e2500527a 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
+#include <linux/overflow.h>
 #include <linux/input/mt.h>
 #include "../input-compat.h"
 
@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
 static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
                                   const struct input_absinfo *abs)
 {
-       int min, max;
+       int min, max, range;
 
        min = abs->minimum;
        max = abs->maximum;
@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
                return -EINVAL;
        }
 
-       if (abs->flat > max - min) {
+       if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
                printk(KERN_DEBUG
                       "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
                       UINPUT_NAME, code, abs->flat, min, max);
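
check_sub_overflow() from <linux/overflow.h> computes max - min into a local and returns true if the subtraction overflowed, so the abs->flat comparison above is only made on a well-defined range. The same pattern in isolation, as a sketch:

#include <linux/overflow.h>

/* Return true when "flat" fits inside the [min, max] span. */
static bool example_flat_in_range(int min, int max, int flat)
{
        int range;

        /* e.g. min = INT_MIN, max = INT_MAX would overflow "max - min". */
        if (check_sub_overflow(max, min, &range))
                return false;

        return flat <= range;
}
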
index b36084710f696626b3f9a595f22c357169a30fdf..bae08226e3d95de0e9d2001090b09a4bae29a861 100644 (file)
@@ -195,6 +195,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
+       priv->dev = &pdev->dev;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base)) {
@@ -248,7 +250,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
-       priv->dev = &pdev->dev;
        device_init_wakeup(priv->dev, 1);
        platform_set_drvdata(pdev, priv);
 
index af6027cc7bbfae2687c1524ee6265c53f6feb39b..068dbbc610fce4d8c9f79c7a9466cd39614ed625 100644 (file)
@@ -698,7 +698,7 @@ config TOUCHSCREEN_EDT_FT5X06
 
 config TOUCHSCREEN_RASPBERRYPI_FW
        tristate "Raspberry Pi's firmware base touch screen support"
-       depends on RASPBERRYPI_FIRMWARE || COMPILE_TEST
+       depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
        help
          Say Y here if you have the official Raspberry Pi 7 inch screen on
          your system.
index d8947b28db2d832523e91db80f7b7a57aa13064e..f04a6df65eb856db877859a06064aa8a182446fa 100644 (file)
@@ -224,7 +224,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
         * If we have reason to believe the IOMMU driver missed the initial
         * probe for dev, replay it to get things in order.
         */
-       if (dev->bus && !device_iommu_mapped(dev))
+       if (!err && dev->bus && !device_iommu_mapped(dev))
                err = iommu_probe_device(dev);
 
        /* Ignore all other errors apart from EPROBE_DEFER */
index db20e992a40f2b617c1c4d5858e4b1e0555ffbce..7f2a45445b00e04aecb29b94b694cb4d51c9593a 100644 (file)
@@ -2399,13 +2399,14 @@ static void its_free_device(struct its_device *its_dev)
        kfree(its_dev);
 }
 
-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
 {
        int idx;
 
-       idx = find_first_zero_bit(dev->event_map.lpi_map,
-                                 dev->event_map.nr_lpis);
-       if (idx == dev->event_map.nr_lpis)
+       idx = bitmap_find_free_region(dev->event_map.lpi_map,
+                                     dev->event_map.nr_lpis,
+                                     get_count_order(nvecs));
+       if (idx < 0)
                return -ENOSPC;
 
        *hwirq = dev->event_map.lpi_base + idx;
@@ -2501,21 +2502,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
        int err;
        int i;
 
-       for (i = 0; i < nr_irqs; i++) {
-               err = its_alloc_device_irq(its_dev, &hwirq);
-               if (err)
-                       return err;
+       err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
+       if (err)
+               return err;
 
-               err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+       for (i = 0; i < nr_irqs; i++) {
+               err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
                if (err)
                        return err;
 
                irq_domain_set_hwirq_and_chip(domain, virq + i,
-                                             hwirq, &its_irq_chip, its_dev);
+                                             hwirq + i, &its_irq_chip, its_dev);
                irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
                pr_debug("ID:%d pID:%d vID:%d\n",
-                        (int)(hwirq - its_dev->event_map.lpi_base),
-                        (int) hwirq, virq + i);
+                        (int)(hwirq + i - its_dev->event_map.lpi_base),
+                        (int)(hwirq + i), virq + i);
        }
 
        return 0;
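
The move from find_first_zero_bit() to bitmap_find_free_region() is what makes multi-vector allocation work here: the event IDs for one request are now contiguous and naturally aligned, matching the hwirq + i values programmed in the loop above. Stand-alone sketch of the bitmap call, with illustrative names:

#include <linux/bitmap.h>
#include <linux/bitops.h>

/*
 * Reserve a naturally aligned, contiguous block of event IDs, rounded
 * up to a power of two; returns the first index or -ENOSPC.
 */
static int example_alloc_events(unsigned long *lpi_map, unsigned int nr_lpis,
                                int nvecs)
{
        int idx = bitmap_find_free_region(lpi_map, nr_lpis,
                                          get_count_order(nvecs));

        return idx < 0 ? -ENOSPC : idx;
}
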
index ad70e7c416e3014ca95d76e7e058cdec986c17ab..fbfa7ff6deb1644aa1fbec858a0567ada0d7cb44 100644 (file)
@@ -24,7 +24,7 @@ struct mbi_range {
        unsigned long           *bm;
 };
 
-static struct mutex            mbi_lock;
+static DEFINE_MUTEX(mbi_lock);
 static phys_addr_t             mbi_phys_base;
 static struct mbi_range                *mbi_ranges;
 static unsigned int            mbi_range_nr;
index e9256dee1a45d93a799fa31dab24b892ae7973e7..8b81271c823c69975d17bd74ea520cab11eb2299 100644 (file)
@@ -7,7 +7,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
@@ -16,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/of_irq.h>
 #include <linux/irqchip/irq-madera.h>
 #include <linux/mfd/madera/core.h>
index 6edfd4bfa169ecbf488c897154d5e7000a9c5581..a93296b9b45debecfb723e780f3f381b15660d2e 100644 (file)
@@ -822,6 +822,7 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
 static const struct irq_domain_ops stm32_exti_h_domain_ops = {
        .alloc  = stm32_exti_h_domain_alloc,
        .free   = irq_domain_free_irqs_common,
+       .xlate = irq_domain_xlate_twocell,
 };
 
 static int
index 0ff22159a0ca96d6b9c831bdd8a1e8ba724b90a9..47d4e0d30bf08b3c5179c9196100b129f8b0437c 100644 (file)
@@ -2414,9 +2414,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
         * capi:cipher_api_spec-iv:ivopts
         */
        tmp = &cipher_in[strlen("capi:")];
-       cipher_api = strsep(&tmp, "-");
-       *ivmode = strsep(&tmp, ":");
-       *ivopts = tmp;
+
+       /* Separate IV options if present, it can contain another '-' in hash name */
+       *ivopts = strrchr(tmp, ':');
+       if (*ivopts) {
+               **ivopts = '\0';
+               (*ivopts)++;
+       }
+       /* Parse IV mode */
+       *ivmode = strrchr(tmp, '-');
+       if (*ivmode) {
+               **ivmode = '\0';
+               (*ivmode)++;
+       }
+       /* The rest is crypto API spec */
+       cipher_api = tmp;
 
        if (*ivmode && !strcmp(*ivmode, "lmk"))
                cc->tfms_count = 64;
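
The rewritten capi: parser above splits from the right, so a '-' inside the crypto API spec or a hash name no longer truncates it; for a hypothetical input "capi:authenc(hmac(sha3-256),cbc(aes))-essiv:sha256" it now yields cipher_api "authenc(hmac(sha3-256),cbc(aes))", ivmode "essiv" and ivopts "sha256". The split logic in isolation:

#include <linux/string.h>

/*
 * Split "<cipher_api>-<ivmode>:<ivopts>" in place, scanning from the
 * right; ivmode and ivopts come back NULL when absent.
 */
static void example_split_capi(char *tmp, char **cipher_api,
                               char **ivmode, char **ivopts)
{
        *ivopts = strrchr(tmp, ':');
        if (*ivopts) {
                **ivopts = '\0';
                (*ivopts)++;
        }

        *ivmode = strrchr(tmp, '-');
        if (*ivmode) {
                **ivmode = '\0';
                (*ivmode)++;
        }

        *cipher_api = tmp;
}
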
@@ -2486,11 +2498,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
                goto bad_mem;
 
        chainmode = strsep(&tmp, "-");
-       *ivopts = strsep(&tmp, "-");
-       *ivmode = strsep(&*ivopts, ":");
-
-       if (tmp)
-               DMWARN("Ignoring unexpected additional cipher options");
+       *ivmode = strsep(&tmp, ":");
+       *ivopts = tmp;
 
        /*
         * For compatibility with the original dm-crypt mapping format, if
index 20b0776e39ef3307aa5c418016afff4758850a68..ed3caceaed07c07c33e16b9038f0c3bffd7616d5 100644 (file)
@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
        return r;
 }
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 {
        int r;
        uint32_t ref_count;
@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
        down_read(&pmd->root_lock);
        r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
        if (!r)
-               *result = (ref_count != 0);
+               *result = (ref_count > 1);
        up_read(&pmd->root_lock);
 
        return r;
index 35e954ea20a9b5923ffc5b4d1a7a2cb5cdb3c314..f6be0d733c20267f569b72ab14314d0565425e80 100644 (file)
@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
 
 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
 
 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
index dadd9696340c00d13a47d38822fe886aaf5713d8..ca8af21bf644cce7d58ebbc6668eeaac8583f6f8 100644 (file)
@@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
         * passdown we have to check that these blocks are now unused.
         */
        int r = 0;
-       bool used = true;
+       bool shared = true;
        struct thin_c *tc = m->tc;
        struct pool *pool = tc->pool;
        dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
@@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
        while (b != end) {
                /* find start of unmapped run */
                for (; b < end; b++) {
-                       r = dm_pool_block_is_used(pool->pmd, b, &used);
+                       r = dm_pool_block_is_shared(pool->pmd, b, &shared);
                        if (r)
                                goto out;
 
-                       if (!used)
+                       if (!shared)
                                break;
                }
 
@@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 
                /* find end of run */
                for (e = b + 1; e != end; e++) {
-                       r = dm_pool_block_is_used(pool->pmd, e, &used);
+                       r = dm_pool_block_is_shared(pool->pmd, e, &shared);
                        if (r)
                                goto out;
 
-                       if (used)
+                       if (shared)
                                break;
                }
 
index d67c95ef8d7e9487c21a994977e182c0976e7264..2b53c3841b530b591c0c8ed688c0ea2b94b5f273 100644 (file)
@@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 
        __bio_clone_fast(clone, bio);
 
-       if (unlikely(bio_integrity(bio) != NULL)) {
+       if (bio_integrity(bio)) {
                int r;
 
                if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
@@ -1336,11 +1336,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
                        return r;
        }
 
-       bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
-       clone->bi_iter.bi_size = to_bytes(len);
-
-       if (unlikely(bio_integrity(bio) != NULL))
-               bio_integrity_trim(clone);
+       bio_trim(clone, sector - clone->bi_iter.bi_sector, len);
 
        return 0;
 }
@@ -1588,6 +1584,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
        ci->sector = bio->bi_iter.bi_sector;
 }
 
+#define __dm_part_stat_sub(part, field, subnd) \
+       (part_stat_get(part, field) -= (subnd))
+
 /*
  * Entry point to split a bio into clones and submit them to the targets.
  */
@@ -1642,7 +1641,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                                struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
                                                          GFP_NOIO, &md->queue->bio_split);
                                ci.io->orig_bio = b;
+
+                               /*
+                                * Adjust IO stats for each split, otherwise upon queue
+                                * reentry there will be redundant IO accounting.
+                                * NOTE: this is a stop-gap fix, a proper fix involves
+                                * significant refactoring of DM core's bio splitting
+                                * (by eliminating DM's splitting and just using bio_split)
+                                */
+                               part_stat_lock();
+                               __dm_part_stat_sub(&dm_disk(md)->part0,
+                                                  sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+                               part_stat_unlock();
+
                                bio_chain(b, bio);
+                               trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
                                ret = generic_make_request(bio);
                                break;
                        }
@@ -1713,6 +1726,15 @@ static blk_qc_t __process_bio(struct mapped_device *md,
        return ret;
 }
 
+static blk_qc_t dm_process_bio(struct mapped_device *md,
+                              struct dm_table *map, struct bio *bio)
+{
+       if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+               return __process_bio(md, map, bio);
+       else
+               return __split_and_process_bio(md, map, bio);
+}
+
 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct mapped_device *md = q->queuedata;
@@ -1733,10 +1755,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
                return ret;
        }
 
-       if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-               ret = __process_bio(md, map, bio);
-       else
-               ret = __split_and_process_bio(md, map, bio);
+       ret = dm_process_bio(md, map, bio);
 
        dm_put_live_table(md, srcu_idx);
        return ret;
@@ -2415,9 +2434,9 @@ static void dm_wq_work(struct work_struct *work)
                        break;
 
                if (dm_request_based(md))
-                       generic_make_request(c);
+                       (void) generic_make_request(c);
                else
-                       __split_and_process_bio(md, map, c);
+                       (void) dm_process_bio(md, map, c);
        }
 
        dm_put_live_table(md, srcu_idx);
index 12980a4ad46080b3e510ccd237a421c4bde8325c..ee6fb6af655e294e33c0a0a8c97b811a2295b01a 100644 (file)
@@ -10,6 +10,7 @@ obj-$(CONFIG_MFD_88PM805)     += 88pm805.o 88pm80x.o
 obj-$(CONFIG_MFD_ACT8945A)     += act8945a.o
 obj-$(CONFIG_MFD_SM501)                += sm501.o
 obj-$(CONFIG_MFD_ASIC3)                += asic3.o tmio_core.o
+obj-$(CONFIG_ARCH_BCM2835)     += bcm2835-pm.o
 obj-$(CONFIG_MFD_BCM590XX)     += bcm590xx.o
 obj-$(CONFIG_MFD_BD9571MWV)    += bd9571mwv.o
 cros_ec_core-objs              := cros_ec.o
diff --git a/drivers/mfd/bcm2835-pm.c b/drivers/mfd/bcm2835-pm.c
new file mode 100644 (file)
index 0000000..42fe67f
--- /dev/null
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PM MFD driver for Broadcom BCM2835
+ *
+ * This driver binds to the PM block and creates the MFD device for
+ * the WDT and power drivers.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/bcm2835-pm.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+static const struct mfd_cell bcm2835_pm_devs[] = {
+       { .name = "bcm2835-wdt" },
+};
+
+static const struct mfd_cell bcm2835_power_devs[] = {
+       { .name = "bcm2835-power" },
+};
+
+static int bcm2835_pm_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct device *dev = &pdev->dev;
+       struct bcm2835_pm *pm;
+       int ret;
+
+       pm = devm_kzalloc(dev, sizeof(*pm), GFP_KERNEL);
+       if (!pm)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, pm);
+
+       pm->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pm->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(pm->base))
+               return PTR_ERR(pm->base);
+
+       ret = devm_mfd_add_devices(dev, -1,
+                                  bcm2835_pm_devs, ARRAY_SIZE(bcm2835_pm_devs),
+                                  NULL, 0, NULL);
+       if (ret)
+               return ret;
+
+       /* We'll use the presence of the AXI ASB regs in the
+        * bcm2835-pm binding as the key for whether we can reference
+        * the full PM register range and support power domains.
+        */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (res) {
+               pm->asb = devm_ioremap_resource(dev, res);
+               if (IS_ERR(pm->asb))
+                       return PTR_ERR(pm->asb);
+
+               ret = devm_mfd_add_devices(dev, -1,
+                                          bcm2835_power_devs,
+                                          ARRAY_SIZE(bcm2835_power_devs),
+                                          NULL, 0, NULL);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id bcm2835_pm_of_match[] = {
+       { .compatible = "brcm,bcm2835-pm-wdt", },
+       { .compatible = "brcm,bcm2835-pm", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_pm_of_match);
+
+static struct platform_driver bcm2835_pm_driver = {
+       .probe          = bcm2835_pm_probe,
+       .driver = {
+               .name = "bcm2835-pm",
+               .of_match_table = bcm2835_pm_of_match,
+       },
+};
+module_platform_driver(bcm2835_pm_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM MFD");
+MODULE_LICENSE("GPL");
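
The MFD cells registered above get this device as their platform parent, so a child driver (for instance a hypothetical watchdog cell) can reach the shared register mappings through the parent's drvdata. Sketch, assuming the struct bcm2835_pm layout from <linux/mfd/bcm2835-pm.h>:

#include <linux/mfd/bcm2835-pm.h>
#include <linux/platform_device.h>

static int example_child_probe(struct platform_device *pdev)
{
        /* The parent bcm2835-pm driver stored its state with
         * platform_set_drvdata(), so children read it from the parent.
         */
        struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);

        if (!pm || !pm->base)
                return -ENODEV;

        /* pm->asb is only mapped when the optional AXI ASB resource
         * was present, i.e. when power domains are supported.
         */
        return 0;
}
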
index b8aaa684c397b0b8be8fe0c5ae00a37b087b6997..2ed23c99f59fdbebab6cc49b032e04fa5f0c020c 100644 (file)
@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
  *
  * Return:
  *     0 - Success
+ *     Non-zero - Failure
  */
 static int ibmvmc_open(struct inode *inode, struct file *file)
 {
        struct ibmvmc_file_session *session;
-       int rc = 0;
 
        pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
                 (unsigned long)inode, (unsigned long)file,
                 ibmvmc.state);
 
        session = kzalloc(sizeof(*session), GFP_KERNEL);
+       if (!session)
+               return -ENOMEM;
+
        session->file = file;
        file->private_data = session;
 
-       return rc;
+       return 0;
 }
 
 /**
index 78c26cebf5d40c9e547ffc0d7cd3e0dbbedae62c..8f7616557c97acd18d7808a71012a643affa68a1 100644 (file)
@@ -1187,9 +1187,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
                dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;
 
                if (dma_setup_res->status) {
-                       dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
-                                dma_setup_res->status,
-                                mei_hbm_status_str(dma_setup_res->status));
+                       u8 status = dma_setup_res->status;
+
+                       if (status == MEI_HBMS_NOT_ALLOWED) {
+                               dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
+                       } else {
+                               dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
+                                        status,
+                                        mei_hbm_status_str(status));
+                       }
                        dev->hbm_f_dr_supported = 0;
                        mei_dmam_ring_free(dev);
                }
index e4b10b2d1a0838af03f16f29922b86c653cf99ba..23739a60517f8efc7fcf57136c6a759b9ca2d401 100644 (file)
 #define MEI_DEV_ID_BXT_M      0x1A9A  /* Broxton M */
 #define MEI_DEV_ID_APL_I      0x5A9A  /* Apollo Lake I */
 
+#define MEI_DEV_ID_DNV_IE     0x19E5  /* Denverton IE */
+
 #define MEI_DEV_ID_GLK        0x319A  /* Gemini Lake */
 
 #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */
index 73ace2d59dea9787c010d07769ed20b9881bcb76..e89497f858ae352bc773838a823c2a2113f7ccc5 100644 (file)
@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
+
        {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
index 595ac065b4016d762b7fd69aaf7932dd6152cac6..95ff7c5a1dfb62c1363006b5a0919562572cfbce 100644 (file)
@@ -70,8 +70,12 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context)
        struct resource r;
 
        if (acpi_dev_resource_io(res, &r)) {
+#ifdef CONFIG_HAS_IOPORT_MAP
                base = ioport_map(r.start, resource_size(&r));
                return AE_OK;
+#else
+               return AE_ERROR;
+#endif
        } else if (acpi_dev_resource_memory(res, &r)) {
                base = ioremap(r.start, resource_size(&r));
                return AE_OK;
index e26b8145efb32411bf4feb2659163a8b7ae59350..a44ec8bb54181d048f5cdd7dc64a8c3aed91e208 100644 (file)
@@ -116,7 +116,7 @@ config MMC_RICOH_MMC
 
 config MMC_SDHCI_ACPI
        tristate "SDHCI support for ACPI enumerated SDHCI controllers"
-       depends on MMC_SDHCI && ACPI
+       depends on MMC_SDHCI && ACPI && PCI
        select IOSF_MBI if X86
        help
          This selects support for ACPI enumerated SDHCI controllers,
@@ -978,7 +978,7 @@ config MMC_SDHCI_OMAP
        tristate "TI SDHCI Controller Support"
        depends on MMC_SDHCI_PLTFM && OF
        select THERMAL
-       select TI_SOC_THERMAL
+       imply TI_SOC_THERMAL
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          support present in TI's DRA7 SOCs. The controller supports
index ed8f2254b66a842adf97213628d0ba8dcb6a3dd2..aa38b1a8017e8c0a522adf49efacd92593f8f22c 100644 (file)
@@ -1,11 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2018 Mellanox Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 #include <linux/bitfield.h>
index c2690c1a50ffc1e2a086916d3aa4b870d2c80c07..f19ec60bcbdc246a1c2c995a8c16265048696d22 100644 (file)
@@ -179,6 +179,8 @@ struct meson_host {
        struct sd_emmc_desc *descs;
        dma_addr_t descs_dma_addr;
 
+       int irq;
+
        bool vqmmc_enabled;
 };
 
@@ -738,6 +740,11 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
        struct meson_host *host = mmc_priv(mmc);
+       int adj = 0;
+
+       /* enable signal resampling w/o delay */
+       adj = ADJUST_ADJ_EN;
+       writel(adj, host->regs + host->data->adjust);
 
        return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
 }
@@ -768,6 +775,9 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 
+               /* disable signal resampling */
+               writel(0, host->regs + host->data->adjust);
+
                /* Reset rx phase */
                clk_set_phase(host->rx_clk, 0);
 
@@ -1166,7 +1176,7 @@ static int meson_mmc_get_cd(struct mmc_host *mmc)
 
 static void meson_mmc_cfg_init(struct meson_host *host)
 {
-       u32 cfg = 0, adj = 0;
+       u32 cfg = 0;
 
        cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
                          ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
@@ -1177,10 +1187,6 @@ static void meson_mmc_cfg_init(struct meson_host *host)
        cfg |= CFG_ERR_ABORT;
 
        writel(cfg, host->regs + SD_EMMC_CFG);
-
-       /* enable signal resampling w/o delay */
-       adj = ADJUST_ADJ_EN;
-       writel(adj, host->regs + host->data->adjust);
 }
 
 static int meson_mmc_card_busy(struct mmc_host *mmc)
@@ -1231,7 +1237,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
        struct resource *res;
        struct meson_host *host;
        struct mmc_host *mmc;
-       int ret, irq;
+       int ret;
 
        mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
        if (!mmc)
@@ -1276,8 +1282,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
                goto free_host;
        }
 
-       irq = platform_get_irq(pdev, 0);
-       if (irq <= 0) {
+       host->irq = platform_get_irq(pdev, 0);
+       if (host->irq <= 0) {
                dev_err(&pdev->dev, "failed to get interrupt resource.\n");
                ret = -EINVAL;
                goto free_host;
@@ -1331,9 +1337,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
        writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
               host->regs + SD_EMMC_IRQ_EN);
 
-       ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
-                                       meson_mmc_irq_thread, IRQF_SHARED,
-                                       NULL, host);
+       ret = request_threaded_irq(host->irq, meson_mmc_irq,
+                       meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
        if (ret)
                goto err_init_clk;
 
@@ -1351,7 +1356,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
        if (host->bounce_buf == NULL) {
                dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
                ret = -ENOMEM;
-               goto err_init_clk;
+               goto err_free_irq;
        }
 
        host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
@@ -1370,6 +1375,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
 err_bounce_buf:
        dma_free_coherent(host->dev, host->bounce_buf_size,
                          host->bounce_buf, host->bounce_dma_addr);
+err_free_irq:
+       free_irq(host->irq, host);
 err_init_clk:
        clk_disable_unprepare(host->mmc_clk);
 err_core_clk:
@@ -1387,6 +1394,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
 
        /* disable interrupts */
        writel(0, host->regs + SD_EMMC_IRQ_EN);
+       free_irq(host->irq, host);
 
        dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
                          host->descs, host->descs_dma_addr);
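
The meson-gx hunks above move from a devm-managed interrupt to a manually managed one, so the driver decides exactly when the handler is released (in the probe error path and in remove(), before the descriptor and bounce buffers are freed). A minimal sketch of that pattern; the "foo" driver and every name in it are invented for illustration and are not part of this commit:

// SPDX-License-Identifier: GPL-2.0
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_host {
        int irq;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        /* hard handler: acknowledge the hardware, then wake the thread */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_host *host;
        int ret;

        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        host->irq = platform_get_irq(pdev, 0);
        if (host->irq <= 0)
                return -EINVAL;

        /* not devm-managed: freed explicitly below, not at device teardown */
        ret = request_threaded_irq(host->irq, foo_irq, foo_irq_thread,
                                   IRQF_SHARED, dev_name(&pdev->dev), host);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, host);
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo_host *host = platform_get_drvdata(pdev);

        /* release the IRQ before anything the handler might still touch */
        free_irq(host->irq, host);
        return 0;
}

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = { .name = "foo-host" },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");
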
index 0db99057c44f7185ae0f6642d47b9c833faa77cb..9d12c06c7fd683b97ae8e8c241ec9a53cdbc033d 100644 (file)
@@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
 
        iproc_host->data = iproc_data;
 
-       mmc_of_parse(host->mmc);
+       ret = mmc_of_parse(host->mmc);
+       if (ret)
+               goto err;
+
        sdhci_get_property(pdev);
 
        host->mmc->caps |= iproc_host->data->mmc_caps;
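
mmc_of_parse() can fail, for example with -EPROBE_DEFER while a card-detect GPIO or regulator is not ready yet, which is why its return value is now propagated instead of being dropped. A tiny, hypothetical fragment showing the same check (not taken from the patch):

#include <linux/mmc/host.h>

/* Propagate DT parsing failures from a host driver's probe path. */
static int demo_parse_host_dt(struct mmc_host *mmc)
{
        int ret;

        ret = mmc_of_parse(mmc);
        if (ret)
                return ret;     /* may be -EPROBE_DEFER; do not swallow it */

        return 0;
}
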
index 3b3f88ffab53cded04a2c1586727bf82472f43ad..c05e4d50d43d74a32ef38c656fb85bbb8b06e962 100644 (file)
@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
 struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
 {
        struct can_priv *priv = netdev_priv(dev);
-       struct sk_buff *skb = priv->echo_skb[idx];
-       struct canfd_frame *cf;
 
        if (idx >= priv->echo_skb_max) {
                netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
                return NULL;
        }
 
-       if (!skb) {
-               netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
-                          __func__, idx);
-               return NULL;
-       }
+       if (priv->echo_skb[idx]) {
+               /* Using "struct canfd_frame::len" for the frame
+                * length is supported on both CAN and CANFD frames.
+                */
+               struct sk_buff *skb = priv->echo_skb[idx];
+               struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+               u8 len = cf->len;
 
-       /* Using "struct canfd_frame::len" for the frame
-        * length is supported on both CAN and CANFD frames.
-        */
-       cf = (struct canfd_frame *)skb->data;
-       *len_ptr = cf->len;
-       priv->echo_skb[idx] = NULL;
+               *len_ptr = len;
+               priv->echo_skb[idx] = NULL;
 
-       return skb;
+               return skb;
+       }
+
+       return NULL;
 }
 
 /*
index 0f36eafe3ac16d5ba55432608acfd12eee0dd8e0..1c66fb2ad76bebbfbcdce793d0685fef5a627773 100644 (file)
@@ -1106,7 +1106,7 @@ static int flexcan_chip_start(struct net_device *dev)
                }
        } else {
                /* clear and invalidate unused mailboxes first */
-               for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) {
+               for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
                        mb = flexcan_get_mb(priv, i);
                        priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
                                    &mb->can_ctrl);
@@ -1432,7 +1432,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
        gpr_np = of_find_node_by_phandle(phandle);
        if (!gpr_np) {
                dev_dbg(&pdev->dev, "could not find gpr node by phandle\n");
-               return PTR_ERR(gpr_np);
+               return -ENODEV;
        }
 
        priv = netdev_priv(dev);
index 02921d877c08a7a93556db45a116d78ed2c485cd..aa1d1f5339d2a4b78ee2092d0c9683e1262e2581 100644 (file)
@@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
 
                phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
                                     priv->phy_iface);
-               if (IS_ERR(phydev))
+               if (IS_ERR(phydev)) {
                        netdev_err(dev, "Could not attach to PHY\n");
+                       phydev = NULL;
+               }
 
        } else {
                int ret;
index 809a155eb193c7021734392956373c89323fd5aa..f6d244c663fd5d8f7a9524455c96798c9ac30663 100644 (file)
@@ -9,8 +9,9 @@ config FSL_DPAA2_ETH
 
 config FSL_DPAA2_PTP_CLOCK
        tristate "Freescale DPAA2 PTP Clock"
-       depends on FSL_DPAA2_ETH && POSIX_TIMERS
-       select PTP_1588_CLOCK
+       depends on FSL_DPAA2_ETH
+       imply PTP_1588_CLOCK
+       default y
        help
          This driver adds support for using the DPAA2 1588 timer module
          as a PTP clock.
index 1ca9a18139ec5b3b63c6a655a4bc19a3d4bb8428..c500ea77aaa05014588d7e7b5089539e545e618b 100644 (file)
@@ -1902,7 +1902,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
 
                /* Register the new context */
                channel->dpio = dpaa2_io_service_select(i);
-               err = dpaa2_io_service_register(channel->dpio, nctx);
+               err = dpaa2_io_service_register(channel->dpio, nctx, dev);
                if (err) {
                        dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
                        /* If no affine DPIO for this core, there's probably
@@ -1942,7 +1942,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
        return 0;
 
 err_set_cdan:
-       dpaa2_io_service_deregister(channel->dpio, nctx);
+       dpaa2_io_service_deregister(channel->dpio, nctx, dev);
 err_service_reg:
        free_channel(priv, channel);
 err_alloc_ch:
@@ -1962,13 +1962,14 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
 
 static void free_dpio(struct dpaa2_eth_priv *priv)
 {
-       int i;
+       struct device *dev = priv->net_dev->dev.parent;
        struct dpaa2_eth_channel *ch;
+       int i;
 
        /* deregister CDAN notifications and free channels */
        for (i = 0; i < priv->num_channels; i++) {
                ch = priv->channel[i];
-               dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
+               dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
                free_channel(priv, ch);
        }
 }
index ae0f88bce9aa6edca13e5cba73b5aba8814097ff..2370dc204202f556093ae200b64a11d36ce36f42 100644 (file)
@@ -3467,7 +3467,7 @@ fec_probe(struct platform_device *pdev)
        if (ret)
                goto failed_clk_ipg;
 
-       fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+       fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
                ret = regulator_enable(fep->reg_phy);
                if (ret) {
index 098d8764c0ea96ed2c270e287672cccb7b720c45..dd71d5db727471d0047608a39d12dc78f3a8848a 100644 (file)
@@ -1313,7 +1313,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
        unsigned long lpar_rc;
        u16 mss = 0;
 
-restart_poll:
        while (frames_processed < budget) {
                if (!ibmveth_rxq_pending_buffer(adapter))
                        break;
@@ -1401,7 +1400,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
                    napi_reschedule(napi)) {
                        lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                                               VIO_IRQ_DISABLE);
-                       goto restart_poll;
                }
        }
 
index db909b6069b5076208dbae35bd9358676c050ed5..65f8a4b6ed0c45e59e206ea8a907275ae2177b9e 100644 (file)
@@ -306,14 +306,16 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
 
        if (entries_per_copy < entries) {
                for (i = 0; i < entries / entries_per_copy; i++) {
-                       err = copy_to_user(buf, init_ents, PAGE_SIZE);
+                       err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ?
+                               -EFAULT : 0;
                        if (err)
                                goto out;
 
                        buf += PAGE_SIZE;
                }
        } else {
-               err = copy_to_user(buf, init_ents, entries * cqe_size);
+               err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
+                       -EFAULT : 0;
        }
 
 out:
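
copy_to_user() returns the number of bytes it could not copy rather than a negative errno, which is what the hunk above accounts for by mapping any nonzero return to -EFAULT. A small, hypothetical helper illustrating the idiom (not part of the patch):

#include <linux/uaccess.h>

/* Copy a kernel buffer to user space; return 0 on success or -EFAULT. */
static int demo_copy_to_user(void __user *dst, const void *src, size_t len)
{
        /* copy_to_user() returns the number of bytes left uncopied */
        if (copy_to_user(dst, src, len))
                return -EFAULT;

        return 0;
}
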
index 7df728f1e5b526809d6db486a80e9feb841b598c..6e501af0e5322d648adf3938f9ac0e4d2a2d6360 100644 (file)
@@ -2067,9 +2067,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 {
        struct mlx4_cmd_mailbox *mailbox;
        __be32 *outbox;
+       u64 qword_field;
        u32 dword_field;
-       int err;
+       u16 word_field;
        u8 byte_field;
+       int err;
        static const u8 a0_dmfs_query_hw_steering[] =  {
                [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
                [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2097,19 +2099,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 
        /* QPC/EEC/CQC/EQC/RDMARC attributes */
 
-       MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
-       MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
-       MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
-       MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
-       MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
-       MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
-       MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
-       MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
-       MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
-       MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
-       MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
-       MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
-       MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+       MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+       param->qpc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+       param->log_num_qps = byte_field & 0x1f;
+       MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+       param->srqc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+       param->log_num_srqs = byte_field & 0x1f;
+       MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+       param->cqc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+       param->log_num_cqs = byte_field & 0x1f;
+       MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+       param->altc_base = qword_field;
+       MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+       param->auxc_base = qword_field;
+       MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+       param->eqc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+       param->log_num_eqs = byte_field & 0x1f;
+       MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+       param->num_sys_eqs = word_field & 0xfff;
+       MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+       param->rdmarc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+       param->log_rd_per_qp = byte_field & 0x7;
 
        MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
        if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2128,22 +2143,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
        /* steering attributes */
        if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
                MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
-               MLX4_GET(param->log_mc_entry_sz, outbox,
-                        INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
-               MLX4_GET(param->log_mc_table_sz, outbox,
-                        INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
-               MLX4_GET(byte_field, outbox,
-                        INIT_HCA_FS_A0_OFFSET);
+               MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+               param->log_mc_entry_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+               param->log_mc_table_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
                param->dmfs_high_steer_mode =
                        a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
        } else {
                MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
-               MLX4_GET(param->log_mc_entry_sz, outbox,
-                        INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
-               MLX4_GET(param->log_mc_hash_sz,  outbox,
-                        INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-               MLX4_GET(param->log_mc_table_sz, outbox,
-                        INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+               MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+               param->log_mc_entry_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field,  outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+               param->log_mc_hash_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+               param->log_mc_table_sz = byte_field & 0x1f;
        }
 
        /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2167,15 +2181,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
        /* TPT attributes */
 
        MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
-       MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
-       MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+       param->mw_enabled = byte_field >> 7;
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       param->log_mpt_sz = byte_field & 0x3f;
        MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
        MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
 
        /* UAR attributes */
 
        MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
-       MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+       param->log_uar_sz = byte_field & 0xf;
 
        /* phv_check enable */
        MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
index ffc1ada4e6da3408a0d13867618a7e735fd5c8df..d28c8f9ca55ba31bacbd0f96e0538616621da14f 100644 (file)
@@ -343,7 +343,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        int i;
 
        priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-               ETH_HLEN + VLAN_HLEN;
+               ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
 
        /* Allocate RX and TX skb rings */
        priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -524,13 +524,15 @@ static void ravb_rx_csum(struct sk_buff *skb)
 {
        u8 *hw_csum;
 
-       /* The hardware checksum is 2 bytes appended to packet data */
-       if (unlikely(skb->len < 2))
+       /* The hardware checksum is contained in sizeof(__sum16) (2) bytes
+        * appended to packet data
+        */
+       if (unlikely(skb->len < sizeof(__sum16)))
                return;
-       hw_csum = skb_tail_pointer(skb) - 2;
+       hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
        skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
        skb->ip_summed = CHECKSUM_COMPLETE;
-       skb_trim(skb, skb->len - 2);
+       skb_trim(skb, skb->len - sizeof(__sum16));
 }
 
 /* Packet receive function for Ethernet AVB */
index b6a50058bb8db90b0f8c459469b3cc058692efdc..2f2bda68d861492cc6f42cbe27ca9b0cf0f46b80 100644 (file)
@@ -6046,22 +6046,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
        { NVRAM_PARTITION_TYPE_EXPANSION_UEFI,     0,    0, "sfc_uefi" },
        { NVRAM_PARTITION_TYPE_STATUS,             0,    0, "sfc_status" }
 };
+#define EF10_NVRAM_PARTITION_COUNT     ARRAY_SIZE(efx_ef10_nvram_types)
 
 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
                                        struct efx_mcdi_mtd_partition *part,
-                                       unsigned int type)
+                                       unsigned int type,
+                                       unsigned long *found)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
        const struct efx_ef10_nvram_type_info *info;
        size_t size, erase_size, outlen;
+       int type_idx = 0;
        bool protected;
        int rc;
 
-       for (info = efx_ef10_nvram_types; ; info++) {
-               if (info ==
-                   efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
+       for (type_idx = 0; ; type_idx++) {
+               if (type_idx == EF10_NVRAM_PARTITION_COUNT)
                        return -ENODEV;
+               info = efx_ef10_nvram_types + type_idx;
                if ((type & ~info->type_mask) == info->type)
                        break;
        }
@@ -6074,6 +6077,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
        if (protected)
                return -ENODEV; /* hide it */
 
+       /* If we've already exposed a partition of this type, hide this
+        * duplicate.  All operations on MTDs are keyed by the type anyway,
+        * so we can't act on the duplicate.
+        */
+       if (__test_and_set_bit(type_idx, found))
+               return -EEXIST;
+
        part->nvram_type = type;
 
        MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
@@ -6105,6 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
 static int efx_ef10_mtd_probe(struct efx_nic *efx)
 {
        MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+       DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT);
        struct efx_mcdi_mtd_partition *parts;
        size_t outlen, n_parts_total, i, n_parts;
        unsigned int type;
@@ -6133,11 +6144,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
        for (i = 0; i < n_parts_total; i++) {
                type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
                                        i);
-               rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
-               if (rc == 0)
-                       n_parts++;
-               else if (rc != -ENODEV)
+               rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
+                                                 found);
+               if (rc == -EEXIST || rc == -ENODEV)
+                       continue;
+               if (rc)
                        goto fail;
+               n_parts++;
        }
 
        rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
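
The duplicate suppression above keeps a small bitmap of partition types already exposed: the first probe of a type sets the bit, later probes of the same type find it set and return -EEXIST. A minimal, hypothetical sketch of that bookkeeping (the names and DEMO_TYPE_COUNT are invented; the real code uses EF10_NVRAM_PARTITION_COUNT):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define DEMO_TYPE_COUNT 16

/* Returns true only the first time a given type index is claimed. */
static bool claim_type(unsigned long *found, unsigned int type_idx)
{
        /* non-atomic variant is fine: probing runs on a single thread */
        return !__test_and_set_bit(type_idx, found);
}

static void demo(void)
{
        DECLARE_BITMAP(found, DEMO_TYPE_COUNT);

        bitmap_zero(found, DEMO_TYPE_COUNT);
        claim_type(found, 3);   /* true: first occurrence, expose the MTD */
        claim_type(found, 3);   /* false: duplicate, caller skips it */
}
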
index 9020b084b953880466cdf9c0354ee8208b987873..7ec4eb74fe2160b53af40e08e4bc95feda125100 100644 (file)
@@ -1,22 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
 /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
  *
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
  * This driver uses the sungem driver (c) David Miller
  * (davem@redhat.com) as its basis.
  *
index 13f3860496a861d3d2f9716fabd93add43b74f6c..ae5f05f03f8804e14616cc1c040b462d43b7351f 100644 (file)
@@ -1,23 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
 /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
  * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
  *
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
  * vendor id: 0x108E (Sun Microsystems, Inc.)
  * device id: 0xabba (Cassini)
  * revision ids: 0x01 = Cassini
index ef6f766f6389380894d855494d371035fc26325e..e859ae2e42d5a152a567de048e898eeafa99fcb5 100644 (file)
@@ -144,6 +144,8 @@ struct hv_netvsc_packet {
        u32 total_data_buflen;
 };
 
+#define NETVSC_HASH_KEYLEN 40
+
 struct netvsc_device_info {
        unsigned char mac_adr[ETH_ALEN];
        u32  num_chn;
@@ -151,6 +153,8 @@ struct netvsc_device_info {
        u32  recv_sections;
        u32  send_section_size;
        u32  recv_section_size;
+
+       u8 rss_key[NETVSC_HASH_KEYLEN];
 };
 
 enum rndis_device_state {
@@ -160,8 +164,6 @@ enum rndis_device_state {
        RNDIS_DEV_DATAINITIALIZED,
 };
 
-#define NETVSC_HASH_KEYLEN 40
-
 struct rndis_device {
        struct net_device *ndev;
 
@@ -209,7 +211,9 @@ int netvsc_recv_callback(struct net_device *net,
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
+int rndis_set_subchannel(struct net_device *ndev,
+                        struct netvsc_device *nvdev,
+                        struct netvsc_device_info *dev_info);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
@@ -1177,7 +1181,7 @@ enum ndis_per_pkt_info_type {
 
 enum rndis_per_pkt_info_interal_type {
        RNDIS_PKTINFO_ID = 1,
-       /* Add more memebers here */
+       /* Add more members here */
 
        RNDIS_PKTINFO_MAX
 };
index 922054c1d5448bf6406742c52a94a7bc4226575c..813d195bbd57fed2ef96ea708b637ca8197be458 100644 (file)
@@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w)
 
        rdev = nvdev->extension;
        if (rdev) {
-               ret = rndis_set_subchannel(rdev->ndev, nvdev);
+               ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
                if (ret == 0) {
                        netif_device_attach(rdev->ndev);
                } else {
@@ -1331,7 +1331,7 @@ void netvsc_channel_cb(void *context)
        prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
 
        if (napi_schedule_prep(&nvchan->napi)) {
-               /* disable interupts from host */
+               /* disable interrupts from host */
                hv_begin_read(rbi);
 
                __napi_schedule_irqoff(&nvchan->napi);
index 91ed15ea58835c6704838c69ada7b5b0c98b6715..256adbd044f5ea138154c0a9126aacdaf1aafcb4 100644 (file)
@@ -370,7 +370,7 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
 {
        int j = 0;
 
-       /* Deal with compund pages by ignoring unused part
+       /* Deal with compound pages by ignoring unused part
         * of the page.
         */
        page += (offset >> PAGE_SHIFT);
@@ -858,6 +858,39 @@ static void netvsc_get_channels(struct net_device *net,
        }
 }
 
+/* Alloc struct netvsc_device_info, and initialize it from either existing
+ * struct netvsc_device, or from default values.
+ */
+static struct netvsc_device_info *netvsc_devinfo_get
+                       (struct netvsc_device *nvdev)
+{
+       struct netvsc_device_info *dev_info;
+
+       dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
+
+       if (!dev_info)
+               return NULL;
+
+       if (nvdev) {
+               dev_info->num_chn = nvdev->num_chn;
+               dev_info->send_sections = nvdev->send_section_cnt;
+               dev_info->send_section_size = nvdev->send_section_size;
+               dev_info->recv_sections = nvdev->recv_section_cnt;
+               dev_info->recv_section_size = nvdev->recv_section_size;
+
+               memcpy(dev_info->rss_key, nvdev->extension->rss_key,
+                      NETVSC_HASH_KEYLEN);
+       } else {
+               dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+               dev_info->send_sections = NETVSC_DEFAULT_TX;
+               dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
+               dev_info->recv_sections = NETVSC_DEFAULT_RX;
+               dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
+       }
+
+       return dev_info;
+}
+
 static int netvsc_detach(struct net_device *ndev,
                         struct netvsc_device *nvdev)
 {
@@ -909,7 +942,7 @@ static int netvsc_attach(struct net_device *ndev,
                return PTR_ERR(nvdev);
 
        if (nvdev->num_chn > 1) {
-               ret = rndis_set_subchannel(ndev, nvdev);
+               ret = rndis_set_subchannel(ndev, nvdev, dev_info);
 
                /* if unavailable, just proceed with one queue */
                if (ret) {
@@ -943,7 +976,7 @@ static int netvsc_set_channels(struct net_device *net,
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        unsigned int orig, count = channels->combined_count;
-       struct netvsc_device_info device_info;
+       struct netvsc_device_info *device_info;
        int ret;
 
        /* We do not support separate count for rx, tx, or other */
@@ -962,24 +995,26 @@ static int netvsc_set_channels(struct net_device *net,
 
        orig = nvdev->num_chn;
 
-       memset(&device_info, 0, sizeof(device_info));
-       device_info.num_chn = count;
-       device_info.send_sections = nvdev->send_section_cnt;
-       device_info.send_section_size = nvdev->send_section_size;
-       device_info.recv_sections = nvdev->recv_section_cnt;
-       device_info.recv_section_size = nvdev->recv_section_size;
+       device_info = netvsc_devinfo_get(nvdev);
+
+       if (!device_info)
+               return -ENOMEM;
+
+       device_info->num_chn = count;
 
        ret = netvsc_detach(net, nvdev);
        if (ret)
-               return ret;
+               goto out;
 
-       ret = netvsc_attach(net, &device_info);
+       ret = netvsc_attach(net, device_info);
        if (ret) {
-               device_info.num_chn = orig;
-               if (netvsc_attach(net, &device_info))
+               device_info->num_chn = orig;
+               if (netvsc_attach(net, device_info))
                        netdev_err(net, "restoring channel setting failed\n");
        }
 
+out:
+       kfree(device_info);
        return ret;
 }
 
@@ -1048,48 +1083,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
        int orig_mtu = ndev->mtu;
-       struct netvsc_device_info device_info;
+       struct netvsc_device_info *device_info;
        int ret = 0;
 
        if (!nvdev || nvdev->destroy)
                return -ENODEV;
 
+       device_info = netvsc_devinfo_get(nvdev);
+
+       if (!device_info)
+               return -ENOMEM;
+
        /* Change MTU of underlying VF netdev first. */
        if (vf_netdev) {
                ret = dev_set_mtu(vf_netdev, mtu);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
-       memset(&device_info, 0, sizeof(device_info));
-       device_info.num_chn = nvdev->num_chn;
-       device_info.send_sections = nvdev->send_section_cnt;
-       device_info.send_section_size = nvdev->send_section_size;
-       device_info.recv_sections = nvdev->recv_section_cnt;
-       device_info.recv_section_size = nvdev->recv_section_size;
-
        ret = netvsc_detach(ndev, nvdev);
        if (ret)
                goto rollback_vf;
 
        ndev->mtu = mtu;
 
-       ret = netvsc_attach(ndev, &device_info);
-       if (ret)
-               goto rollback;
-
-       return 0;
+       ret = netvsc_attach(ndev, device_info);
+       if (!ret)
+               goto out;
 
-rollback:
        /* Attempt rollback to original MTU */
        ndev->mtu = orig_mtu;
 
-       if (netvsc_attach(ndev, &device_info))
+       if (netvsc_attach(ndev, device_info))
                netdev_err(ndev, "restoring mtu failed\n");
 rollback_vf:
        if (vf_netdev)
                dev_set_mtu(vf_netdev, orig_mtu);
 
+out:
+       kfree(device_info);
        return ret;
 }
 
@@ -1674,7 +1706,7 @@ static int netvsc_set_ringparam(struct net_device *ndev,
 {
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
-       struct netvsc_device_info device_info;
+       struct netvsc_device_info *device_info;
        struct ethtool_ringparam orig;
        u32 new_tx, new_rx;
        int ret = 0;
@@ -1694,26 +1726,29 @@ static int netvsc_set_ringparam(struct net_device *ndev,
            new_rx == orig.rx_pending)
                return 0;        /* no change */
 
-       memset(&device_info, 0, sizeof(device_info));
-       device_info.num_chn = nvdev->num_chn;
-       device_info.send_sections = new_tx;
-       device_info.send_section_size = nvdev->send_section_size;
-       device_info.recv_sections = new_rx;
-       device_info.recv_section_size = nvdev->recv_section_size;
+       device_info = netvsc_devinfo_get(nvdev);
+
+       if (!device_info)
+               return -ENOMEM;
+
+       device_info->send_sections = new_tx;
+       device_info->recv_sections = new_rx;
 
        ret = netvsc_detach(ndev, nvdev);
        if (ret)
-               return ret;
+               goto out;
 
-       ret = netvsc_attach(ndev, &device_info);
+       ret = netvsc_attach(ndev, device_info);
        if (ret) {
-               device_info.send_sections = orig.tx_pending;
-               device_info.recv_sections = orig.rx_pending;
+               device_info->send_sections = orig.tx_pending;
+               device_info->recv_sections = orig.rx_pending;
 
-               if (netvsc_attach(ndev, &device_info))
+               if (netvsc_attach(ndev, device_info))
                        netdev_err(ndev, "restoring ringparam failed");
        }
 
+out:
+       kfree(device_info);
        return ret;
 }
 
@@ -2088,7 +2123,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
        if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
                return NOTIFY_DONE;
 
-       /* if syntihetic interface is a different namespace,
+       /* if synthetic interface is a different namespace,
         * then move the VF to that namespace; join will be
         * done again in that context.
         */
@@ -2167,7 +2202,7 @@ static int netvsc_probe(struct hv_device *dev,
 {
        struct net_device *net = NULL;
        struct net_device_context *net_device_ctx;
-       struct netvsc_device_info device_info;
+       struct netvsc_device_info *device_info = NULL;
        struct netvsc_device *nvdev;
        int ret = -ENOMEM;
 
@@ -2214,21 +2249,21 @@ static int netvsc_probe(struct hv_device *dev,
        netif_set_real_num_rx_queues(net, 1);
 
        /* Notify the netvsc driver of the new device */
-       memset(&device_info, 0, sizeof(device_info));
-       device_info.num_chn = VRSS_CHANNEL_DEFAULT;
-       device_info.send_sections = NETVSC_DEFAULT_TX;
-       device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
-       device_info.recv_sections = NETVSC_DEFAULT_RX;
-       device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
-
-       nvdev = rndis_filter_device_add(dev, &device_info);
+       device_info = netvsc_devinfo_get(NULL);
+
+       if (!device_info) {
+               ret = -ENOMEM;
+               goto devinfo_failed;
+       }
+
+       nvdev = rndis_filter_device_add(dev, device_info);
        if (IS_ERR(nvdev)) {
                ret = PTR_ERR(nvdev);
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
                goto rndis_failed;
        }
 
-       memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+       memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
 
        /* We must get rtnl lock before scheduling nvdev->subchan_work,
         * otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2236,7 +2271,7 @@ static int netvsc_probe(struct hv_device *dev,
         * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
         * -> ... -> device_add() -> ... -> __device_attach() can't get
         * the device lock, so all the subchannels can't be processed --
-        * finally netvsc_subchan_work() hangs for ever.
+        * finally netvsc_subchan_work() hangs forever.
         */
        rtnl_lock();
 
@@ -2266,12 +2301,16 @@ static int netvsc_probe(struct hv_device *dev,
 
        list_add(&net_device_ctx->list, &netvsc_dev_list);
        rtnl_unlock();
+
+       kfree(device_info);
        return 0;
 
 register_failed:
        rtnl_unlock();
        rndis_filter_device_remove(dev, nvdev);
 rndis_failed:
+       kfree(device_info);
+devinfo_failed:
        free_percpu(net_device_ctx->vf_stats);
 no_stats:
        hv_set_drvdata(dev, NULL);
index 8b537a049c1e5960c2f3f31c3afc59b4b4ef19e0..73b60592de06ca5e9dc6a2c5bc9db8a34ec88cbf 100644 (file)
@@ -774,8 +774,8 @@ rndis_filter_set_offload_params(struct net_device *ndev,
        return ret;
 }
 
-int rndis_filter_set_rss_param(struct rndis_device *rdev,
-                              const u8 *rss_key)
+static int rndis_set_rss_param_msg(struct rndis_device *rdev,
+                                  const u8 *rss_key, u16 flag)
 {
        struct net_device *ndev = rdev->ndev;
        struct rndis_request *request;
@@ -804,7 +804,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
        rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
        rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
        rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
-       rssp->flag = 0;
+       rssp->flag = flag;
        rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
                         NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
                         NDIS_HASH_TCP_IPV6;
@@ -829,9 +829,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
 
        wait_for_completion(&request->wait_event);
        set_complete = &request->response_msg.msg.set_complete;
-       if (set_complete->status == RNDIS_STATUS_SUCCESS)
-               memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
-       else {
+       if (set_complete->status == RNDIS_STATUS_SUCCESS) {
+               if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
+                   !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
+                       memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
+
+       } else {
                netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
                           set_complete->status);
                ret = -EINVAL;
@@ -842,6 +845,16 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
        return ret;
 }
 
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+                              const u8 *rss_key)
+{
+       /* Disable RSS before change */
+       rndis_set_rss_param_msg(rdev, rss_key,
+                               NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
+
+       return rndis_set_rss_param_msg(rdev, rss_key, 0);
+}
+
 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
                                                 struct netvsc_device *net_device)
 {
@@ -1121,7 +1134,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
  * This breaks overlap of processing the host message for the
  * new primary channel with the initialization of sub-channels.
  */
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
+int rndis_set_subchannel(struct net_device *ndev,
+                        struct netvsc_device *nvdev,
+                        struct netvsc_device_info *dev_info)
 {
        struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1161,8 +1176,11 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
        wait_event(nvdev->subchan_open,
                   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
 
-       /* ignore failues from setting rss parameters, still have channels */
-       rndis_filter_set_rss_param(rdev, netvsc_hash_key);
+       /* ignore failures from setting rss parameters, still have channels */
+       if (dev_info)
+               rndis_filter_set_rss_param(rdev, dev_info->rss_key);
+       else
+               rndis_filter_set_rss_param(rdev, netvsc_hash_key);
 
        netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
        netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
index 8ebe7f5484aee02af92d47a4e286d970adcc73b9..f14ba5366b911e7791472f9d1cbabaae8b6fb622 100644 (file)
@@ -1,13 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
 /* Driver for Asix PHYs
  *
  * Author: Michael Schmitz <schmitzmic@gmail.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
  */
 #include <linux/kernel.h>
 #include <linux/errno.h>
index b03fedd6c1d847164857d218fa1cb99ca987aebf..287f3ccf1da1deb649f2852ec8ed73ff573bd219 100644 (file)
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Hisilicon Fast Ethernet MDIO Bus Driver
  *
  * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/clk.h>
@@ -163,4 +151,4 @@ module_platform_driver(hisi_femac_mdio_driver);
 
 MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver");
 MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
index f1da70b9b55ffa6107a372611e3ea34ad9d321c7..95abf7072f3226ecb02897d372b71b2bd8d082e3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /**
  * drivers/net/phy/rockchip.c
  *
@@ -6,12 +7,6 @@
  * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
  *
  * David Wu <david.wu@rock-chips.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #include <linux/ethtool.h>
@@ -229,4 +224,4 @@ MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl);
 
 MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>");
 MODULE_DESCRIPTION("Rockchip Ethernet PHY driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
index b654f05b2ccd0b85c88cd42d52dc7c4fea44b868..3d93993e74da09abfa63252247c680be069401d3 100644 (file)
@@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
        chipcode &= AX_CHIPCODE_MASK;
 
-       (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
-                                           ax88772a_hw_reset(dev, 0);
+       ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
+                                                 ax88772a_hw_reset(dev, 0);
+
+       if (ret < 0) {
+               netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
+               return ret;
+       }
 
        /* Read PHYID register *AFTER* the PHY was reset properly */
        phyid = asix_get_phyid(dev);
index 3a4b8786f7ea92d4286135c2070fe5a9158691dd..320edcac469985308ea96a7883dcdf86367d83ae 100644 (file)
@@ -2761,6 +2761,11 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                                        BIT(NL80211_CHAN_WIDTH_160);
        }
 
+       if (!n_limits) {
+               err = -EINVAL;
+               goto failed_hw;
+       }
+
        data->if_combination.n_limits = n_limits;
        data->if_combination.max_interfaces = 2048;
        data->if_combination.limits = data->if_limits;
index 64b218699656f5a02a70ea1357868d96793d8e49..3a93e4d9828bb5355934a4b06a6bf157188cfeaa 100644 (file)
@@ -530,8 +530,10 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
        SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
        dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
 
-       if (!dev->ieee80211_ptr)
+       if (!dev->ieee80211_ptr) {
+               err = -ENOMEM;
                goto remove_handler;
+       }
 
        dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
        dev->ieee80211_ptr->wiphy = common_wiphy;
index 0cf58cabc9eda43aed6a006a88b4f18fc88daa18..3cf50274fadb023b485fa613954b5b51af83240f 100644 (file)
@@ -26,6 +26,12 @@ static int nvdimm_probe(struct device *dev)
        struct nvdimm_drvdata *ndd;
        int rc;
 
+       rc = nvdimm_security_setup_events(dev);
+       if (rc < 0) {
+               dev_err(dev, "security event setup failed: %d\n", rc);
+               return rc;
+       }
+
        rc = nvdimm_check_config_data(dev);
        if (rc) {
                /* not required for non-aliased nvdimm, ex. NVDIMM-N */
index 4890310df87440a3fcd459de19d563e026c0922d..efe412a6b5b916220a1313a7ac194ce10f5232ca 100644 (file)
@@ -578,13 +578,25 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
 }
 EXPORT_SYMBOL_GPL(__nvdimm_create);
 
-int nvdimm_security_setup_events(struct nvdimm *nvdimm)
+static void shutdown_security_notify(void *data)
 {
-       nvdimm->sec.overwrite_state = sysfs_get_dirent(nvdimm->dev.kobj.sd,
-                       "security");
+       struct nvdimm *nvdimm = data;
+
+       sysfs_put(nvdimm->sec.overwrite_state);
+}
+
+int nvdimm_security_setup_events(struct device *dev)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+
+       if (nvdimm->sec.state < 0 || !nvdimm->sec.ops
+                       || !nvdimm->sec.ops->overwrite)
+               return 0;
+       nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
        if (!nvdimm->sec.overwrite_state)
-               return -ENODEV;
-       return 0;
+               return -ENOMEM;
+
+       return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
 }
 EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);
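
The sysfs_get_dirent() reference taken above is now paired with a sysfs_put() through devm_add_action_or_reset(), so the cleanup runs automatically at driver detach, or immediately if registering the action fails. A short, hypothetical example of the same pattern with an unrelated resource (not from this patch):

#include <linux/device.h>
#include <linux/slab.h>

static void demo_release_cookie(void *data)
{
        kfree(data);
}

static int demo_attach_cookie(struct device *dev)
{
        void *cookie = kzalloc(64, GFP_KERNEL);

        if (!cookie)
                return -ENOMEM;

        /*
         * On success the action fires when the driver detaches from dev;
         * on failure it is invoked right away and the error is returned.
         */
        return devm_add_action_or_reset(dev, demo_release_cookie, cookie);
}
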
 
index cfde992684e7db07de208b94b0f4b382e59701d3..379bf4305e6159a2568940df7cb06f07f774b55c 100644 (file)
@@ -250,6 +250,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
 void nvdimm_set_aliasing(struct device *dev);
 void nvdimm_set_locked(struct device *dev);
 void nvdimm_clear_locked(struct device *dev);
+int nvdimm_security_setup_events(struct device *dev);
 #if IS_ENABLED(CONFIG_NVDIMM_KEYS)
 int nvdimm_security_unlock(struct device *dev);
 #else
index df4b3a6db51bfdf8307e38da3cd568209266f0a5..b9fff3b8ed1b1dd180b50de141bbc3d2af73a485 100644 (file)
@@ -545,8 +545,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
        timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
        ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
                ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
-       if (!(ctrl->anacap & (1 << 6)))
-               ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+       ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
 
        if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
                dev_err(ctrl->device,
index 0a2fd2949ad788b9f5daae0acf3d7d54b8a6ad18..52abc3a6de129cab702ee1ca488bf8946940657e 100644 (file)
@@ -119,6 +119,7 @@ struct nvme_rdma_ctrl {
 
        struct nvme_ctrl        ctrl;
        bool                    use_inline_data;
+       u32                     io_queues[HCTX_MAX_TYPES];
 };
 
 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -165,8 +166,8 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
 {
        return nvme_rdma_queue_idx(queue) >
-               queue->ctrl->ctrl.opts->nr_io_queues +
-               queue->ctrl->ctrl.opts->nr_write_queues;
+               queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+               queue->ctrl->io_queues[HCTX_TYPE_READ];
 }
 
 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
@@ -661,8 +662,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
        nr_io_queues = min_t(unsigned int, nr_io_queues,
                                ibdev->num_comp_vectors);
 
-       nr_io_queues += min(opts->nr_write_queues, num_online_cpus());
-       nr_io_queues += min(opts->nr_poll_queues, num_online_cpus());
+       if (opts->nr_write_queues) {
+               ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+                               min(opts->nr_write_queues, nr_io_queues);
+               nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
+       } else {
+               ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
+       }
+
+       ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
+
+       if (opts->nr_poll_queues) {
+               ctrl->io_queues[HCTX_TYPE_POLL] =
+                       min(opts->nr_poll_queues, num_online_cpus());
+               nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
+       }
 
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret)
@@ -1689,18 +1703,28 @@ static enum blk_eh_timer_return
 nvme_rdma_timeout(struct request *rq, bool reserved)
 {
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+       struct nvme_rdma_queue *queue = req->queue;
+       struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-       dev_warn(req->queue->ctrl->ctrl.device,
-                "I/O %d QID %d timeout, reset controller\n",
-                rq->tag, nvme_rdma_queue_idx(req->queue));
+       dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+                rq->tag, nvme_rdma_queue_idx(queue));
 
-       /* queue error recovery */
-       nvme_rdma_error_recovery(req->queue->ctrl);
+       if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+               /*
+                * Teardown immediately if controller times out while starting
+                * or if we have already started error recovery. All outstanding
+                * requests are completed on shutdown, so we return BLK_EH_DONE.
+                */
+               flush_work(&ctrl->err_work);
+               nvme_rdma_teardown_io_queues(ctrl, false);
+               nvme_rdma_teardown_admin_queue(ctrl, false);
+               return BLK_EH_DONE;
+       }
 
-       /* fail with DNR on cmd timeout */
-       nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+       dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+       nvme_rdma_error_recovery(ctrl);
 
-       return BLK_EH_DONE;
+       return BLK_EH_RESET_TIMER;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1779,17 +1803,15 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
        struct nvme_rdma_ctrl *ctrl = set->driver_data;
 
        set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-       set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
+       set->map[HCTX_TYPE_DEFAULT].nr_queues =
+                       ctrl->io_queues[HCTX_TYPE_DEFAULT];
+       set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
        if (ctrl->ctrl.opts->nr_write_queues) {
                /* separate read/write queues */
-               set->map[HCTX_TYPE_DEFAULT].nr_queues =
-                               ctrl->ctrl.opts->nr_write_queues;
                set->map[HCTX_TYPE_READ].queue_offset =
-                               ctrl->ctrl.opts->nr_write_queues;
+                               ctrl->io_queues[HCTX_TYPE_DEFAULT];
        } else {
                /* mixed read/write queues */
-               set->map[HCTX_TYPE_DEFAULT].nr_queues =
-                               ctrl->ctrl.opts->nr_io_queues;
                set->map[HCTX_TYPE_READ].queue_offset = 0;
        }
        blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1799,12 +1821,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 
        if (ctrl->ctrl.opts->nr_poll_queues) {
                set->map[HCTX_TYPE_POLL].nr_queues =
-                               ctrl->ctrl.opts->nr_poll_queues;
+                               ctrl->io_queues[HCTX_TYPE_POLL];
                set->map[HCTX_TYPE_POLL].queue_offset =
-                               ctrl->ctrl.opts->nr_io_queues;
+                               ctrl->io_queues[HCTX_TYPE_DEFAULT];
                if (ctrl->ctrl.opts->nr_write_queues)
                        set->map[HCTX_TYPE_POLL].queue_offset +=
-                               ctrl->ctrl.opts->nr_write_queues;
+                               ctrl->io_queues[HCTX_TYPE_READ];
                blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
        }
        return 0;
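
The nvme-rdma hunks above stop deriving the blk-mq queue map from the raw connect options and instead record how many queues each HCTX type actually received, then lay them out back to back. A hedged illustration of the resulting offsets (plain C with invented names, not code from the patch):

#include <linux/types.h>

/* [ default/write | read | poll ] when separate write queues are requested;
 * otherwise the read map overlays the default queues at offset 0.
 */
struct demo_queue_map {
        unsigned int nr_default;
        unsigned int nr_read;
        unsigned int nr_poll;
};

static void demo_offsets(const struct demo_queue_map *m,
                         bool separate_write_queues,
                         unsigned int *read_off, unsigned int *poll_off)
{
        *read_off = separate_write_queues ? m->nr_default : 0;
        *poll_off = m->nr_default +
                    (separate_write_queues ? m->nr_read : 0);
}
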
index 265a0543b381c318f792e5088d8b2f1549cd9556..5f0a004252422f970c2f90ced7a01e4f92dd0041 100644 (file)
@@ -1948,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
        struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 
-       dev_dbg(ctrl->ctrl.device,
+       dev_warn(ctrl->ctrl.device,
                "queue %d: timeout request %#x type %d\n",
-               nvme_tcp_queue_id(req->queue), rq->tag,
-               pdu->hdr.type);
+               nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
 
        if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
-               union nvme_result res = {};
-
-               nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
-               nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res);
+               /*
+                * Teardown immediately if controller times out while starting
+                * or if we have already started error recovery. All outstanding
+                * requests are completed on shutdown, so we return BLK_EH_DONE.
+                */
+               flush_work(&ctrl->err_work);
+               nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
+               nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
                return BLK_EH_DONE;
        }
 
-       /* queue error recovery */
+       dev_warn(ctrl->ctrl.device, "starting error recovery\n");
        nvme_tcp_error_recovery(&ctrl->ctrl);
 
        return BLK_EH_RESET_TIMER;
index a8d23eb80192024e2fe101f91fa58d222a17b698..a884e3a0e8afee4cf0d6bf3b8a8848d1f659c7c0 100644 (file)
@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+                               struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+                               struct nvmet_rdma_rsp *r);
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
        spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
        if (unlikely(!rsp)) {
-               rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+               int ret;
+
+               rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
                if (unlikely(!rsp))
                        return NULL;
+               ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+               if (unlikely(ret)) {
+                       kfree(rsp);
+                       return NULL;
+               }
+
                rsp->allocated = true;
        }
 
@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
        unsigned long flags;
 
        if (unlikely(rsp->allocated)) {
+               nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
                kfree(rsp);
                return;
        }
index 0a7a470ee8594e14ecf905d683179d6ac97c105a..4ad846ceac7ca8a7cbc0024fa904a15ea246ea00 100644 (file)
@@ -192,4 +192,14 @@ config SC27XX_EFUSE
          This driver can also be built as a module. If so, the module
          will be called nvmem-sc27xx-efuse.
 
+config NVMEM_ZYNQMP
+       bool "Xilinx ZYNQMP SoC nvmem firmware support"
+       depends on ARCH_ZYNQMP
+       help
+         This is a driver to access hardware-related data like
+         SoC revision, IDCODE, etc. by using the firmware
+         interface.
+
+         If sure, say yes. If unsure, say no.
+
 endif
index 4e8c61628f1a20eb6b0794fef54e6ba25f5231f5..2ece8ffffdda7eac3c3f4746847b80771a74a7b9 100644 (file)
@@ -41,3 +41,5 @@ obj-$(CONFIG_RAVE_SP_EEPROM)  += nvmem-rave-sp-eeprom.o
 nvmem-rave-sp-eeprom-y         := rave-sp-eeprom.o
 obj-$(CONFIG_SC27XX_EFUSE)     += nvmem-sc27xx-efuse.o
 nvmem-sc27xx-efuse-y           := sc27xx-efuse.o
+obj-$(CONFIG_NVMEM_ZYNQMP)     += nvmem_zynqmp_nvmem.o
+nvmem_zynqmp_nvmem-y           := zynqmp_nvmem.o
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
new file mode 100644 (file)
index 0000000..490c8fc
--- /dev/null
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define SILICON_REVISION_MASK 0xF
+
+struct zynqmp_nvmem_data {
+       struct device *dev;
+       struct nvmem_device *nvmem;
+};
+
+static int zynqmp_nvmem_read(void *context, unsigned int offset,
+                            void *val, size_t bytes)
+{
+       int ret;
+       int idcode, version;
+       struct zynqmp_nvmem_data *priv = context;
+
+       const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+       if (!eemi_ops || !eemi_ops->get_chipid)
+               return -ENXIO;
+
+       ret = eemi_ops->get_chipid(&idcode, &version);
+       if (ret < 0)
+               return ret;
+
+       dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
+       *(int *)val = version & SILICON_REVISION_MASK;
+
+       return 0;
+}
+
+static struct nvmem_config econfig = {
+       .name = "zynqmp-nvmem",
+       .owner = THIS_MODULE,
+       .word_size = 1,
+       .size = 1,
+       .read_only = true,
+};
+
+static const struct of_device_id zynqmp_nvmem_match[] = {
+       { .compatible = "xlnx,zynqmp-nvmem-fw", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
+
+static int zynqmp_nvmem_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct zynqmp_nvmem_data *priv;
+
+       priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->dev = dev;
+       econfig.dev = dev;
+       econfig.reg_read = zynqmp_nvmem_read;
+       econfig.priv = priv;
+
+       priv->nvmem = devm_nvmem_register(dev, &econfig);
+
+       return PTR_ERR_OR_ZERO(priv->nvmem);
+}
+
+static struct platform_driver zynqmp_nvmem_driver = {
+       .probe = zynqmp_nvmem_probe,
+       .driver = {
+               .name = "zynqmp-nvmem",
+               .of_match_table = zynqmp_nvmem_match,
+       },
+};
+
+module_platform_driver(zynqmp_nvmem_driver);
+
+MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>, Nava kishore Manne <navam@xilinx.com>");
+MODULE_DESCRIPTION("ZynqMP NVMEM driver");
+MODULE_LICENSE("GPL");
index 18f1639dbc4a601d951330ea179335cb704ce660..e06a0ab05ad62db14aad9d49188e959db2bb03c5 100644 (file)
@@ -130,6 +130,24 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
 
+/**
+ * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
+ * @opp:       opp for which the level value has to be returned
+ *
+ * Return: level read from device tree corresponding to the opp, else
+ * return 0.
+ */
+unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
+{
+       if (IS_ERR_OR_NULL(opp) || !opp->available) {
+               pr_err("%s: Invalid parameters\n", __func__);
+               return 0;
+       }
+
+       return opp->level;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
+
 /**
  * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  * @opp: opp for which turbo mode is being verified
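
A minimal consumer sketch (not part of this commit) showing how the new
dev_pm_opp_get_level() helper added above might be used together with the
existing OPP lookup API; the device pointer and frequency are placeholders:

	#include <linux/err.h>
	#include <linux/pm_opp.h>

	static int example_get_opp_level(struct device *dev, unsigned long freq)
	{
		struct dev_pm_opp *opp;
		unsigned int level;

		/* Look up an exact, available OPP for the given frequency */
		opp = dev_pm_opp_find_freq_exact(dev, freq, true);
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		/* Returns the value parsed from the "opp-level" DT property, or 0 */
		level = dev_pm_opp_get_level(opp);
		dev_pm_opp_put(opp);

		return level;
	}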
index 06f0f632ec4771aa84874221900e07c1b8b924ef..1779f2c9329183a47536619deda17f1f48374376 100644 (file)
@@ -594,6 +594,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
                new_opp->rate = (unsigned long)rate;
        }
 
+       of_property_read_u32(np, "opp-level", &new_opp->level);
+
        /* Check if the OPP supports hardware's hierarchy of versions or not */
        if (!_opp_is_supported(dev, opp_table, np)) {
                dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
index e24d81497375dc2f3283426848dab05574e10001..4458175aa66178b36692f9b57d4ab1063e7a131b 100644 (file)
@@ -60,6 +60,7 @@ extern struct list_head opp_tables;
  * @suspend:   true if suspend OPP
  * @pstate: Device's power domain's performance state.
  * @rate:      Frequency in hertz
+ * @level:     Performance level
  * @supplies:  Power supplies voltage/current values
  * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
  *             frequency from any other OPP's frequency.
@@ -80,6 +81,7 @@ struct dev_pm_opp {
        bool suspend;
        unsigned int pstate;
        unsigned long rate;
+       unsigned int level;
 
        struct dev_pm_opp_supply *supplies;
 
index 6fd6e07ab345f6813c8285038206dad2ff3c1726..09a77e556ecebfa9c7b318e88a5fdaadb6a41747 100644 (file)
@@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy)
 
        err = reset_control_deassert(priv->reset);
        if (err && priv->no_suspend_override)
-               reset_control_assert(priv->no_suspend_override);
+               reset_control_deassert(priv->no_suspend_override);
 
        return err;
 }
@@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
-       priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy");
+       priv->reset = devm_reset_control_get(&pdev->dev, "phy");
        if (IS_ERR(priv->reset))
                return PTR_ERR(priv->reset);
 
index 77fdaa5519772f55df8deafccbee86a8304b17a0..a52c5bb350333ec14951c32b11f741dd9287db80 100644 (file)
@@ -204,11 +204,11 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev,
 
        if (args->args_count < 1)
                return ERR_PTR(-EINVAL);
+       if (!priv || !priv->if_phys)
+               return ERR_PTR(-ENODEV);
        if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) &&
            args->args_count < 2)
                return ERR_PTR(-EINVAL);
-       if (!priv || !priv->if_phys)
-               return ERR_PTR(-ENODEV);
        if (phy_id > priv->soc_data->num_ports)
                return ERR_PTR(-EINVAL);
        if (phy_id != priv->if_phys[phy_id - 1].id)
index 2e01bd833ffdbb69b41071d3eccf29a367125410..2c8c23db92fb29e8ebdd04212da41ab6ec2a7162 100644 (file)
@@ -40,6 +40,14 @@ config RESET_BERLIN
        help
          This enables the reset controller driver for Marvell Berlin SoCs.
 
+config RESET_BRCMSTB
+       tristate "Broadcom STB reset controller"
+       depends on ARCH_BRCMSTB || COMPILE_TEST
+       default ARCH_BRCMSTB
+       help
+         This enables the reset controller driver for Broadcom STB SoCs using
+         a SUN_TOP_CTRL_SW_INIT style controller.
+
 config RESET_HSDK
        bool "Synopsys HSDK Reset Driver"
        depends on HAS_IOMEM
@@ -48,9 +56,9 @@ config RESET_HSDK
          This enables the reset controller driver for HSDK board.
 
 config RESET_IMX7
-       bool "i.MX7 Reset Driver" if COMPILE_TEST
+       bool "i.MX7/8 Reset Driver" if COMPILE_TEST
        depends on HAS_IOMEM
-       default SOC_IMX7D
+       default SOC_IMX7D || (ARM64 && ARCH_MXC)
        select MFD_SYSCON
        help
          This enables the reset controller driver for i.MX7 SoCs.
index dc7874df78d9b641fd47ebf225af048a71e95c34..61456b8f659c08bad5431270398973fb867a2fb7 100644 (file)
@@ -7,6 +7,7 @@ obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
 obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
 obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
 obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
+obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
 obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
 obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
 obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
@@ -26,4 +27,5 @@ obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
 obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
 obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
 obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
+obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o
 
diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c
new file mode 100644 (file)
index 0000000..a608f44
--- /dev/null
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom STB generic reset controller for SW_INIT style reset controller
+ *
+ * Author: Florian Fainelli <f.fainelli@gmail.com>
+ * Copyright (C) 2018 Broadcom
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+struct brcmstb_reset {
+       void __iomem *base;
+       struct reset_controller_dev rcdev;
+};
+
+#define SW_INIT_SET            0x00
+#define SW_INIT_CLEAR          0x04
+#define SW_INIT_STATUS         0x08
+
+#define SW_INIT_BIT(id)                BIT((id) & 0x1f)
+#define SW_INIT_BANK(id)       ((id) >> 5)
+
+/* A full bank contains extra registers that we are not utilizing, but the
+ * region still qualifies as a single bank.
+ */
+#define SW_INIT_BANK_SIZE      0x18
+
+static inline
+struct brcmstb_reset *to_brcmstb(struct reset_controller_dev *rcdev)
+{
+       return container_of(rcdev, struct brcmstb_reset, rcdev);
+}
+
+static int brcmstb_reset_assert(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+       struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+       writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_SET);
+
+       return 0;
+}
+
+static int brcmstb_reset_deassert(struct reset_controller_dev *rcdev,
+                                 unsigned long id)
+{
+       unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+       struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+       writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_CLEAR);
+       /* The maximum delay between de-asserting a line and seeing the block
+        * become operational is typically 14us in the worst case, so build
+        * in some slack here.
+        */
+       usleep_range(100, 200);
+
+       return 0;
+}
+
+static int brcmstb_reset_status(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+       struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+       return readl_relaxed(priv->base + off + SW_INIT_STATUS) &
+                            SW_INIT_BIT(id);
+}
+
+static const struct reset_control_ops brcmstb_reset_ops = {
+       .assert = brcmstb_reset_assert,
+       .deassert = brcmstb_reset_deassert,
+       .status = brcmstb_reset_status,
+};
+
+static int brcmstb_reset_probe(struct platform_device *pdev)
+{
+       struct device *kdev = &pdev->dev;
+       struct brcmstb_reset *priv;
+       struct resource *res;
+
+       priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) ||
+           !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) {
+               dev_err(kdev, "incorrect register range\n");
+               return -EINVAL;
+       }
+
+       priv->base = devm_ioremap_resource(kdev, res);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       dev_set_drvdata(kdev, priv);
+
+       priv->rcdev.owner = THIS_MODULE;
+       priv->rcdev.nr_resets = DIV_ROUND_DOWN_ULL(resource_size(res),
+                                                  SW_INIT_BANK_SIZE) * 32;
+       priv->rcdev.ops = &brcmstb_reset_ops;
+       priv->rcdev.of_node = kdev->of_node;
+       /* Use defaults: 1 cell and simple xlate function */
+
+       return devm_reset_controller_register(kdev, &priv->rcdev);
+}
+
+static const struct of_device_id brcmstb_reset_of_match[] = {
+       { .compatible = "brcm,brcmstb-reset" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver brcmstb_reset_driver = {
+       .probe  = brcmstb_reset_probe,
+       .driver = {
+               .name = "brcmstb-reset",
+               .of_match_table = brcmstb_reset_of_match,
+       },
+};
+module_platform_driver(brcmstb_reset_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom STB reset controller");
+MODULE_LICENSE("GPL");
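
For context, a hypothetical consumer of the reset controller added above could
drive a reset line through the generic reset framework. This is only a sketch:
the device and the "swinit" reset name are illustrative, not taken from this
commit; the provider itself relies on the default one-cell xlate noted in its
probe function.

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/reset.h>

	static int example_toggle_block_reset(struct platform_device *pdev)
	{
		struct reset_control *rc;
		int ret;

		/* "swinit" is an illustrative reset name from the consumer's DT node */
		rc = devm_reset_control_get_exclusive(&pdev->dev, "swinit");
		if (IS_ERR(rc))
			return PTR_ERR(rc);

		/* Sets the corresponding SW_INIT bit via brcmstb_reset_assert() */
		ret = reset_control_assert(rc);
		if (ret)
			return ret;

		/* Clears the bit and waits for the block to settle */
		return reset_control_deassert(rc);
	}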
index 77911fa8f31d7d168bd8fcc985d3f1d15e3036a3..aed76e33a0a933713ededfb1137ac0a6de804a83 100644 (file)
 
 #include <linux/mfd/syscon.h>
 #include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reset-controller.h>
 #include <linux/regmap.h>
 #include <dt-bindings/reset/imx7-reset.h>
+#include <dt-bindings/reset/imx8mq-reset.h>
+
+struct imx7_src_signal {
+       unsigned int offset, bit;
+};
+
+struct imx7_src_variant {
+       const struct imx7_src_signal *signals;
+       unsigned int signals_num;
+       struct reset_control_ops ops;
+};
 
 struct imx7_src {
        struct reset_controller_dev rcdev;
        struct regmap *regmap;
+       const struct imx7_src_signal *signals;
 };
 
 enum imx7_src_registers {
@@ -39,9 +52,14 @@ enum imx7_src_registers {
        SRC_DDRC_RCR            = 0x1000,
 };
 
-struct imx7_src_signal {
-       unsigned int offset, bit;
-};
+static int imx7_reset_update(struct imx7_src *imx7src,
+                            unsigned long id, unsigned int value)
+{
+       const struct imx7_src_signal *signal = &imx7src->signals[id];
+
+       return regmap_update_bits(imx7src->regmap,
+                                 signal->offset, signal->bit, value);
+}
 
 static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = {
        [IMX7_RESET_A7_CORE_POR_RESET0] = { SRC_A7RCR0, BIT(0) },
@@ -81,8 +99,8 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
                          unsigned long id, bool assert)
 {
        struct imx7_src *imx7src = to_imx7_src(rcdev);
-       const struct imx7_src_signal *signal = &imx7_src_signals[id];
-       unsigned int value = assert ? signal->bit : 0;
+       const unsigned int bit = imx7src->signals[id].bit;
+       unsigned int value = assert ? bit : 0;
 
        switch (id) {
        case IMX7_RESET_PCIEPHY:
@@ -95,12 +113,11 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
                break;
 
        case IMX7_RESET_PCIE_CTRL_APPS_EN:
-               value = (assert) ? 0 : signal->bit;
+               value = assert ? 0 : bit;
                break;
        }
 
-       return regmap_update_bits(imx7src->regmap,
-                                 signal->offset, signal->bit, value);
+       return imx7_reset_update(imx7src, id, value);
 }
 
 static int imx7_reset_assert(struct reset_controller_dev *rcdev,
@@ -115,9 +132,133 @@ static int imx7_reset_deassert(struct reset_controller_dev *rcdev,
        return imx7_reset_set(rcdev, id, false);
 }
 
-static const struct reset_control_ops imx7_reset_ops = {
-       .assert         = imx7_reset_assert,
-       .deassert       = imx7_reset_deassert,
+static const struct imx7_src_variant variant_imx7 = {
+       .signals = imx7_src_signals,
+       .signals_num = ARRAY_SIZE(imx7_src_signals),
+       .ops = {
+               .assert   = imx7_reset_assert,
+               .deassert = imx7_reset_deassert,
+       },
+};
+
+enum imx8mq_src_registers {
+       SRC_A53RCR0             = 0x0004,
+       SRC_HDMI_RCR            = 0x0030,
+       SRC_DISP_RCR            = 0x0034,
+       SRC_GPU_RCR             = 0x0040,
+       SRC_VPU_RCR             = 0x0044,
+       SRC_PCIE2_RCR           = 0x0048,
+       SRC_MIPIPHY1_RCR        = 0x004c,
+       SRC_MIPIPHY2_RCR        = 0x0050,
+       SRC_DDRC2_RCR           = 0x1004,
+};
+
+static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
+       [IMX8MQ_RESET_A53_CORE_POR_RESET0]      = { SRC_A53RCR0, BIT(0) },
+       [IMX8MQ_RESET_A53_CORE_POR_RESET1]      = { SRC_A53RCR0, BIT(1) },
+       [IMX8MQ_RESET_A53_CORE_POR_RESET2]      = { SRC_A53RCR0, BIT(2) },
+       [IMX8MQ_RESET_A53_CORE_POR_RESET3]      = { SRC_A53RCR0, BIT(3) },
+       [IMX8MQ_RESET_A53_CORE_RESET0]          = { SRC_A53RCR0, BIT(4) },
+       [IMX8MQ_RESET_A53_CORE_RESET1]          = { SRC_A53RCR0, BIT(5) },
+       [IMX8MQ_RESET_A53_CORE_RESET2]          = { SRC_A53RCR0, BIT(6) },
+       [IMX8MQ_RESET_A53_CORE_RESET3]          = { SRC_A53RCR0, BIT(7) },
+       [IMX8MQ_RESET_A53_DBG_RESET0]           = { SRC_A53RCR0, BIT(8) },
+       [IMX8MQ_RESET_A53_DBG_RESET1]           = { SRC_A53RCR0, BIT(9) },
+       [IMX8MQ_RESET_A53_DBG_RESET2]           = { SRC_A53RCR0, BIT(10) },
+       [IMX8MQ_RESET_A53_DBG_RESET3]           = { SRC_A53RCR0, BIT(11) },
+       [IMX8MQ_RESET_A53_ETM_RESET0]           = { SRC_A53RCR0, BIT(12) },
+       [IMX8MQ_RESET_A53_ETM_RESET1]           = { SRC_A53RCR0, BIT(13) },
+       [IMX8MQ_RESET_A53_ETM_RESET2]           = { SRC_A53RCR0, BIT(14) },
+       [IMX8MQ_RESET_A53_ETM_RESET3]           = { SRC_A53RCR0, BIT(15) },
+       [IMX8MQ_RESET_A53_SOC_DBG_RESET]        = { SRC_A53RCR0, BIT(20) },
+       [IMX8MQ_RESET_A53_L2RESET]              = { SRC_A53RCR0, BIT(21) },
+       [IMX8MQ_RESET_SW_NON_SCLR_M4C_RST]      = { SRC_M4RCR, BIT(0) },
+       [IMX8MQ_RESET_OTG1_PHY_RESET]           = { SRC_USBOPHY1_RCR, BIT(0) },
+       [IMX8MQ_RESET_OTG2_PHY_RESET]           = { SRC_USBOPHY2_RCR, BIT(0) },
+       [IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N]    = { SRC_MIPIPHY_RCR, BIT(1) },
+       [IMX8MQ_RESET_MIPI_DSI_RESET_N]         = { SRC_MIPIPHY_RCR, BIT(2) },
+       [IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N]     = { SRC_MIPIPHY_RCR, BIT(3) },
+       [IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N]     = { SRC_MIPIPHY_RCR, BIT(4) },
+       [IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N]    = { SRC_MIPIPHY_RCR, BIT(5) },
+       [IMX8MQ_RESET_PCIEPHY]                  = { SRC_PCIEPHY_RCR,
+                                                   BIT(2) | BIT(1) },
+       [IMX8MQ_RESET_PCIEPHY_PERST]            = { SRC_PCIEPHY_RCR, BIT(3) },
+       [IMX8MQ_RESET_PCIE_CTRL_APPS_EN]        = { SRC_PCIEPHY_RCR, BIT(6) },
+       [IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF]   = { SRC_PCIEPHY_RCR, BIT(11) },
+       [IMX8MQ_RESET_HDMI_PHY_APB_RESET]       = { SRC_HDMI_RCR, BIT(0) },
+       [IMX8MQ_RESET_DISP_RESET]               = { SRC_DISP_RCR, BIT(0) },
+       [IMX8MQ_RESET_GPU_RESET]                = { SRC_GPU_RCR, BIT(0) },
+       [IMX8MQ_RESET_VPU_RESET]                = { SRC_VPU_RCR, BIT(0) },
+       [IMX8MQ_RESET_PCIEPHY2]                 = { SRC_PCIE2_RCR,
+                                                   BIT(2) | BIT(1) },
+       [IMX8MQ_RESET_PCIEPHY2_PERST]           = { SRC_PCIE2_RCR, BIT(3) },
+       [IMX8MQ_RESET_PCIE2_CTRL_APPS_EN]       = { SRC_PCIE2_RCR, BIT(6) },
+       [IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF]  = { SRC_PCIE2_RCR, BIT(11) },
+       [IMX8MQ_RESET_MIPI_CSI1_CORE_RESET]     = { SRC_MIPIPHY1_RCR, BIT(0) },
+       [IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET]  = { SRC_MIPIPHY1_RCR, BIT(1) },
+       [IMX8MQ_RESET_MIPI_CSI1_ESC_RESET]      = { SRC_MIPIPHY1_RCR, BIT(2) },
+       [IMX8MQ_RESET_MIPI_CSI2_CORE_RESET]     = { SRC_MIPIPHY2_RCR, BIT(0) },
+       [IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET]  = { SRC_MIPIPHY2_RCR, BIT(1) },
+       [IMX8MQ_RESET_MIPI_CSI2_ESC_RESET]      = { SRC_MIPIPHY2_RCR, BIT(2) },
+       [IMX8MQ_RESET_DDRC1_PRST]               = { SRC_DDRC_RCR, BIT(0) },
+       [IMX8MQ_RESET_DDRC1_CORE_RESET]         = { SRC_DDRC_RCR, BIT(1) },
+       [IMX8MQ_RESET_DDRC1_PHY_RESET]          = { SRC_DDRC_RCR, BIT(2) },
+       [IMX8MQ_RESET_DDRC2_PHY_RESET]          = { SRC_DDRC2_RCR, BIT(0) },
+       [IMX8MQ_RESET_DDRC2_CORE_RESET]         = { SRC_DDRC2_RCR, BIT(1) },
+       [IMX8MQ_RESET_DDRC2_PRST]               = { SRC_DDRC2_RCR, BIT(2) },
+};
+
+static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
+                           unsigned long id, bool assert)
+{
+       struct imx7_src *imx7src = to_imx7_src(rcdev);
+       const unsigned int bit = imx7src->signals[id].bit;
+       unsigned int value = assert ? bit : 0;
+
+       switch (id) {
+       case IMX8MQ_RESET_PCIEPHY:
+       case IMX8MQ_RESET_PCIEPHY2: /* fallthrough */
+               /*
+                * wait for more than 10us to release phy g_rst and
+                * btnrst
+                */
+               if (!assert)
+                       udelay(10);
+               break;
+
+       case IMX8MQ_RESET_PCIE_CTRL_APPS_EN:
+       case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN:   /* fallthrough */
+       case IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N:        /* fallthrough */
+       case IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N: /* fallthrough */
+       case IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N: /* fallthrough */
+       case IMX8MQ_RESET_MIPI_DSI_RESET_N:     /* fallthrough */
+       case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N:        /* fallthrough */
+               value = assert ? 0 : bit;
+               break;
+       }
+
+       return imx7_reset_update(imx7src, id, value);
+}
+
+static int imx8mq_reset_assert(struct reset_controller_dev *rcdev,
+                              unsigned long id)
+{
+       return imx8mq_reset_set(rcdev, id, true);
+}
+
+static int imx8mq_reset_deassert(struct reset_controller_dev *rcdev,
+                                unsigned long id)
+{
+       return imx8mq_reset_set(rcdev, id, false);
+}
+
+static const struct imx7_src_variant variant_imx8mq = {
+       .signals = imx8mq_src_signals,
+       .signals_num = ARRAY_SIZE(imx8mq_src_signals),
+       .ops = {
+               .assert   = imx8mq_reset_assert,
+               .deassert = imx8mq_reset_deassert,
+       },
 };
 
 static int imx7_reset_probe(struct platform_device *pdev)
@@ -125,11 +266,13 @@ static int imx7_reset_probe(struct platform_device *pdev)
        struct imx7_src *imx7src;
        struct device *dev = &pdev->dev;
        struct regmap_config config = { .name = "src" };
+       const struct imx7_src_variant *variant = of_device_get_match_data(dev);
 
        imx7src = devm_kzalloc(dev, sizeof(*imx7src), GFP_KERNEL);
        if (!imx7src)
                return -ENOMEM;
 
+       imx7src->signals = variant->signals;
        imx7src->regmap = syscon_node_to_regmap(dev->of_node);
        if (IS_ERR(imx7src->regmap)) {
                dev_err(dev, "Unable to get imx7-src regmap");
@@ -138,15 +281,16 @@ static int imx7_reset_probe(struct platform_device *pdev)
        regmap_attach_dev(dev, imx7src->regmap, &config);
 
        imx7src->rcdev.owner     = THIS_MODULE;
-       imx7src->rcdev.nr_resets = IMX7_RESET_NUM;
-       imx7src->rcdev.ops       = &imx7_reset_ops;
+       imx7src->rcdev.nr_resets = variant->signals_num;
+       imx7src->rcdev.ops       = &variant->ops;
        imx7src->rcdev.of_node   = dev->of_node;
 
        return devm_reset_controller_register(dev, &imx7src->rcdev);
 }
 
 static const struct of_device_id imx7_reset_dt_ids[] = {
-       { .compatible = "fsl,imx7d-src", },
+       { .compatible = "fsl,imx7d-src", .data = &variant_imx7 },
+       { .compatible = "fsl,imx8mq-src", .data = &variant_imx8mq },
        { /* sentinel */ },
 };
 
index 318cfc51c4419363e4135fc9ae6cbc8b4ae66e6b..96953992c2bb543112ee1a2e8ee963275aadb4a7 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/reset-controller.h>
+#include <linux/reset/socfpga.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
@@ -18,7 +19,6 @@
 #include "reset-simple.h"
 
 #define SOCFPGA_NR_BANKS       8
-void __init socfpga_reset_init(void);
 
 static int a10_reset_init(struct device_node *np)
 {
index db9a1a75523f420b7bab02d6c11e44159d2be194..b06d724d8f217be90593cb87215babb4413197d8 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/reset-controller.h>
+#include <linux/reset/sunxi.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
new file mode 100644 (file)
index 0000000..2ef1f13
--- /dev/null
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_NR_RESETS (ZYNQMP_PM_RESET_END - ZYNQMP_PM_RESET_START)
+#define ZYNQMP_RESET_ID ZYNQMP_PM_RESET_START
+
+struct zynqmp_reset_data {
+       struct reset_controller_dev rcdev;
+       const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+static inline struct zynqmp_reset_data *
+to_zynqmp_reset_data(struct reset_controller_dev *rcdev)
+{
+       return container_of(rcdev, struct zynqmp_reset_data, rcdev);
+}
+
+static int zynqmp_reset_assert(struct reset_controller_dev *rcdev,
+                              unsigned long id)
+{
+       struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+       return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+                                           PM_RESET_ACTION_ASSERT);
+}
+
+static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev,
+                                unsigned long id)
+{
+       struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+       return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+                                           PM_RESET_ACTION_RELEASE);
+}
+
+static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
+                              unsigned long id)
+{
+       struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+       int val, err;
+
+       err = priv->eemi_ops->reset_get_status(ZYNQMP_RESET_ID + id, &val);
+       if (err)
+               return err;
+
+       return val;
+}
+
+static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
+                             unsigned long id)
+{
+       struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+       return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+                                           PM_RESET_ACTION_PULSE);
+}
+
+static struct reset_control_ops zynqmp_reset_ops = {
+       .reset = zynqmp_reset_reset,
+       .assert = zynqmp_reset_assert,
+       .deassert = zynqmp_reset_deassert,
+       .status = zynqmp_reset_status,
+};
+
+static int zynqmp_reset_probe(struct platform_device *pdev)
+{
+       struct zynqmp_reset_data *priv;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, priv);
+
+       priv->eemi_ops = zynqmp_pm_get_eemi_ops();
+       if (!priv->eemi_ops)
+               return -ENXIO;
+
+       priv->rcdev.ops = &zynqmp_reset_ops;
+       priv->rcdev.owner = THIS_MODULE;
+       priv->rcdev.of_node = pdev->dev.of_node;
+       priv->rcdev.nr_resets = ZYNQMP_NR_RESETS;
+
+       return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
+}
+
+static const struct of_device_id zynqmp_reset_dt_ids[] = {
+       { .compatible = "xlnx,zynqmp-reset", },
+       { /* sentinel */ },
+};
+
+static struct platform_driver zynqmp_reset_driver = {
+       .probe  = zynqmp_reset_probe,
+       .driver = {
+               .name           = KBUILD_MODNAME,
+               .of_match_table = zynqmp_reset_dt_ids,
+       },
+};
+
+static int __init zynqmp_reset_init(void)
+{
+       return platform_driver_register(&zynqmp_reset_driver);
+}
+
+arch_initcall(zynqmp_reset_init);
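
Likewise, a hedged consumer sketch (not part of this commit) for the ZynqMP
reset controller above: reset_control_reset() lands in zynqmp_reset_reset()
and therefore issues a PM_RESET_ACTION_PULSE through the EEMI firmware
interface. The device and reset index are illustrative only.

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/reset.h>

	static int example_pulse_zynqmp_reset(struct platform_device *pdev)
	{
		struct reset_control *rc;

		/* First reset specifier listed in the consumer's "resets" property */
		rc = devm_reset_control_get_exclusive_by_index(&pdev->dev, 0);
		if (IS_ERR(rc))
			return PTR_ERR(rc);

		/* Pulse the line: assert and release via the firmware call */
		return reset_control_reset(rc);
	}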
index 194ffd5c8580401a13eefb3f85e61fdba2d5e7e6..039b2074db7e5d39a88aeed516630e62dfc05e4e 100644 (file)
@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
 
 static void __ref sclp_cpu_change_notify(struct work_struct *work)
 {
+       lock_device_hotplug();
        smp_rescan_cpus();
+       unlock_device_hotplug();
 }
 
 static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
index 634ddb90e7aae6010eb3c6f231e68d343d17ab90..7e56a11836c18c30d4c1d430a3978277e0121e5b 100644 (file)
@@ -1747,11 +1747,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                shost->max_sectors = (shost->sg_tablesize * 8) + 112;
        }
 
-       error = dma_set_max_seg_size(&pdev->dev,
-               (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
-                       (shost->max_sectors << 9) : 65536);
-       if (error)
-               goto out_deinit;
+       if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
+               shost->max_segment_size = shost->max_sectors << 9;
+       else
+               shost->max_segment_size = 65536;
 
        /*
         * Firmware printf works only with older firmware.
index 8a004036e3d72a666d3f49e22ee83539f165ba75..9bd2bd8dc2be24692c8ad72b13a1243721a5284c 100644 (file)
@@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable)
        }
 
        fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+       ln->fc_vport = fc_vport;
 
        if (csio_fcoe_alloc_vnp(hw, ln))
                goto error;
 
        *(struct csio_lnode **)fc_vport->dd_data = ln;
-       ln->fc_vport = fc_vport;
        if (!fc_vport->node_name)
                fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
        if (!fc_vport->port_name)
index 4c66b19e61996a80c6440211e78086a1b4257c10..8c9f7904222888251a010eb3ab392f4e78a25f34 100644 (file)
@@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
                         lport);
 
        /* release any threads waiting for the unreg to complete */
-       complete(&lport->lport_unreg_done);
+       if (lport->vport->localport)
+               complete(lport->lport_unreg_cmp);
 }
 
 /* lpfc_nvme_remoteport_delete
@@ -2545,7 +2546,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
  */
 void
 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
-                          struct lpfc_nvme_lport *lport)
+                          struct lpfc_nvme_lport *lport,
+                          struct completion *lport_unreg_cmp)
 {
 #if (IS_ENABLED(CONFIG_NVME_FC))
        u32 wait_tmo;
@@ -2557,8 +2559,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
         */
        wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
        while (true) {
-               ret = wait_for_completion_timeout(&lport->lport_unreg_done,
-                                                 wait_tmo);
+               ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
                if (unlikely(!ret)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
                                         "6176 Lport %p Localport %p wait "
@@ -2592,12 +2593,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
        struct lpfc_nvme_lport *lport;
        struct lpfc_nvme_ctrl_stat *cstat;
        int ret;
+       DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
 
        if (vport->nvmei_support == 0)
                return;
 
        localport = vport->localport;
-       vport->localport = NULL;
        lport = (struct lpfc_nvme_lport *)localport->private;
        cstat = lport->cstat;
 
@@ -2608,13 +2609,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
        /* lport's rport list is clear.  Unregister
         * lport and release resources.
         */
-       init_completion(&lport->lport_unreg_done);
+       lport->lport_unreg_cmp = &lport_unreg_cmp;
        ret = nvme_fc_unregister_localport(localport);
 
        /* Wait for completion.  This either blocks
         * indefinitely or succeeds
         */
-       lpfc_nvme_lport_unreg_wait(vport, lport);
+       lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
+       vport->localport = NULL;
        kfree(cstat);
 
        /* Regardless of the unregister upcall response, clear
index cfd4719be25c3d3eb35ae4ae5ea7e56d357d0d21..b234d02989942ba65f0a87db0aab71c9c1b7cbb2 100644 (file)
@@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat {
 /* Declare nvme-based local and remote port definitions. */
 struct lpfc_nvme_lport {
        struct lpfc_vport *vport;
-       struct completion lport_unreg_done;
+       struct completion *lport_unreg_cmp;
        /* Add stats counters here */
        struct lpfc_nvme_ctrl_stat *cstat;
        atomic_t fc4NvmeLsRequests;
index 6245f442d784bed3056de8c0830cd9e687b59a6a..95fee83090eb7bf03fa2f9ea2263e06b761e6ede 100644 (file)
@@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
        struct lpfc_nvmet_tgtport *tport = targetport->private;
 
        /* release any threads waiting for the unreg to complete */
-       complete(&tport->tport_unreg_done);
+       if (tport->phba->targetport)
+               complete(tport->tport_unreg_cmp);
 }
 
 static void
@@ -1692,6 +1693,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_queue *wq;
        uint32_t qidx;
+       DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
 
        if (phba->nvmet_support == 0)
                return;
@@ -1701,9 +1703,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
                        wq = phba->sli4_hba.nvme_wq[qidx];
                        lpfc_nvmet_wqfull_flush(phba, wq, NULL);
                }
-               init_completion(&tgtp->tport_unreg_done);
+               tgtp->tport_unreg_cmp = &tport_unreg_cmp;
                nvmet_fc_unregister_targetport(phba->targetport);
-               wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+               wait_for_completion_timeout(&tport_unreg_cmp, 5);
                lpfc_nvmet_cleanup_io_context(phba);
        }
        phba->targetport = NULL;
index 1aaff63f1f419209b08cdfa0c6e7141a2018854d..0ec1082ce7ef62dd503ce8086cbc47a56a5b9ec5 100644 (file)
@@ -34,7 +34,7 @@
 /* Used for NVME Target */
 struct lpfc_nvmet_tgtport {
        struct lpfc_hba *phba;
-       struct completion tport_unreg_done;
+       struct completion *tport_unreg_cmp;
 
        /* Stats counters - lpfc_nvmet_unsol_ls_buffer */
        atomic_t rcv_ls_req_in;
index b13cc9288ba0d9db3fab88849a5fe4a86f6537ff..6d65ac584eba0178846b3b0fc6526f6c10ec8863 100644 (file)
@@ -1842,8 +1842,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
        blk_queue_segment_boundary(q, shost->dma_boundary);
        dma_set_seg_boundary(dev, shost->dma_boundary);
 
-       blk_queue_max_segment_size(q,
-               min(shost->max_segment_size, dma_get_max_seg_size(dev)));
+       blk_queue_max_segment_size(q, shost->max_segment_size);
+       dma_set_max_seg_size(dev, shost->max_segment_size);
 
        /*
         * Set a reasonable default alignment:  The larger of 32-byte (dword),
index 71334aaf14472c5ee4e4f2cc005b8c7430efbe34..2ddf24466a62e39e351bd90e4f7d24edfad5791f 100644 (file)
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
                     const char *prefix)
 {
-       u8 *regs;
+       u32 *regs;
+       size_t pos;
+
+       if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
+               return -EINVAL;
 
        regs = kzalloc(len, GFP_KERNEL);
        if (!regs)
                return -ENOMEM;
 
-       memcpy_fromio(regs, hba->mmio_base + offset, len);
+       for (pos = 0; pos < len; pos += 4)
+               regs[pos / 4] = ufshcd_readl(hba, offset + pos);
+
        ufshcd_hex_dump(prefix, regs, len);
        kfree(regs);
 
index fce33ca76bb6285d94e2c838bf8aa560f2cbb04e..be95a37c3fece881af20d4a42b5f066f6a2db756 100644 (file)
@@ -51,16 +51,30 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
 {
        struct device_node *canvas_node;
        struct platform_device *canvas_pdev;
+       struct meson_canvas *canvas;
 
        canvas_node = of_parse_phandle(dev->of_node, "amlogic,canvas", 0);
        if (!canvas_node)
                return ERR_PTR(-ENODEV);
 
        canvas_pdev = of_find_device_by_node(canvas_node);
-       if (!canvas_pdev)
+       if (!canvas_pdev) {
+               of_node_put(canvas_node);
                return ERR_PTR(-EPROBE_DEFER);
+       }
+
+       of_node_put(canvas_node);
+
+       /*
+        * If priv is NULL, it's probably because the canvas hasn't been
+        * properly initialized. Bail out with -EINVAL because, in the
+        * current state, this driver's probe cannot return -EPROBE_DEFER.
+        */
+       canvas = dev_get_drvdata(&canvas_pdev->dev);
+       if (!canvas)
+               return ERR_PTR(-EINVAL);
 
-       return dev_get_drvdata(&canvas_pdev->dev);
+       return canvas;
 }
 EXPORT_SYMBOL_GPL(meson_canvas_get);
 
index daea191a66fa57d1ec9bf2bfaccaae6c0901ba1b..19d4cbc93a17a36924a0259270ab1b509a7fabe7 100644 (file)
@@ -165,6 +165,194 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
        CLK_MSR_ID(82, "ge2d"),
 };
 
+static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
+       CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+       CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+       CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+       CLK_MSR_ID(3, "a53_ring_osc"),
+       CLK_MSR_ID(4, "gp0_pll"),
+       CLK_MSR_ID(5, "gp1_pll"),
+       CLK_MSR_ID(7, "clk81"),
+       CLK_MSR_ID(9, "encl"),
+       CLK_MSR_ID(17, "sys_pll_div16"),
+       CLK_MSR_ID(18, "sys_cpu_div16"),
+       CLK_MSR_ID(20, "rtc_osc_out"),
+       CLK_MSR_ID(23, "mmc_clk"),
+       CLK_MSR_ID(28, "sar_adc"),
+       CLK_MSR_ID(31, "mpll_test_out"),
+       CLK_MSR_ID(40, "mod_eth_tx_clk"),
+       CLK_MSR_ID(41, "mod_eth_rx_clk_rmii"),
+       CLK_MSR_ID(42, "mp0_out"),
+       CLK_MSR_ID(43, "fclk_div5"),
+       CLK_MSR_ID(44, "pwm_b"),
+       CLK_MSR_ID(45, "pwm_a"),
+       CLK_MSR_ID(46, "vpu"),
+       CLK_MSR_ID(47, "ddr_dpll_pt"),
+       CLK_MSR_ID(48, "mp1_out"),
+       CLK_MSR_ID(49, "mp2_out"),
+       CLK_MSR_ID(50, "mp3_out"),
+       CLK_MSR_ID(51, "sd_emmm_c"),
+       CLK_MSR_ID(52, "sd_emmc_b"),
+       CLK_MSR_ID(61, "gpio_msr"),
+       CLK_MSR_ID(66, "audio_slv_lrclk_c"),
+       CLK_MSR_ID(67, "audio_slv_lrclk_b"),
+       CLK_MSR_ID(68, "audio_slv_lrclk_a"),
+       CLK_MSR_ID(69, "audio_slv_sclk_c"),
+       CLK_MSR_ID(70, "audio_slv_sclk_b"),
+       CLK_MSR_ID(71, "audio_slv_sclk_a"),
+       CLK_MSR_ID(72, "pwm_d"),
+       CLK_MSR_ID(73, "pwm_c"),
+       CLK_MSR_ID(74, "wifi_beacon"),
+       CLK_MSR_ID(75, "tdmin_lb_lrcl"),
+       CLK_MSR_ID(76, "tdmin_lb_sclk"),
+       CLK_MSR_ID(77, "rng_ring_osc_0"),
+       CLK_MSR_ID(78, "rng_ring_osc_1"),
+       CLK_MSR_ID(79, "rng_ring_osc_2"),
+       CLK_MSR_ID(80, "rng_ring_osc_3"),
+       CLK_MSR_ID(81, "vapb"),
+       CLK_MSR_ID(82, "ge2d"),
+       CLK_MSR_ID(84, "audio_resample"),
+       CLK_MSR_ID(85, "audio_pdm_sys"),
+       CLK_MSR_ID(86, "audio_spdifout"),
+       CLK_MSR_ID(87, "audio_spdifin"),
+       CLK_MSR_ID(88, "audio_lrclk_f"),
+       CLK_MSR_ID(89, "audio_lrclk_e"),
+       CLK_MSR_ID(90, "audio_lrclk_d"),
+       CLK_MSR_ID(91, "audio_lrclk_c"),
+       CLK_MSR_ID(92, "audio_lrclk_b"),
+       CLK_MSR_ID(93, "audio_lrclk_a"),
+       CLK_MSR_ID(94, "audio_sclk_f"),
+       CLK_MSR_ID(95, "audio_sclk_e"),
+       CLK_MSR_ID(96, "audio_sclk_d"),
+       CLK_MSR_ID(97, "audio_sclk_c"),
+       CLK_MSR_ID(98, "audio_sclk_b"),
+       CLK_MSR_ID(99, "audio_sclk_a"),
+       CLK_MSR_ID(100, "audio_mclk_f"),
+       CLK_MSR_ID(101, "audio_mclk_e"),
+       CLK_MSR_ID(102, "audio_mclk_d"),
+       CLK_MSR_ID(103, "audio_mclk_c"),
+       CLK_MSR_ID(104, "audio_mclk_b"),
+       CLK_MSR_ID(105, "audio_mclk_a"),
+       CLK_MSR_ID(106, "pcie_refclk_n"),
+       CLK_MSR_ID(107, "pcie_refclk_p"),
+       CLK_MSR_ID(108, "audio_locker_out"),
+       CLK_MSR_ID(109, "audio_locker_in"),
+};
+
+static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
+       CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+       CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+       CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+       CLK_MSR_ID(3, "sys_cpu_ring_osc"),
+       CLK_MSR_ID(4, "gp0_pll"),
+       CLK_MSR_ID(6, "enci"),
+       CLK_MSR_ID(7, "clk81"),
+       CLK_MSR_ID(8, "encp"),
+       CLK_MSR_ID(9, "encl"),
+       CLK_MSR_ID(10, "vdac"),
+       CLK_MSR_ID(11, "eth_tx"),
+       CLK_MSR_ID(12, "hifi_pll"),
+       CLK_MSR_ID(13, "mod_tcon"),
+       CLK_MSR_ID(14, "fec_0"),
+       CLK_MSR_ID(15, "fec_1"),
+       CLK_MSR_ID(16, "fec_2"),
+       CLK_MSR_ID(17, "sys_pll_div16"),
+       CLK_MSR_ID(18, "sys_cpu_div16"),
+       CLK_MSR_ID(19, "lcd_an_ph2"),
+       CLK_MSR_ID(20, "rtc_osc_out"),
+       CLK_MSR_ID(21, "lcd_an_ph3"),
+       CLK_MSR_ID(22, "eth_phy_ref"),
+       CLK_MSR_ID(23, "mpll_50m"),
+       CLK_MSR_ID(24, "eth_125m"),
+       CLK_MSR_ID(25, "eth_rmii"),
+       CLK_MSR_ID(26, "sc_int"),
+       CLK_MSR_ID(27, "in_mac"),
+       CLK_MSR_ID(28, "sar_adc"),
+       CLK_MSR_ID(29, "pcie_inp"),
+       CLK_MSR_ID(30, "pcie_inn"),
+       CLK_MSR_ID(31, "mpll_test_out"),
+       CLK_MSR_ID(32, "vdec"),
+       CLK_MSR_ID(33, "sys_cpu_ring_osc_1"),
+       CLK_MSR_ID(34, "eth_mpll_50m"),
+       CLK_MSR_ID(35, "mali"),
+       CLK_MSR_ID(36, "hdmi_tx_pixel"),
+       CLK_MSR_ID(37, "cdac"),
+       CLK_MSR_ID(38, "vdin_meas"),
+       CLK_MSR_ID(39, "bt656"),
+       CLK_MSR_ID(41, "eth_rx_or_rmii"),
+       CLK_MSR_ID(42, "mp0_out"),
+       CLK_MSR_ID(43, "fclk_div5"),
+       CLK_MSR_ID(44, "pwm_b"),
+       CLK_MSR_ID(45, "pwm_a"),
+       CLK_MSR_ID(46, "vpu"),
+       CLK_MSR_ID(47, "ddr_dpll_pt"),
+       CLK_MSR_ID(48, "mp1_out"),
+       CLK_MSR_ID(49, "mp2_out"),
+       CLK_MSR_ID(50, "mp3_out"),
+       CLK_MSR_ID(51, "sd_emmc_c"),
+       CLK_MSR_ID(52, "sd_emmc_b"),
+       CLK_MSR_ID(53, "sd_emmc_a"),
+       CLK_MSR_ID(54, "vpu_clkc"),
+       CLK_MSR_ID(55, "vid_pll_div_out"),
+       CLK_MSR_ID(56, "wave420l_a"),
+       CLK_MSR_ID(57, "wave420l_c"),
+       CLK_MSR_ID(58, "wave420l_b"),
+       CLK_MSR_ID(59, "hcodec"),
+       CLK_MSR_ID(61, "gpio_msr"),
+       CLK_MSR_ID(62, "hevcb"),
+       CLK_MSR_ID(63, "dsi_meas"),
+       CLK_MSR_ID(64, "spicc_1"),
+       CLK_MSR_ID(65, "spicc_0"),
+       CLK_MSR_ID(66, "vid_lock"),
+       CLK_MSR_ID(67, "dsi_phy"),
+       CLK_MSR_ID(68, "hdcp22_esm"),
+       CLK_MSR_ID(69, "hdcp22_skp"),
+       CLK_MSR_ID(70, "pwm_f"),
+       CLK_MSR_ID(71, "pwm_e"),
+       CLK_MSR_ID(72, "pwm_d"),
+       CLK_MSR_ID(73, "pwm_c"),
+       CLK_MSR_ID(75, "hevcf"),
+       CLK_MSR_ID(77, "rng_ring_osc_0"),
+       CLK_MSR_ID(78, "rng_ring_osc_1"),
+       CLK_MSR_ID(79, "rng_ring_osc_2"),
+       CLK_MSR_ID(80, "rng_ring_osc_3"),
+       CLK_MSR_ID(81, "vapb"),
+       CLK_MSR_ID(82, "ge2d"),
+       CLK_MSR_ID(83, "co_rx"),
+       CLK_MSR_ID(84, "co_tx"),
+       CLK_MSR_ID(89, "hdmi_todig"),
+       CLK_MSR_ID(90, "hdmitx_sys"),
+       CLK_MSR_ID(94, "eth_phy_rx"),
+       CLK_MSR_ID(95, "eth_phy_pll"),
+       CLK_MSR_ID(96, "vpu_b"),
+       CLK_MSR_ID(97, "cpu_b_tmp"),
+       CLK_MSR_ID(98, "ts"),
+       CLK_MSR_ID(99, "ring_osc_out_ee_3"),
+       CLK_MSR_ID(100, "ring_osc_out_ee_4"),
+       CLK_MSR_ID(101, "ring_osc_out_ee_5"),
+       CLK_MSR_ID(102, "ring_osc_out_ee_6"),
+       CLK_MSR_ID(103, "ring_osc_out_ee_7"),
+       CLK_MSR_ID(104, "ring_osc_out_ee_8"),
+       CLK_MSR_ID(105, "ring_osc_out_ee_9"),
+       CLK_MSR_ID(106, "ephy_test"),
+       CLK_MSR_ID(107, "au_dac_g128x"),
+       CLK_MSR_ID(108, "audio_locker_out"),
+       CLK_MSR_ID(109, "audio_locker_in"),
+       CLK_MSR_ID(110, "audio_tdmout_c_sclk"),
+       CLK_MSR_ID(111, "audio_tdmout_b_sclk"),
+       CLK_MSR_ID(112, "audio_tdmout_a_sclk"),
+       CLK_MSR_ID(113, "audio_tdmin_lb_sclk"),
+       CLK_MSR_ID(114, "audio_tdmin_c_sclk"),
+       CLK_MSR_ID(115, "audio_tdmin_b_sclk"),
+       CLK_MSR_ID(116, "audio_tdmin_a_sclk"),
+       CLK_MSR_ID(117, "audio_resample"),
+       CLK_MSR_ID(118, "audio_pdm_sys"),
+       CLK_MSR_ID(119, "audio_spdifout_b"),
+       CLK_MSR_ID(120, "audio_spdifout"),
+       CLK_MSR_ID(121, "audio_spdifin"),
+       CLK_MSR_ID(122, "audio_pdm_dclk"),
+};
+
 static int meson_measure_id(struct meson_msr_id *clk_msr_id,
                               unsigned int duration)
 {
@@ -337,6 +525,14 @@ static const struct of_device_id meson_msr_match_table[] = {
                .compatible = "amlogic,meson8b-clk-measure",
                .data = (void *)clk_msr_m8,
        },
+       {
+               .compatible = "amlogic,meson-axg-clk-measure",
+               .data = (void *)clk_msr_axg,
+       },
+       {
+               .compatible = "amlogic,meson-g12a-clk-measure",
+               .data = (void *)clk_msr_g12a,
+       },
        { /* sentinel */ }
 };
 
index 055a845ed979bbb24f0dff26ecfe5324b797c932..03fa91fbe2dac0c1fab6ea9687f151a715e215d7 100644 (file)
@@ -1,5 +1,17 @@
 menu "Broadcom SoC drivers"
 
+config BCM2835_POWER
+       bool "BCM2835 power domain driver"
+       depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
+       default y if ARCH_BCM2835
+       select PM_GENERIC_DOMAINS if PM
+       select RESET_CONTROLLER
+       help
+         This enables support for the BCM2835 power domains and reset
+         controller.  Any power domain that is also used by the
+         Raspberry Pi firmware must be accessed through the
+         RASPBERRYPI_POWER driver instead.
+
 config RASPBERRYPI_POWER
        bool "Raspberry Pi power domain driver"
        depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
index dc4fced72d21fcff4633d600c60c5c18121c9908..c81df4b2403c5500db8c9b94583c5b3b68ce5f6d 100644 (file)
@@ -1,2 +1,3 @@
+obj-$(CONFIG_BCM2835_POWER)    += bcm2835-power.o
 obj-$(CONFIG_RASPBERRYPI_POWER)        += raspberrypi-power.o
 obj-$(CONFIG_SOC_BRCMSTB)      += brcmstb/
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
new file mode 100644 (file)
index 0000000..9351349
--- /dev/null
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Power domain driver for Broadcom BCM2835
+ *
+ * Copyright (C) 2018 Broadcom
+ */
+
+#include <dt-bindings/soc/bcm2835-pm.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/bcm2835-pm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+#define PM_GNRIC                        0x00
+#define PM_AUDIO                        0x04
+#define PM_STATUS                       0x18
+#define PM_RSTC                                0x1c
+#define PM_RSTS                                0x20
+#define PM_WDOG                                0x24
+#define PM_PADS0                       0x28
+#define PM_PADS2                       0x2c
+#define PM_PADS3                       0x30
+#define PM_PADS4                       0x34
+#define PM_PADS5                       0x38
+#define PM_PADS6                       0x3c
+#define PM_CAM0                                0x44
+#define PM_CAM0_LDOHPEN                        BIT(2)
+#define PM_CAM0_LDOLPEN                        BIT(1)
+#define PM_CAM0_CTRLEN                 BIT(0)
+
+#define PM_CAM1                                0x48
+#define PM_CAM1_LDOHPEN                        BIT(2)
+#define PM_CAM1_LDOLPEN                        BIT(1)
+#define PM_CAM1_CTRLEN                 BIT(0)
+
+#define PM_CCP2TX                      0x4c
+#define PM_CCP2TX_LDOEN                        BIT(1)
+#define PM_CCP2TX_CTRLEN               BIT(0)
+
+#define PM_DSI0                                0x50
+#define PM_DSI0_LDOHPEN                        BIT(2)
+#define PM_DSI0_LDOLPEN                        BIT(1)
+#define PM_DSI0_CTRLEN                 BIT(0)
+
+#define PM_DSI1                                0x54
+#define PM_DSI1_LDOHPEN                        BIT(2)
+#define PM_DSI1_LDOLPEN                        BIT(1)
+#define PM_DSI1_CTRLEN                 BIT(0)
+
+#define PM_HDMI                                0x58
+#define PM_HDMI_RSTDR                  BIT(19)
+#define PM_HDMI_LDOPD                  BIT(1)
+#define PM_HDMI_CTRLEN                 BIT(0)
+
+#define PM_USB                         0x5c
+/* The power gates must be enabled with this bit before enabling the LDO in the
+ * USB block.
+ */
+#define PM_USB_CTRLEN                  BIT(0)
+
+#define PM_PXLDO                       0x60
+#define PM_PXBG                                0x64
+#define PM_DFT                         0x68
+#define PM_SMPS                                0x6c
+#define PM_XOSC                                0x70
+#define PM_SPAREW                      0x74
+#define PM_SPARER                      0x78
+#define PM_AVS_RSTDR                   0x7c
+#define PM_AVS_STAT                    0x80
+#define PM_AVS_EVENT                   0x84
+#define PM_AVS_INTEN                   0x88
+#define PM_DUMMY                       0xfc
+
+#define PM_IMAGE                       0x108
+#define PM_GRAFX                       0x10c
+#define PM_PROC                                0x110
+#define PM_ENAB                                BIT(12)
+#define PM_ISPRSTN                     BIT(8)
+#define PM_H264RSTN                    BIT(7)
+#define PM_PERIRSTN                    BIT(6)
+#define PM_V3DRSTN                     BIT(6)
+#define PM_ISFUNC                      BIT(5)
+#define PM_MRDONE                      BIT(4)
+#define PM_MEMREP                      BIT(3)
+#define PM_ISPOW                       BIT(2)
+#define PM_POWOK                       BIT(1)
+#define PM_POWUP                       BIT(0)
+#define PM_INRUSH_SHIFT                        13
+#define PM_INRUSH_3_5_MA               0
+#define PM_INRUSH_5_MA                 1
+#define PM_INRUSH_10_MA                        2
+#define PM_INRUSH_20_MA                        3
+#define PM_INRUSH_MASK                 (3 << PM_INRUSH_SHIFT)
+
+#define PM_PASSWORD                    0x5a000000
+
+#define PM_WDOG_TIME_SET               0x000fffff
+#define PM_RSTC_WRCFG_CLR              0xffffffcf
+#define PM_RSTS_HADWRH_SET             0x00000040
+#define PM_RSTC_WRCFG_SET              0x00000030
+#define PM_RSTC_WRCFG_FULL_RESET       0x00000020
+#define PM_RSTC_RESET                  0x00000102
+
+#define PM_READ(reg) readl(power->base + (reg))
+#define PM_WRITE(reg, val) writel(PM_PASSWORD | (val), power->base + (reg))
+
+#define ASB_BRDG_VERSION                0x00
+#define ASB_CPR_CTRL                    0x04
+
+#define ASB_V3D_S_CTRL                 0x08
+#define ASB_V3D_M_CTRL                 0x0c
+#define ASB_ISP_S_CTRL                 0x10
+#define ASB_ISP_M_CTRL                 0x14
+#define ASB_H264_S_CTRL                        0x18
+#define ASB_H264_M_CTRL                        0x1c
+
+#define ASB_REQ_STOP                    BIT(0)
+#define ASB_ACK                         BIT(1)
+#define ASB_EMPTY                       BIT(2)
+#define ASB_FULL                        BIT(3)
+
+#define ASB_AXI_BRDG_ID                        0x20
+
+#define ASB_READ(reg) readl(power->asb + (reg))
+#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg))
+
+struct bcm2835_power_domain {
+       struct generic_pm_domain base;
+       struct bcm2835_power *power;
+       u32 domain;
+       struct clk *clk;
+};
+
+struct bcm2835_power {
+       struct device           *dev;
+       /* PM registers. */
+       void __iomem            *base;
+       /* AXI Async bridge registers. */
+       void __iomem            *asb;
+
+       struct genpd_onecell_data pd_xlate;
+       struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT];
+       struct reset_controller_dev reset;
+};
+
+static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
+{
+       u64 start = ktime_get_ns();
+
+       /* Enable the module's async AXI bridges. */
+       ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
+       while (ASB_READ(reg) & ASB_ACK) {
+               cpu_relax();
+               if (ktime_get_ns() - start >= 1000)
+                       return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
+{
+       u64 start = ktime_get_ns();
+
+       /* Disable the module's async AXI bridges. */
+       ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
+       while (!(ASB_READ(reg) & ASB_ACK)) {
+               cpu_relax();
+               if (ktime_get_ns() - start >= 1000)
+                       return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg)
+{
+       struct bcm2835_power *power = pd->power;
+
+       /* Enable functional isolation */
+       PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC);
+
+       /* Enable electrical isolation */
+       PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
+
+       /* Open the power switches. */
+       PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_POWUP);
+
+       return 0;
+}
+
+static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg)
+{
+       struct bcm2835_power *power = pd->power;
+       struct device *dev = power->dev;
+       u64 start;
+       int ret;
+       int inrush;
+       bool powok;
+
+       /* If it was already powered on by the fw, leave it that way. */
+       if (PM_READ(pm_reg) & PM_POWUP)
+               return 0;
+
+       /* Enable power.  Allowing too much current at once may result
+        * in POWOK never getting set, so start low and ramp it up as
+        * necessary to succeed.
+        */
+       powok = false;
+       for (inrush = PM_INRUSH_3_5_MA; inrush <= PM_INRUSH_20_MA; inrush++) {
+               PM_WRITE(pm_reg,
+                        (PM_READ(pm_reg) & ~PM_INRUSH_MASK) |
+                        (inrush << PM_INRUSH_SHIFT) |
+                        PM_POWUP);
+
+               start = ktime_get_ns();
+               while (!(powok = !!(PM_READ(pm_reg) & PM_POWOK))) {
+                       cpu_relax();
+                       if (ktime_get_ns() - start >= 3000)
+                               break;
+               }
+       }
+       if (!powok) {
+               dev_err(dev, "Timeout waiting for %s power OK\n",
+                       pd->base.name);
+               ret = -ETIMEDOUT;
+               goto err_disable_powup;
+       }
+
+       /* Disable electrical isolation */
+       PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISPOW);
+
+       /* Repair memory */
+       PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_MEMREP);
+       start = ktime_get_ns();
+       while (!(PM_READ(pm_reg) & PM_MRDONE)) {
+               cpu_relax();
+               if (ktime_get_ns() - start >= 1000) {
+                       dev_err(dev, "Timeout waiting for %s memory repair\n",
+                               pd->base.name);
+                       ret = -ETIMEDOUT;
+                       goto err_disable_ispow;
+               }
+       }
+
+       /* Disable functional isolation */
+       PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISFUNC);
+
+       return 0;
+
+err_disable_ispow:
+       PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
+err_disable_powup:
+       PM_WRITE(pm_reg, PM_READ(pm_reg) & ~(PM_POWUP | PM_INRUSH_MASK));
+       return ret;
+}
+
+static int bcm2835_asb_power_on(struct bcm2835_power_domain *pd,
+                               u32 pm_reg,
+                               u32 asb_m_reg,
+                               u32 asb_s_reg,
+                               u32 reset_flags)
+{
+       struct bcm2835_power *power = pd->power;
+       int ret;
+
+       ret = clk_prepare_enable(pd->clk);
+       if (ret) {
+               dev_err(power->dev, "Failed to enable clock for %s\n",
+                       pd->base.name);
+               return ret;
+       }
+
+       /* Wait 32 clocks for reset to propagate, 1 us will be enough */
+       udelay(1);
+
+       clk_disable_unprepare(pd->clk);
+
+       /* Deassert the resets. */
+       PM_WRITE(pm_reg, PM_READ(pm_reg) | reset_flags);
+
+       ret = clk_prepare_enable(pd->clk);
+       if (ret) {
+               dev_err(power->dev, "Failed to enable clock for %s\n",
+                       pd->base.name);
+               goto err_enable_resets;
+       }
+
+       ret = bcm2835_asb_enable(power, asb_m_reg);
+       if (ret) {
+               dev_err(power->dev, "Failed to enable ASB master for %s\n",
+                       pd->base.name);
+               goto err_disable_clk;
+       }
+       ret = bcm2835_asb_enable(power, asb_s_reg);
+       if (ret) {
+               dev_err(power->dev, "Failed to enable ASB slave for %s\n",
+                       pd->base.name);
+               goto err_disable_asb_master;
+       }
+
+       return 0;
+
+err_disable_asb_master:
+       bcm2835_asb_disable(power, asb_m_reg);
+err_disable_clk:
+       clk_disable_unprepare(pd->clk);
+err_enable_resets:
+       PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
+       return ret;
+}
+
+static int bcm2835_asb_power_off(struct bcm2835_power_domain *pd,
+                                u32 pm_reg,
+                                u32 asb_m_reg,
+                                u32 asb_s_reg,
+                                u32 reset_flags)
+{
+       struct bcm2835_power *power = pd->power;
+       int ret;
+
+       ret = bcm2835_asb_disable(power, asb_s_reg);
+       if (ret) {
+               dev_warn(power->dev, "Failed to disable ASB slave for %s\n",
+                        pd->base.name);
+               return ret;
+       }
+       ret = bcm2835_asb_disable(power, asb_m_reg);
+       if (ret) {
+               dev_warn(power->dev, "Failed to disable ASB master for %s\n",
+                        pd->base.name);
+               bcm2835_asb_enable(power, asb_s_reg);
+               return ret;
+       }
+
+       clk_disable_unprepare(pd->clk);
+
+       /* Assert the resets. */
+       PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
+
+       return 0;
+}
+
+static int bcm2835_power_pd_power_on(struct generic_pm_domain *domain)
+{
+       struct bcm2835_power_domain *pd =
+               container_of(domain, struct bcm2835_power_domain, base);
+       struct bcm2835_power *power = pd->power;
+
+       switch (pd->domain) {
+       case BCM2835_POWER_DOMAIN_GRAFX:
+               return bcm2835_power_power_on(pd, PM_GRAFX);
+
+       case BCM2835_POWER_DOMAIN_GRAFX_V3D:
+               return bcm2835_asb_power_on(pd, PM_GRAFX,
+                                           ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
+                                           PM_V3DRSTN);
+
+       case BCM2835_POWER_DOMAIN_IMAGE:
+               return bcm2835_power_power_on(pd, PM_IMAGE);
+
+       case BCM2835_POWER_DOMAIN_IMAGE_PERI:
+               return bcm2835_asb_power_on(pd, PM_IMAGE,
+                                           0, 0,
+                                           PM_PERIRSTN);
+
+       case BCM2835_POWER_DOMAIN_IMAGE_ISP:
+               return bcm2835_asb_power_on(pd, PM_IMAGE,
+                                           ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
+                                           PM_ISPRSTN);
+
+       case BCM2835_POWER_DOMAIN_IMAGE_H264:
+               return bcm2835_asb_power_on(pd, PM_IMAGE,
+                                           ASB_H264_M_CTRL, ASB_H264_S_CTRL,
+                                           PM_H264RSTN);
+
+       case BCM2835_POWER_DOMAIN_USB:
+               PM_WRITE(PM_USB, PM_USB_CTRLEN);
+               return 0;
+
+       case BCM2835_POWER_DOMAIN_DSI0:
+               PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
+               PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN | PM_DSI0_LDOHPEN);
+               return 0;
+
+       case BCM2835_POWER_DOMAIN_DSI1:
+               PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
+               PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN | PM_DSI1_LDOHPEN);
+               return 0;
+
+       case BCM2835_POWER_DOMAIN_CCP2TX:
+               PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
+               PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN | PM_CCP2TX_LDOEN);
+               return 0;
+
+       case BCM2835_POWER_DOMAIN_HDMI:
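+               /* Assert the reset, enable the block, power up the LDO, then release the reset. */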
+               PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_RSTDR);
+               PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_CTRLEN);
+               PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_LDOPD);
+               usleep_range(100, 200);
+               PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_RSTDR);
+               return 0;
+
+       default:
+               dev_err(power->dev, "Invalid domain %d\n", pd->domain);
+               return -EINVAL;
+       }
+}
+
+static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
+{
+       struct bcm2835_power_domain *pd =
+               container_of(domain, struct bcm2835_power_domain, base);
+       struct bcm2835_power *power = pd->power;
+
+       switch (pd->domain) {
+       case BCM2835_POWER_DOMAIN_GRAFX:
+               return bcm2835_power_power_off(pd, PM_GRAFX);
+
+       case BCM2835_POWER_DOMAIN_GRAFX_V3D:
+               return bcm2835_asb_power_off(pd, PM_GRAFX,
+                                            ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
+                                            PM_V3DRSTN);
+
+       case BCM2835_POWER_DOMAIN_IMAGE:
+               return bcm2835_power_power_off(pd, PM_IMAGE);
+
+       case BCM2835_POWER_DOMAIN_IMAGE_PERI:
+               return bcm2835_asb_power_off(pd, PM_IMAGE,
+                                            0, 0,
+                                            PM_PERIRSTN);
+
+       case BCM2835_POWER_DOMAIN_IMAGE_ISP:
+               return bcm2835_asb_power_off(pd, PM_IMAGE,
+                                            ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
+                                            PM_ISPRSTN);
+
+       case BCM2835_POWER_DOMAIN_IMAGE_H264:
+               return bcm2835_asb_power_off(pd, PM_IMAGE,
+                                            ASB_H264_M_CTRL, ASB_H264_S_CTRL,
+                                            PM_H264RSTN);
+
+       case BCM2835_POWER_DOMAIN_USB:
+               PM_WRITE(PM_USB, 0);
+               return 0;
+
+       case BCM2835_POWER_DOMAIN_DSI0:
+               PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
+               PM_WRITE(PM_DSI0, 0);
+               return 0;
+
+       case BCM2835_POWER_DOMAIN_DSI1:
+               PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
+               PM_WRITE(PM_DSI1, 0);
+               return 0;
+
+       case BCM2835_POWER_DOMAIN_CCP2TX:
+               PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
+               PM_WRITE(PM_CCP2TX, 0);
+               return 0;
+
+       case BCM2835_POWER_DOMAIN_HDMI:
+               PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_LDOPD);
+               PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_CTRLEN);
+               return 0;
+
+       default:
+               dev_err(power->dev, "Invalid domain %d\n", pd->domain);
+               return -EINVAL;
+       }
+}
+
+static void
+bcm2835_init_power_domain(struct bcm2835_power *power,
+                         int pd_xlate_index, const char *name)
+{
+       struct device *dev = power->dev;
+       struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
+
+       dom->clk = devm_clk_get(dev->parent, name);
+
+       dom->base.name = name;
+       dom->base.power_on = bcm2835_power_pd_power_on;
+       dom->base.power_off = bcm2835_power_pd_power_off;
+
+       dom->domain = pd_xlate_index;
+       dom->power = power;
+
+       /* XXX: on/off at boot? */
+       pm_genpd_init(&dom->base, NULL, true);
+
+       power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+}
+
+/**
+ * bcm2835_reset_reset() - Resets a block that has a reset line in the
+ * PM block.
+ *
+ * The consumer of the reset controller must have the power domain up
+ * -- there's no reset ability with the power domain down.  To reset
+ * the sub-block, we just disable its access to memory through the
+ * ASB, reset, and re-enable.
+ */
+static int bcm2835_reset_reset(struct reset_controller_dev *rcdev,
+                              unsigned long id)
+{
+       struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
+                                                  reset);
+       struct bcm2835_power_domain *pd;
+       int ret;
+
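+       /* Map the reset id to the power domain that owns the corresponding reset line. */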
+       switch (id) {
+       case BCM2835_RESET_V3D:
+               pd = &power->domains[BCM2835_POWER_DOMAIN_GRAFX_V3D];
+               break;
+       case BCM2835_RESET_H264:
+               pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_H264];
+               break;
+       case BCM2835_RESET_ISP:
+               pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_ISP];
+               break;
+       default:
+               dev_err(power->dev, "Bad reset id %ld\n", id);
+               return -EINVAL;
+       }
+
+       ret = bcm2835_power_pd_power_off(&pd->base);
+       if (ret)
+               return ret;
+
+       return bcm2835_power_pd_power_on(&pd->base);
+}
+
+static int bcm2835_reset_status(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
+                                                  reset);
+
+       switch (id) {
+       case BCM2835_RESET_V3D:
+               return !(PM_READ(PM_GRAFX) & PM_V3DRSTN);
+       case BCM2835_RESET_H264:
+               return !(PM_READ(PM_IMAGE) & PM_H264RSTN);
+       case BCM2835_RESET_ISP:
+               return !(PM_READ(PM_IMAGE) & PM_ISPRSTN);
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct reset_control_ops bcm2835_reset_ops = {
+       .reset = bcm2835_reset_reset,
+       .status = bcm2835_reset_status,
+};
+
+static const char *const power_domain_names[] = {
+       [BCM2835_POWER_DOMAIN_GRAFX] = "grafx",
+       [BCM2835_POWER_DOMAIN_GRAFX_V3D] = "v3d",
+
+       [BCM2835_POWER_DOMAIN_IMAGE] = "image",
+       [BCM2835_POWER_DOMAIN_IMAGE_PERI] = "peri_image",
+       [BCM2835_POWER_DOMAIN_IMAGE_H264] = "h264",
+       [BCM2835_POWER_DOMAIN_IMAGE_ISP] = "isp",
+
+       [BCM2835_POWER_DOMAIN_USB] = "usb",
+       [BCM2835_POWER_DOMAIN_DSI0] = "dsi0",
+       [BCM2835_POWER_DOMAIN_DSI1] = "dsi1",
+       [BCM2835_POWER_DOMAIN_CAM0] = "cam0",
+       [BCM2835_POWER_DOMAIN_CAM1] = "cam1",
+       [BCM2835_POWER_DOMAIN_CCP2TX] = "ccp2tx",
+       [BCM2835_POWER_DOMAIN_HDMI] = "hdmi",
+};
+
+static int bcm2835_power_probe(struct platform_device *pdev)
+{
+       struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);
+       struct device *dev = &pdev->dev;
+       struct bcm2835_power *power;
+       static const struct {
+               int parent, child;
+       } domain_deps[] = {
+               { BCM2835_POWER_DOMAIN_GRAFX, BCM2835_POWER_DOMAIN_GRAFX_V3D },
+               { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_PERI },
+               { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_H264 },
+               { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_ISP },
+               { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_USB },
+               { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
+               { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
+       };
+       int ret, i;
+       u32 id;
+
+       power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
+       if (!power)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, power);
+
+       power->dev = dev;
+       power->base = pm->base;
+       power->asb = pm->asb;
+
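+       /* Sanity-check the ASB register mapping by reading the AXI bridge ID ("BRDG"). */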
+       id = ASB_READ(ASB_AXI_BRDG_ID);
+       if (id != 0x62726467 /* "BRDG" */) {
+               dev_err(dev, "ASB register ID returned 0x%08x\n", id);
+               return -ENODEV;
+       }
+
+       power->pd_xlate.domains = devm_kcalloc(dev,
+                                              ARRAY_SIZE(power_domain_names),
+                                              sizeof(*power->pd_xlate.domains),
+                                              GFP_KERNEL);
+       if (!power->pd_xlate.domains)
+               return -ENOMEM;
+
+       power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
+
+       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
+               bcm2835_init_power_domain(power, i, power_domain_names[i]);
+
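+       /* Wire up the parent/child links described by domain_deps above. */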
+       for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
+               pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
+                                      &power->domains[domain_deps[i].child].base);
+       }
+
+       power->reset.owner = THIS_MODULE;
+       power->reset.nr_resets = BCM2835_RESET_COUNT;
+       power->reset.ops = &bcm2835_reset_ops;
+       power->reset.of_node = dev->parent->of_node;
+
+       ret = devm_reset_controller_register(dev, &power->reset);
+       if (ret)
+               return ret;
+
+       of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
+
+       dev_info(dev, "Broadcom BCM2835 power domains driver");
+       return 0;
+}
+
+static int bcm2835_power_remove(struct platform_device *pdev)
+{
+       return 0;
+}
+
+static struct platform_driver bcm2835_power_driver = {
+       .probe          = bcm2835_power_probe,
+       .remove         = bcm2835_power_remove,
+       .driver = {
+               .name = "bcm2835-power",
+       },
+};
+module_platform_driver(bcm2835_power_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM power domains and reset");
+MODULE_LICENSE("GPL");
index ab8f82ee7ee58a0b67ff4174d9344f6fc707ac38..5814d2f395a404c5bf79b305c2377569e6f162c5 100644 (file)
@@ -25,6 +25,7 @@
 #define DPIO_CMDID_ENABLE                              DPIO_CMD(0x002)
 #define DPIO_CMDID_DISABLE                             DPIO_CMD(0x003)
 #define DPIO_CMDID_GET_ATTR                            DPIO_CMD(0x004)
+#define DPIO_CMDID_RESET                               DPIO_CMD(0x005)
 
 struct dpio_cmd_open {
        __le32 dpio_id;
index e58fcc9096e82f7527d0293942b2c77d6a0e9b97..a28799b62d536254350985807c42c277c3fb3d7f 100644 (file)
@@ -30,6 +30,8 @@ struct dpio_priv {
        struct dpaa2_io *io;
 };
 
+static cpumask_var_t cpus_unused_mask;
+
 static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
 {
        struct device *dev = (struct device *)arg;
@@ -86,7 +88,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
        struct dpio_priv *priv;
        int err = -ENOMEM;
        struct device *dev = &dpio_dev->dev;
-       static int next_cpu = -1;
+       int possible_next_cpu;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -108,6 +110,12 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
                goto err_open;
        }
 
+       err = dpio_reset(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+       if (err) {
+               dev_err(dev, "dpio_reset() failed\n");
+               goto err_reset;
+       }
+
        err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
                                  &dpio_attrs);
        if (err) {
@@ -128,17 +136,14 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
        desc.dpio_id = dpio_dev->obj_desc.id;
 
        /* get the cpu to use for the affinity hint */
-       if (next_cpu == -1)
-               next_cpu = cpumask_first(cpu_online_mask);
-       else
-               next_cpu = cpumask_next(next_cpu, cpu_online_mask);
-
-       if (!cpu_possible(next_cpu)) {
+       possible_next_cpu = cpumask_first(cpus_unused_mask);
+       if (possible_next_cpu >= nr_cpu_ids) {
                dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
                err = -ERANGE;
                goto err_allocate_irqs;
        }
-       desc.cpu = next_cpu;
+       desc.cpu = possible_next_cpu;
+       cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);
 
        /*
         * Set the CENA regs to be the cache inhibited area of the portal to
@@ -171,7 +176,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
        if (err)
                goto err_register_dpio_irq;
 
-       priv->io = dpaa2_io_create(&desc);
+       priv->io = dpaa2_io_create(&desc, dev);
        if (!priv->io) {
                dev_err(dev, "dpaa2_io_create failed\n");
                err = -ENOMEM;
@@ -182,7 +187,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
        dev_dbg(dev, "   receives_notifications = %d\n",
                desc.receives_notifications);
        dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
-       fsl_mc_portal_free(dpio_dev->mc_io);
 
        return 0;
 
@@ -193,6 +197,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
 err_allocate_irqs:
        dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
 err_get_attr:
+err_reset:
        dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
 err_open:
        fsl_mc_portal_free(dpio_dev->mc_io);
@@ -211,20 +216,17 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
 {
        struct device *dev;
        struct dpio_priv *priv;
-       int err;
+       int err = 0, cpu;
 
        dev = &dpio_dev->dev;
        priv = dev_get_drvdata(dev);
+       cpu = dpaa2_io_get_cpu(priv->io);
 
        dpaa2_io_down(priv->io);
 
        dpio_teardown_irqs(dpio_dev);
 
-       err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
-       if (err) {
-               dev_err(dev, "MC portal allocation failed\n");
-               goto err_mcportal;
-       }
+       cpumask_set_cpu(cpu, cpus_unused_mask);
 
        err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
                        &dpio_dev->mc_handle);
@@ -243,7 +245,7 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
 
 err_open:
        fsl_mc_portal_free(dpio_dev->mc_io);
-err_mcportal:
+
        return err;
 }
 
@@ -267,11 +269,16 @@ static struct fsl_mc_driver dpaa2_dpio_driver = {
 
 static int dpio_driver_init(void)
 {
+       if (!zalloc_cpumask_var(&cpus_unused_mask, GFP_KERNEL))
+               return -ENOMEM;
+       cpumask_copy(cpus_unused_mask, cpu_online_mask);
+
        return fsl_mc_driver_register(&dpaa2_dpio_driver);
 }
 
 static void dpio_driver_exit(void)
 {
+       free_cpumask_var(cpus_unused_mask);
        fsl_mc_driver_unregister(&dpaa2_dpio_driver);
 }
 module_init(dpio_driver_init);
index ec0837ff039a0a334d11df97bde68d6316bbaaee..b9539ef2c3cdcce9eb02855702f344f90f241ba7 100644 (file)
@@ -27,6 +27,7 @@ struct dpaa2_io {
        /* protect notifications list */
        spinlock_t lock_notifications;
        struct list_head notifications;
+       struct device *dev;
 };
 
 struct dpaa2_io_store {
@@ -98,13 +99,15 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
 /**
  * dpaa2_io_create() - create a dpaa2_io object.
  * @desc: the dpaa2_io descriptor
+ * @dev: the actual DPIO device
  *
  * Activates a "struct dpaa2_io" corresponding to the given config of an actual
  * DPIO object.
  *
  * Return a valid dpaa2_io object for success, or NULL for failure.
  */
-struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
+                                struct device *dev)
 {
        struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 
@@ -146,6 +149,8 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
                dpio_by_cpu[desc->cpu] = obj;
        spin_unlock(&dpio_list_lock);
 
+       obj->dev = dev;
+
        return obj;
 }
 
@@ -160,6 +165,11 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
  */
 void dpaa2_io_down(struct dpaa2_io *d)
 {
+       spin_lock(&dpio_list_lock);
+       dpio_by_cpu[d->dpio_desc.cpu] = NULL;
+       list_del(&d->node);
+       spin_unlock(&dpio_list_lock);
+
        kfree(d);
 }
 
@@ -209,11 +219,25 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
        return IRQ_HANDLED;
 }
 
+/**
+ * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
+ *
+ * @d: the given DPIO object.
+ *
+ * Return the cpu associated with the DPIO object
+ */
+int dpaa2_io_get_cpu(struct dpaa2_io *d)
+{
+       return d->dpio_desc.cpu;
+}
+EXPORT_SYMBOL(dpaa2_io_get_cpu);
+
 /**
  * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
  *                               notifications on the given DPIO service.
  * @d:   the given DPIO service.
  * @ctx: the notification context.
+ * @dev: the device that requests the registration
  *
  * The caller should make the MC command to attach a DPAA2 object to
  * a DPIO after this function completes successfully.  In that way:
@@ -228,14 +252,20 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
  * Return 0 for success, or -ENODEV for failure.
  */
 int dpaa2_io_service_register(struct dpaa2_io *d,
-                             struct dpaa2_io_notification_ctx *ctx)
+                             struct dpaa2_io_notification_ctx *ctx,
+                             struct device *dev)
 {
+       struct device_link *link;
        unsigned long irqflags;
 
        d = service_select_by_cpu(d, ctx->desired_cpu);
        if (!d)
                return -ENODEV;
 
+       link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+       if (!link)
+               return -EINVAL;
+
        ctx->dpio_id = d->dpio_desc.dpio_id;
        ctx->qman64 = (u64)(uintptr_t)ctx;
        ctx->dpio_private = d;
@@ -256,12 +286,14 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
  * dpaa2_io_service_deregister - The opposite of 'register'.
  * @service: the given DPIO service.
  * @ctx: the notification context.
+ * @dev: the device that requests to be deregistered
  *
  * This function should be called only after sending the MC command
  * to detach the notification-producing device from the DPIO.
  */
 void dpaa2_io_service_deregister(struct dpaa2_io *service,
-                                struct dpaa2_io_notification_ctx *ctx)
+                                struct dpaa2_io_notification_ctx *ctx,
+                                struct device *dev)
 {
        struct dpaa2_io *d = ctx->dpio_private;
        unsigned long irqflags;
@@ -272,6 +304,9 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
        spin_lock_irqsave(&d->lock_notifications, irqflags);
        list_del(&ctx->node);
        spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
+       if (dev)
+               device_link_remove(dev, d->dev);
 }
 EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
 
@@ -438,7 +473,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
  * Return 0 for success, and negative error code for failure.
  */
 int dpaa2_io_service_release(struct dpaa2_io *d,
-                            u32 bpid,
+                            u16 bpid,
                             const u64 *buffers,
                             unsigned int num_buffers)
 {
@@ -467,7 +502,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
  * Eg. if the buffer pool is empty, this will return zero.
  */
 int dpaa2_io_service_acquire(struct dpaa2_io *d,
-                            u32 bpid,
+                            u16 bpid,
                             u64 *buffers,
                             unsigned int num_buffers)
 {
@@ -595,6 +630,7 @@ struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
                if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
                        ret = NULL;
        } else {
+               prefetch(&s->vaddr[s->idx]);
                *is_last = 0;
        }
 
index ff37c80e11a010bf0863fd3ff605b1add8190ff2..521bc6946317b78b79e0ca4a5ad9787cc23bc313 100644 (file)
@@ -196,3 +196,26 @@ int dpio_get_api_version(struct fsl_mc_io *mc_io,
 
        return 0;
 }
+
+/**
+ * dpio_reset() - Reset the DPIO, returning the object to its initial state.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPIO object
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpio_reset(struct fsl_mc_io *mc_io,
+              u32 cmd_flags,
+              u16 token)
+{
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
+                                         cmd_flags,
+                                         token);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
index 49194c8e45f124acdc6063bde89d724e80b489c8..b2ac4ba4fb8e1e29aa05c5b5ba20046043e8c045 100644 (file)
@@ -80,4 +80,8 @@ int dpio_get_api_version(struct fsl_mc_io *mc_io,
                         u16 *major_ver,
                         u16 *minor_ver);
 
+int dpio_reset(struct fsl_mc_io *mc_io,
+              u32 cmd_flags,
+              u16 token);
+
 #endif /* __FSL_DPIO_H */
index 0bddb85c0ae54c37c3d37dee7787e0f39efeb1e8..5a73397ae79e8e4b19944a7ccb634361ef8c58ea 100644 (file)
@@ -180,6 +180,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
        reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
        if (!reg) {
                pr_err("qbman: the portal is not enabled!\n");
+               kfree(p);
                return NULL;
        }
 
index 302e0c8d69d93e1fe482fea2c77d53be4e3ff87e..4f9655087bd73edd7b7cbd27e0bffb1bf5f4b621 100644 (file)
@@ -32,6 +32,7 @@ struct fsl_soc_die_attr {
 static struct guts *guts;
 static struct soc_device_attribute soc_dev_attr;
 static struct soc_device *soc_dev;
+static struct device_node *root;
 
 
 /* SoC die attribute definition for QorIQ platform */
@@ -132,7 +133,7 @@ EXPORT_SYMBOL(fsl_guts_get_svr);
 
 static int fsl_guts_probe(struct platform_device *pdev)
 {
-       struct device_node *root, *np = pdev->dev.of_node;
+       struct device_node *np = pdev->dev.of_node;
        struct device *dev = &pdev->dev;
        struct resource *res;
        const struct fsl_soc_die_attr *soc_die;
@@ -155,9 +156,8 @@ static int fsl_guts_probe(struct platform_device *pdev)
        root = of_find_node_by_path("/");
        if (of_property_read_string(root, "model", &machine))
                of_property_read_string_index(root, "compatible", 0, &machine);
-       of_node_put(root);
        if (machine)
-               soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+               soc_dev_attr.machine = machine;
 
        svr = fsl_guts_get_svr();
        soc_die = fsl_soc_die_match(svr, fsl_soc_die);
@@ -192,6 +192,7 @@ static int fsl_guts_probe(struct platform_device *pdev)
 static int fsl_guts_remove(struct platform_device *dev)
 {
        soc_device_unregister(soc_dev);
+       of_node_put(root);
        return 0;
 }
 
index 2112d18dbb7b91b1cd8c4b44586efcfbf8634a1f..d80f899d22f9f3217d2444ff386bec5fd42db614 100644 (file)
@@ -2,7 +2,7 @@ menu "i.MX SoC drivers"
 
 config IMX_GPCV2_PM_DOMAINS
        bool "i.MX GPCv2 PM domains"
-       depends on SOC_IMX7D || SOC_IMX8MQ || (COMPILE_TEST && OF)
+       depends on ARCH_MXC || (COMPILE_TEST && OF)
        depends on PM
        select PM_GENERIC_DOMAINS
        default y if SOC_IMX7D
index 8b4f48a2ca57a1c083331e575492d37973cd278d..176f473127b6a4a5d70aca1664ab0839b54aa8a0 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de>
  */
 
+#include <linux/clk.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 
 #define GPC_M4_PU_PDN_FLG              0x1bc
 
+#define GPC_PU_PWRHSK                  0x1fc
+
+#define IMX8M_GPU_HSK_PWRDNREQN                        BIT(6)
+#define IMX8M_VPU_HSK_PWRDNREQN                        BIT(5)
+#define IMX8M_DISP_HSK_PWRDNREQN               BIT(4)
+
 /*
  * The PGC offset values in Reference Manual
  * (Rev. 1, 01/2018 and the older ones) GPC chapter's
 
 #define GPC_PGC_CTRL_PCR               BIT(0)
 
+#define GPC_CLK_MAX            6
+
 struct imx_pgc_domain {
        struct generic_pm_domain genpd;
        struct regmap *regmap;
        struct regulator *regulator;
+       struct clk *clk[GPC_CLK_MAX];
+       int num_clks;
 
        unsigned int pgc;
 
        const struct {
                u32 pxx;
                u32 map;
+               u32 hsk;
        } bits;
 
        const int voltage;
@@ -125,7 +137,7 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
        const bool enable_power_control = !on;
        const bool has_regulator = !IS_ERR(domain->regulator);
        unsigned long deadline;
-       int ret = 0;
+       int i, ret = 0;
 
        regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
                           domain->bits.map, domain->bits.map);
@@ -138,10 +150,18 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
                }
        }
 
+       /* Enable reset clocks for all devices in the domain */
+       for (i = 0; i < domain->num_clks; i++)
+               clk_prepare_enable(domain->clk[i]);
+
        if (enable_power_control)
                regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
                                   GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
 
+       if (domain->bits.hsk)
+               regmap_update_bits(domain->regmap, GPC_PU_PWRHSK,
+                                  domain->bits.hsk, on ? domain->bits.hsk : 0);
+
        regmap_update_bits(domain->regmap, offset,
                           domain->bits.pxx, domain->bits.pxx);
 
@@ -179,6 +199,10 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
                regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
                                   GPC_PGC_CTRL_PCR, 0);
 
+       /* Disable reset clocks for all devices in the domain */
+       for (i = 0; i < domain->num_clks; i++)
+               clk_disable_unprepare(domain->clk[i]);
+
        if (has_regulator && !on) {
                int err;
 
@@ -328,6 +352,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
                .bits  = {
                        .pxx = IMX8M_GPU_SW_Pxx_REQ,
                        .map = IMX8M_GPU_A53_DOMAIN,
+                       .hsk = IMX8M_GPU_HSK_PWRDNREQN,
                },
                .pgc   = IMX8M_PGC_GPU,
        },
@@ -339,6 +364,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
                .bits  = {
                        .pxx = IMX8M_VPU_SW_Pxx_REQ,
                        .map = IMX8M_VPU_A53_DOMAIN,
+                       .hsk = IMX8M_VPU_HSK_PWRDNREQN,
                },
                .pgc   = IMX8M_PGC_VPU,
        },
@@ -350,6 +376,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
                .bits  = {
                        .pxx = IMX8M_DISP_SW_Pxx_REQ,
                        .map = IMX8M_DISP_A53_DOMAIN,
+                       .hsk = IMX8M_DISP_HSK_PWRDNREQN,
                },
                .pgc   = IMX8M_PGC_DISP,
        },
@@ -390,7 +417,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
 
 static const struct regmap_range imx8m_yes_ranges[] = {
                regmap_reg_range(GPC_LPCR_A_CORE_BSC,
-                                GPC_M4_PU_PDN_FLG),
+                                GPC_PU_PWRHSK),
                regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI),
                                 GPC_PGC_SR(IMX8M_PGC_MIPI)),
                regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE1),
@@ -426,6 +453,41 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = {
        .reg_access_table = &imx8m_access_table,
 };
 
+static int imx_pgc_get_clocks(struct imx_pgc_domain *domain)
+{
+       int i, ret;
+
+       for (i = 0; ; i++) {
+               struct clk *clk = of_clk_get(domain->dev->of_node, i);
+               if (IS_ERR(clk))
+                       break;
+               if (i >= GPC_CLK_MAX) {
+                       dev_err(domain->dev, "more than %d clocks\n",
+                               GPC_CLK_MAX);
+                       ret = -EINVAL;
+                       goto clk_err;
+               }
+               domain->clk[i] = clk;
+       }
+       domain->num_clks = i;
+
+       return 0;
+
+clk_err:
+       while (i--)
+               clk_put(domain->clk[i]);
+
+       return ret;
+}
+
+static void imx_pgc_put_clocks(struct imx_pgc_domain *domain)
+{
+       int i;
+
+       for (i = domain->num_clks - 1; i >= 0; i--)
+               clk_put(domain->clk[i]);
+}
+
 static int imx_pgc_domain_probe(struct platform_device *pdev)
 {
        struct imx_pgc_domain *domain = pdev->dev.platform_data;
@@ -445,9 +507,17 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
                                      domain->voltage, domain->voltage);
        }
 
+       ret = imx_pgc_get_clocks(domain);
+       if (ret) {
+               if (ret != -EPROBE_DEFER)
+                       dev_err(domain->dev, "Failed to get domain's clocks\n");
+               return ret;
+       }
+
        ret = pm_genpd_init(&domain->genpd, NULL, true);
        if (ret) {
                dev_err(domain->dev, "Failed to init power domain\n");
+               imx_pgc_put_clocks(domain);
                return ret;
        }
 
@@ -456,6 +526,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(domain->dev, "Failed to add genpd provider\n");
                pm_genpd_remove(&domain->genpd);
+               imx_pgc_put_clocks(domain);
        }
 
        return ret;
@@ -467,6 +538,7 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
 
        of_genpd_del_provider(domain->dev->of_node);
        pm_genpd_remove(&domain->genpd);
+       imx_pgc_put_clocks(domain);
 
        return 0;
 }
index fcbf8a2e40806415f14dd4eae2d78ced55cc8e1c..1ee298f6bf17ea65af1101cc71204cae03dfe541 100644 (file)
@@ -98,6 +98,24 @@ config QCOM_RPMH
          of hardware components aggregate requests for these resources and
          help apply the aggregated state on the resource.
 
+config QCOM_RPMHPD
+       bool "Qualcomm RPMh Power domain driver"
+       depends on QCOM_RPMH && QCOM_COMMAND_DB
+       help
+         QCOM RPMh Power domain driver to support power-domains with
+         performance states. The driver communicates a performance state
+         value to RPMh, which then translates it into the corresponding voltage
+         for the voltage rail.
+
+config QCOM_RPMPD
+       bool "Qualcomm RPM Power domain driver"
+       depends on QCOM_SMD_RPM=y
+       help
+         QCOM RPM Power domain driver to support power-domains with
+         performance states. The driver communicates a performance state
+         value to RPM, which then translates it into the corresponding voltage
+         for the voltage rail.
+
 config QCOM_SMEM
        tristate "Qualcomm Shared Memory Manager (SMEM)"
        depends on ARCH_QCOM || COMPILE_TEST
index f25b54cd6cf83d5771680b6c7db94ca12cd47722..ffe519b0cb6694f7db627d03a820f40dbaff66d5 100644 (file)
@@ -21,3 +21,5 @@ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
 obj-$(CONFIG_QCOM_APR) += apr.o
 obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
 obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
+obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
index 2e1e4f0a5db8b4374286fec417750409c199ef08..86600d97c36d31a785b52f65cb0fe3091ff5cedd 100644 (file)
@@ -71,6 +71,11 @@ static struct llcc_slice_config sdm845_data[] =  {
        SCT_ENTRY(LLCC_AUDHW,    22, 1024, 1, 1, 0xffc, 0x2,   0, 0, 1, 1, 0),
 };
 
+static int sdm845_qcom_llcc_remove(struct platform_device *pdev)
+{
+       return qcom_llcc_remove(pdev);
+}
+
 static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
 {
        return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
@@ -87,6 +92,7 @@ static struct platform_driver sdm845_qcom_llcc_driver = {
                .of_match_table = sdm845_qcom_llcc_of_match,
        },
        .probe = sdm845_qcom_llcc_probe,
+       .remove = sdm845_qcom_llcc_remove,
 };
 module_platform_driver(sdm845_qcom_llcc_driver);
 
index 80667f7be52c54ce85429b5c8915f5615a0ce5b2..9090ea12eaf3dda4cd54b631e8527f11d0a60440 100644 (file)
@@ -46,7 +46,7 @@
 
 #define BANK_OFFSET_STRIDE           0x80000
 
-static struct llcc_drv_data *drv_data;
+static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
 
 static const struct regmap_config llcc_regmap_config = {
        .reg_bits = 32,
@@ -68,6 +68,9 @@ struct llcc_slice_desc *llcc_slice_getd(u32 uid)
        struct llcc_slice_desc *desc;
        u32 sz, count;
 
+       if (IS_ERR(drv_data))
+               return ERR_CAST(drv_data);
+
        cfg = drv_data->cfg;
        sz = drv_data->cfg_size;
 
@@ -108,6 +111,9 @@ static int llcc_update_act_ctrl(u32 sid,
        u32 slice_status;
        int ret;
 
+       if (IS_ERR(drv_data))
+               return PTR_ERR(drv_data);
+
        act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
        status_reg = LLCC_TRP_STATUSn(sid);
 
@@ -143,6 +149,9 @@ int llcc_slice_activate(struct llcc_slice_desc *desc)
        int ret;
        u32 act_ctrl_val;
 
+       if (IS_ERR(drv_data))
+               return PTR_ERR(drv_data);
+
        if (IS_ERR_OR_NULL(desc))
                return -EINVAL;
 
@@ -180,6 +189,9 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc)
        u32 act_ctrl_val;
        int ret;
 
+       if (IS_ERR(drv_data))
+               return PTR_ERR(drv_data);
+
        if (IS_ERR_OR_NULL(desc))
                return -EINVAL;
 
@@ -289,46 +301,62 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev)
        return ret;
 }
 
+int qcom_llcc_remove(struct platform_device *pdev)
+{
+       /* Set the global pointer to an error code to avoid referencing it */
+       drv_data = ERR_PTR(-ENODEV);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_llcc_remove);
+
+static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
+               const char *name)
+{
+       struct resource *res;
+       void __iomem *base;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+       if (!res)
+               return ERR_PTR(-ENODEV);
+
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return ERR_CAST(base);
+
+       return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
+}
+
 int qcom_llcc_probe(struct platform_device *pdev,
                      const struct llcc_slice_config *llcc_cfg, u32 sz)
 {
        u32 num_banks;
        struct device *dev = &pdev->dev;
-       struct resource *llcc_banks_res, *llcc_bcast_res;
-       void __iomem *llcc_banks_base, *llcc_bcast_base;
        int ret, i;
        struct platform_device *llcc_edac;
 
        drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
-       if (!drv_data)
-               return -ENOMEM;
-
-       llcc_banks_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-                                                       "llcc_base");
-       llcc_banks_base = devm_ioremap_resource(&pdev->dev, llcc_banks_res);
-       if (IS_ERR(llcc_banks_base))
-               return PTR_ERR(llcc_banks_base);
-
-       drv_data->regmap = devm_regmap_init_mmio(dev, llcc_banks_base,
-                                               &llcc_regmap_config);
-       if (IS_ERR(drv_data->regmap))
-               return PTR_ERR(drv_data->regmap);
-
-       llcc_bcast_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-                                                       "llcc_broadcast_base");
-       llcc_bcast_base = devm_ioremap_resource(&pdev->dev, llcc_bcast_res);
-       if (IS_ERR(llcc_bcast_base))
-               return PTR_ERR(llcc_bcast_base);
-
-       drv_data->bcast_regmap = devm_regmap_init_mmio(dev, llcc_bcast_base,
-                                                       &llcc_regmap_config);
-       if (IS_ERR(drv_data->bcast_regmap))
-               return PTR_ERR(drv_data->bcast_regmap);
+       if (!drv_data) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base");
+       if (IS_ERR(drv_data->regmap)) {
+               ret = PTR_ERR(drv_data->regmap);
+               goto err;
+       }
+
+       drv_data->bcast_regmap =
+               qcom_llcc_init_mmio(pdev, "llcc_broadcast_base");
+       if (IS_ERR(drv_data->bcast_regmap)) {
+               ret = PTR_ERR(drv_data->bcast_regmap);
+               goto err;
+       }
 
        ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
                                                &num_banks);
        if (ret)
-               return ret;
+               goto err;
 
        num_banks &= LLCC_LB_CNT_MASK;
        num_banks >>= LLCC_LB_CNT_SHIFT;
@@ -340,8 +368,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
 
        drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
                                                        GFP_KERNEL);
-       if (!drv_data->offsets)
-               return -ENOMEM;
+       if (!drv_data->offsets) {
+               ret = -ENOMEM;
+               goto err;
+       }
 
        for (i = 0; i < num_banks; i++)
                drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
@@ -349,8 +379,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
        drv_data->bitmap = devm_kcalloc(dev,
        BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
                                                GFP_KERNEL);
-       if (!drv_data->bitmap)
-               return -ENOMEM;
+       if (!drv_data->bitmap) {
+               ret = -ENOMEM;
+               goto err;
+       }
 
        drv_data->cfg = llcc_cfg;
        drv_data->cfg_size = sz;
@@ -359,7 +391,7 @@ int qcom_llcc_probe(struct platform_device *pdev,
 
        ret = qcom_llcc_cfg_program(pdev);
        if (ret)
-               return ret;
+               goto err;
 
        drv_data->ecc_irq = platform_get_irq(pdev, 0);
        if (drv_data->ecc_irq >= 0) {
@@ -370,6 +402,9 @@ int qcom_llcc_probe(struct platform_device *pdev,
                        dev_err(dev, "Failed to register llcc edac driver\n");
        }
 
+       return 0;
+err:
+       drv_data = ERR_PTR(-ENODEV);
        return ret;
 }
 EXPORT_SYMBOL_GPL(qcom_llcc_probe);
index 09c669e70d636861df12f9670dcdf339cf4021ee..038abc377fdb9705eda5f98a363d6d1b3d100478 100644 (file)
@@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
        struct resource *res;
        void __iomem *base;
        struct gsbi_info *gsbi;
-       int i;
+       int i, ret;
        u32 mask, gsbi_num;
        const struct crci_config *config = NULL;
 
@@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, gsbi);
 
-       return of_platform_populate(node, NULL, NULL, &pdev->dev);
+       ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+       if (ret)
+               clk_disable_unprepare(gsbi->hclk);
+       return ret;
 }
 
 static int gsbi_remove(struct platform_device *pdev)
index 97bb5989aa21158dee92d58e5f22bc74729dbbc2..7200d762a951085d9169a6d1003e5a6002a3add8 100644 (file)
@@ -45,9 +45,9 @@ static ssize_t qcom_rmtfs_mem_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf);
 
-static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL);
 
 static ssize_t qcom_rmtfs_mem_show(struct device *dev,
                              struct device_attribute *attr,
@@ -132,6 +132,11 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
+static struct class rmtfs_class = {
+       .owner          = THIS_MODULE,
+       .name           = "rmtfs",
+};
+
 static const struct file_operations qcom_rmtfs_mem_fops = {
        .owner = THIS_MODULE,
        .open = qcom_rmtfs_mem_open,
@@ -199,6 +204,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
 
        dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
        rmtfs_mem->dev.id = client_id;
+       rmtfs_mem->dev.class = &rmtfs_class;
        rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
 
        ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
@@ -277,32 +283,42 @@ static struct platform_driver qcom_rmtfs_mem_driver = {
        },
 };
 
-static int qcom_rmtfs_mem_init(void)
+static int __init qcom_rmtfs_mem_init(void)
 {
        int ret;
 
+       ret = class_register(&rmtfs_class);
+       if (ret)
+               return ret;
+
        ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
                                  QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
        if (ret < 0) {
                pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
-               return ret;
+               goto unregister_class;
        }
 
        ret = platform_driver_register(&qcom_rmtfs_mem_driver);
        if (ret < 0) {
                pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
-               unregister_chrdev_region(qcom_rmtfs_mem_major,
-                                        QCOM_RMTFS_MEM_DEV_MAX);
+               goto unregister_chrdev;
        }
 
+       return 0;
+
+unregister_chrdev:
+       unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+unregister_class:
+       class_unregister(&rmtfs_class);
        return ret;
 }
 module_init(qcom_rmtfs_mem_init);
 
-static void qcom_rmtfs_mem_exit(void)
+static void __exit qcom_rmtfs_mem_exit(void)
 {
        platform_driver_unregister(&qcom_rmtfs_mem_driver);
        unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+       class_unregister(&rmtfs_class);
 }
 module_exit(qcom_rmtfs_mem_exit);
 
index c7beb684128916173eb4e411435c9d1be32fc260..035091fd44b803cb131b6ff5b7e5cbd414e52010 100644 (file)
@@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
        struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
                                                    msg);
        struct completion *compl = rpm_msg->completion;
+       bool free = rpm_msg->needs_free;
 
        rpm_msg->err = r;
 
@@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
        complete(compl);
 
 exit:
-       if (rpm_msg->needs_free)
+       if (free)
                kfree(rpm_msg);
 }
 
@@ -192,9 +193,8 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
                WARN_ON(irqs_disabled());
                ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
        } else {
-               ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
-                               &rpm_msg->msg);
                /* Clean up our call by spoofing tx_done */
+               ret = 0;
                rpmh_tx_done(&rpm_msg->msg, ret);
        }
 
@@ -348,11 +348,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
 {
        struct batch_cache_req *req;
        struct rpmh_request *rpm_msgs;
-       DECLARE_COMPLETION_ONSTACK(compl);
+       struct completion *compls;
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        unsigned long time_left;
        int count = 0;
-       int ret, i, j;
+       int ret, i;
+       void *ptr;
 
        if (!cmd || !n)
                return -EINVAL;
@@ -362,10 +363,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
        if (!count)
                return -EINVAL;
 
-       req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+       ptr = kzalloc(sizeof(*req) +
+                     count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
                      GFP_ATOMIC);
-       if (!req)
+       if (!ptr)
                return -ENOMEM;
+
+       req = ptr;
+       compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
        req->count = count;
        rpm_msgs = req->rpm_msgs;
 
@@ -380,25 +386,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
        }
 
        for (i = 0; i < count; i++) {
-               rpm_msgs[i].completion = &compl;
+               struct completion *compl = &compls[i];
+
+               init_completion(compl);
+               rpm_msgs[i].completion = compl;
                ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
                if (ret) {
                        pr_err("Error(%d) sending RPMH message addr=%#x\n",
                               ret, rpm_msgs[i].msg.cmds[0].addr);
-                       for (j = i; j < count; j++)
-                               rpmh_tx_done(&rpm_msgs[j].msg, ret);
                        break;
                }
        }
 
        time_left = RPMH_TIMEOUT_MS;
-       for (i = 0; i < count; i++) {
-               time_left = wait_for_completion_timeout(&compl, time_left);
+       while (i--) {
+               time_left = wait_for_completion_timeout(&compls[i], time_left);
                if (!time_left) {
                        /*
                         * Better hope they never finish because they'll signal
-                        * the completion on our stack and that's bad once
-                        * we've returned from the function.
+                        * the completion that we're going to free once
+                        * we've returned from this function.
                         */
                        WARN_ON(1);
                        ret = -ETIMEDOUT;
@@ -407,7 +414,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
        }
 
 exit:
-       kfree(req);
+       kfree(ptr);
 
        return ret;
 }
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
new file mode 100644 (file)
index 0000000..5741ec3
--- /dev/null
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)
+
+#define RPMH_ARC_MAX_LEVELS    16
+
+/**
+ * struct rpmhpd - top level RPMh power domain resource data structure
+ * @dev:               rpmh power domain controller device
+ * @pd:                        generic_pm_domain corresponding to the power domain
+ * @peer:              A peer power domain in case active-only voting is
+ *                     supported
+ * @active_only:       True if it represents an Active only peer
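+ * @parent:            generic_pm_domain of the parent power domain, if any
+ * @corner:            current corner (hlvl index into @level) requested for
+ *                     this power domain
+ * @active_corner:     aggregated corner sent as the ACTIVE_ONLY vote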
+ * @level:             An array of level (vlvl) to corner (hlvl) mappings
+ *                     derived from cmd-db
+ * @level_count:       Number of levels supported by the power domain. max
+ *                     being 16 (0 - 15)
+ * @enabled:           true if the power domain is enabled
+ * @res_name:          Resource name used for cmd-db lookup
+ * @addr:              Resource address as looked up using the resource name
+ *                     from cmd-db
+ */
+struct rpmhpd {
+       struct device   *dev;
+       struct generic_pm_domain pd;
+       struct generic_pm_domain *parent;
+       struct rpmhpd   *peer;
+       const bool      active_only;
+       unsigned int    corner;
+       unsigned int    active_corner;
+       u32             level[RPMH_ARC_MAX_LEVELS];
+       size_t          level_count;
+       bool            enabled;
+       const char      *res_name;
+       u32             addr;
+};
+
+struct rpmhpd_desc {
+       struct rpmhpd **rpmhpds;
+       size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmhpd_lock);
+
+/* SDM845 RPMH powerdomains */
+
+static struct rpmhpd sdm845_ebi = {
+       .pd = { .name = "ebi", },
+       .res_name = "ebi.lvl",
+};
+
+static struct rpmhpd sdm845_lmx = {
+       .pd = { .name = "lmx", },
+       .res_name = "lmx.lvl",
+};
+
+static struct rpmhpd sdm845_lcx = {
+       .pd = { .name = "lcx", },
+       .res_name = "lcx.lvl",
+};
+
+static struct rpmhpd sdm845_gfx = {
+       .pd = { .name = "gfx", },
+       .res_name = "gfx.lvl",
+};
+
+static struct rpmhpd sdm845_mss = {
+       .pd = { .name = "mss", },
+       .res_name = "mss.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao;
+static struct rpmhpd sdm845_mx = {
+       .pd = { .name = "mx", },
+       .peer = &sdm845_mx_ao,
+       .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao = {
+       .pd = { .name = "mx_ao", },
+       .peer = &sdm845_mx,
+       .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao;
+static struct rpmhpd sdm845_cx = {
+       .pd = { .name = "cx", },
+       .peer = &sdm845_cx_ao,
+       .parent = &sdm845_mx.pd,
+       .res_name = "cx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao = {
+       .pd = { .name = "cx_ao", },
+       .peer = &sdm845_cx,
+       .parent = &sdm845_mx_ao.pd,
+       .res_name = "cx.lvl",
+};
+
+static struct rpmhpd *sdm845_rpmhpds[] = {
+       [SDM845_EBI] = &sdm845_ebi,
+       [SDM845_MX] = &sdm845_mx,
+       [SDM845_MX_AO] = &sdm845_mx_ao,
+       [SDM845_CX] = &sdm845_cx,
+       [SDM845_CX_AO] = &sdm845_cx_ao,
+       [SDM845_LMX] = &sdm845_lmx,
+       [SDM845_LCX] = &sdm845_lcx,
+       [SDM845_GFX] = &sdm845_gfx,
+       [SDM845_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sdm845_desc = {
+       .rpmhpds = sdm845_rpmhpds,
+       .num_pds = ARRAY_SIZE(sdm845_rpmhpds),
+};
+
+static const struct of_device_id rpmhpd_match_table[] = {
+       { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+       { }
+};
+
+static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
+                             unsigned int corner, bool sync)
+{
+       struct tcs_cmd cmd = {
+               .addr = pd->addr,
+               .data = corner,
+       };
+
+       /*
+        * Wait for an ack only when we are increasing the
+        * perf state of the power domain
+        */
+       if (sync)
+               return rpmh_write(pd->dev, state, &cmd, 1);
+       else
+               return rpmh_write_async(pd->dev, state, &cmd, 1);
+}
+
+static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
+                           unsigned int *active, unsigned int *sleep)
+{
+       *active = corner;
+
+       if (pd->active_only)
+               *sleep = 0;
+       else
+               *sleep = *active;
+}
+
+/*
+ * This function is used to aggregate the votes across the active-only
+ * resources and their peers. The aggregated votes are sent to RPMh as
+ * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes
+ * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh
+ * on system sleep).
+ * We send ACTIVE_ONLY votes for resources without any peers. For others,
+ * which have an active only peer, all 3 votes are sent.
+ */
+static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+{
+       int ret;
+       struct rpmhpd *peer = pd->peer;
+       unsigned int active_corner, sleep_corner;
+       unsigned int this_active_corner = 0, this_sleep_corner = 0;
+       unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+       to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+
+       if (peer && peer->enabled)
+               to_active_sleep(peer, peer->corner, &peer_active_corner,
+                               &peer_sleep_corner);
+
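+       /* The ACTIVE_ONLY vote is the max of this domain's and its peer's requests. */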
+       active_corner = max(this_active_corner, peer_active_corner);
+
+       ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
+                                active_corner > pd->active_corner);
+       if (ret)
+               return ret;
+
+       pd->active_corner = active_corner;
+
+       if (peer) {
+               peer->active_corner = active_corner;
+
+               ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
+                                        active_corner, false);
+               if (ret)
+                       return ret;
+
+               sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+               return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
+                                         false);
+       }
+
+       return ret;
+}
+
+static int rpmhpd_power_on(struct generic_pm_domain *domain)
+{
+       struct rpmhpd *pd = domain_to_rpmhpd(domain);
+       int ret = 0;
+
+       mutex_lock(&rpmhpd_lock);
+
+       if (pd->corner)
+               ret = rpmhpd_aggregate_corner(pd, pd->corner);
+
+       if (!ret)
+               pd->enabled = true;
+
+       mutex_unlock(&rpmhpd_lock);
+
+       return ret;
+}
+
+static int rpmhpd_power_off(struct generic_pm_domain *domain)
+{
+       struct rpmhpd *pd = domain_to_rpmhpd(domain);
+       int ret = 0;
+
+       mutex_lock(&rpmhpd_lock);
+
+       ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
+
+       if (!ret)
+               pd->enabled = false;
+
+       mutex_unlock(&rpmhpd_lock);
+
+       return ret;
+}
+
+static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
+                                       unsigned int level)
+{
+       struct rpmhpd *pd = domain_to_rpmhpd(domain);
+       int ret = 0, i;
+
+       mutex_lock(&rpmhpd_lock);
+
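+       /* Find the lowest corner (hlvl) whose vlvl satisfies the requested level. */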
+       for (i = 0; i < pd->level_count; i++)
+               if (level <= pd->level[i])
+                       break;
+
+       /*
+        * If the level requested is more than that supported by the
+        * max corner, just set it to max anyway.
+        */
+       if (i == pd->level_count)
+               i--;
+
+       if (pd->enabled) {
+               ret = rpmhpd_aggregate_corner(pd, i);
+               if (ret)
+                       goto out;
+       }
+
+       pd->corner = i;
+out:
+       mutex_unlock(&rpmhpd_lock);
+
+       return ret;
+}
+
+static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
+                                                struct dev_pm_opp *opp)
+{
+       return dev_pm_opp_get_level(opp);
+}
+
+static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
+{
+       int i;
+       const u16 *buf;
+
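+       /* Fetch the vlvl table for this ARC resource from command DB. */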
+       buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
+
+       /* 2 bytes used for each command DB aux data entry */
+       rpmhpd->level_count >>= 1;
+
+       if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
+               return -EINVAL;
+
+       for (i = 0; i < rpmhpd->level_count; i++) {
+               rpmhpd->level[i] = buf[i];
+
+               /*
+                * The AUX data may be zero padded.  These 0 valued entries at
+                * the end of the map must be ignored.
+                */
+               if (i > 0 && rpmhpd->level[i] == 0) {
+                       rpmhpd->level_count = i;
+                       break;
+               }
+               pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
+                        rpmhpd->level[i]);
+       }
+
+       return 0;
+}
+
+static int rpmhpd_probe(struct platform_device *pdev)
+{
+       int i, ret;
+       size_t num_pds;
+       struct device *dev = &pdev->dev;
+       struct genpd_onecell_data *data;
+       struct rpmhpd **rpmhpds;
+       const struct rpmhpd_desc *desc;
+
+       desc = of_device_get_match_data(dev);
+       if (!desc)
+               return -EINVAL;
+
+       rpmhpds = desc->rpmhpds;
+       num_pds = desc->num_pds;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
+                                    GFP_KERNEL);
+       if (!data->domains)
+               return -ENOMEM;
+
+       data->num_domains = num_pds;
+
+       for (i = 0; i < num_pds; i++) {
+               if (!rpmhpds[i]) {
+                       dev_warn(dev, "rpmhpds[%d] is empty\n", i);
+                       continue;
+               }
+
+               rpmhpds[i]->dev = dev;
+               rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
+               if (!rpmhpds[i]->addr) {
+                       dev_err(dev, "Could not find RPMh address for resource %s\n",
+                               rpmhpds[i]->res_name);
+                       return -ENODEV;
+               }
+
+               ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
+               if (ret != CMD_DB_HW_ARC) {
+                       dev_err(dev, "RPMh slave ID mismatch\n");
+                       return -EINVAL;
+               }
+
+               ret = rpmhpd_update_level_mapping(rpmhpds[i]);
+               if (ret)
+                       return ret;
+
+               rpmhpds[i]->pd.power_off = rpmhpd_power_off;
+               rpmhpds[i]->pd.power_on = rpmhpd_power_on;
+               rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
+               rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
+               pm_genpd_init(&rpmhpds[i]->pd, NULL, true);
+
+               data->domains[i] = &rpmhpds[i]->pd;
+       }
+
+       /* Add subdomains */
+       for (i = 0; i < num_pds; i++) {
+               if (!rpmhpds[i])
+                       continue;
+               if (rpmhpds[i]->parent)
+                       pm_genpd_add_subdomain(rpmhpds[i]->parent,
+                                              &rpmhpds[i]->pd);
+       }
+
+       return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmhpd_driver = {
+       .driver = {
+               .name = "qcom-rpmhpd",
+               .of_match_table = rpmhpd_match_table,
+               .suppress_bind_attrs = true,
+       },
+       .probe = rpmhpd_probe,
+};
+
+static int __init rpmhpd_init(void)
+{
+       return platform_driver_register(&rpmhpd_driver);
+}
+core_initcall(rpmhpd_init);
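
For orientation, a minimal sketch (assumptions only, not part of this series) of how a consumer is expected to use these domains: when a device node lists a single rpmhpd domain in "power-domains", the genpd core attaches it at probe time, and the driver can then vote for a corner with dev_pm_genpd_set_performance_state(). The consumer driver and the level value 64 below are hypothetical; in practice consumers usually describe levels through an OPP table, which the opp_to_performance_state hook above translates.

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

/* Hypothetical consumer of an rpmhpd domain (illustration only). */
static int example_consumer_probe(struct platform_device *pdev)
{
        /*
         * Vote for a corner on the domain attached from "power-domains".
         * The value ends up in rpmhpd_set_performance_state(), which maps
         * it onto the level[] table read from command DB; 64 is an
         * arbitrary example level.
         */
        return dev_pm_genpd_set_performance_state(&pdev->dev, 64);
}
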
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
new file mode 100644 (file)
index 0000000..0053260
--- /dev/null
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
+
+/* Resource types */
+#define RPMPD_SMPA 0x61706d73
+#define RPMPD_LDOA 0x616f646c
+
+/* Operation Keys */
+#define KEY_CORNER             0x6e726f63 /* corn */
+#define KEY_ENABLE             0x6e657773 /* swen */
+#define KEY_FLOOR_CORNER       0x636676   /* vfc */
+
+#define MAX_RPMPD_STATE                6
+
+#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id)              \
+       static struct rpmpd _platform##_##_active;                      \
+       static struct rpmpd _platform##_##_name = {                     \
+               .pd = { .name = #_name, },                              \
+               .peer = &_platform##_##_active,                         \
+               .res_type = RPMPD_SMPA,                                 \
+               .res_id = r_id,                                         \
+               .key = KEY_CORNER,                                      \
+       };                                                              \
+       static struct rpmpd _platform##_##_active = {                   \
+               .pd = { .name = #_active, },                            \
+               .peer = &_platform##_##_name,                           \
+               .active_only = true,                                    \
+               .res_type = RPMPD_SMPA,                                 \
+               .res_id = r_id,                                         \
+               .key = KEY_CORNER,                                      \
+       }
+
+#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id)                       \
+       static struct rpmpd _platform##_##_name = {                     \
+               .pd = { .name = #_name, },                              \
+               .res_type = RPMPD_LDOA,                                 \
+               .res_id = r_id,                                         \
+               .key = KEY_CORNER,                                      \
+       }
+
+#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type)               \
+       static struct rpmpd _platform##_##_name = {                     \
+               .pd = { .name = #_name, },                              \
+               .res_type = r_type,                                     \
+               .res_id = r_id,                                         \
+               .key = KEY_FLOOR_CORNER,                                \
+       }
+
+#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id)                  \
+       DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA)
+
+#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id)                  \
+       DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA)
+
+struct rpmpd_req {
+       __le32 key;
+       __le32 nbytes;
+       __le32 value;
+};
+
+struct rpmpd {
+       struct generic_pm_domain pd;
+       struct rpmpd *peer;
+       const bool active_only;
+       unsigned int corner;
+       bool enabled;
+       const char *res_name;
+       const int res_type;
+       const int res_id;
+       struct qcom_smd_rpm *rpm;
+       __le32 key;
+};
+
+struct rpmpd_desc {
+       struct rpmpd **rpmpds;
+       size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmpd_lock);
+
+/* msm8996 RPM Power domains */
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1);
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2);
+DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26);
+
+DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1);
+DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26);
+
+static struct rpmpd *msm8996_rpmpds[] = {
+       [MSM8996_VDDCX] =       &msm8996_vddcx,
+       [MSM8996_VDDCX_AO] =    &msm8996_vddcx_ao,
+       [MSM8996_VDDCX_VFC] =   &msm8996_vddcx_vfc,
+       [MSM8996_VDDMX] =       &msm8996_vddmx,
+       [MSM8996_VDDMX_AO] =    &msm8996_vddmx_ao,
+       [MSM8996_VDDSSCX] =     &msm8996_vddsscx,
+       [MSM8996_VDDSSCX_VFC] = &msm8996_vddsscx_vfc,
+};
+
+static const struct rpmpd_desc msm8996_desc = {
+       .rpmpds = msm8996_rpmpds,
+       .num_pds = ARRAY_SIZE(msm8996_rpmpds),
+};
+
+static const struct of_device_id rpmpd_match_table[] = {
+       { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
+       { }
+};
+
+static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
+{
+       struct rpmpd_req req = {
+               .key = KEY_ENABLE,
+               .nbytes = cpu_to_le32(sizeof(u32)),
+               .value = cpu_to_le32(enable),
+       };
+
+       return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+                                 pd->res_type, pd->res_id, &req, sizeof(req));
+}
+
+static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner)
+{
+       struct rpmpd_req req = {
+               .key = pd->key,
+               .nbytes = cpu_to_le32(sizeof(u32)),
+               .value = cpu_to_le32(corner),
+       };
+
+       return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id,
+                                 &req, sizeof(req));
+}
+
+static void to_active_sleep(struct rpmpd *pd, unsigned int corner,
+                           unsigned int *active, unsigned int *sleep)
+{
+       *active = corner;
+
+       if (pd->active_only)
+               *sleep = 0;
+       else
+               *sleep = *active;
+}
+
+static int rpmpd_aggregate_corner(struct rpmpd *pd)
+{
+       int ret;
+       struct rpmpd *peer = pd->peer;
+       unsigned int active_corner, sleep_corner;
+       unsigned int this_active_corner = 0, this_sleep_corner = 0;
+       unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+       to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner);
+
+       if (peer && peer->enabled)
+               to_active_sleep(peer, peer->corner, &peer_active_corner,
+                               &peer_sleep_corner);
+
+       active_corner = max(this_active_corner, peer_active_corner);
+
+       ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner);
+       if (ret)
+               return ret;
+
+       sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+       return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner);
+}
+
+static int rpmpd_power_on(struct generic_pm_domain *domain)
+{
+       int ret;
+       struct rpmpd *pd = domain_to_rpmpd(domain);
+
+       mutex_lock(&rpmpd_lock);
+
+       ret = rpmpd_send_enable(pd, true);
+       if (ret)
+               goto out;
+
+       pd->enabled = true;
+
+       if (pd->corner)
+               ret = rpmpd_aggregate_corner(pd);
+
+out:
+       mutex_unlock(&rpmpd_lock);
+
+       return ret;
+}
+
+static int rpmpd_power_off(struct generic_pm_domain *domain)
+{
+       int ret;
+       struct rpmpd *pd = domain_to_rpmpd(domain);
+
+       mutex_lock(&rpmpd_lock);
+
+       ret = rpmpd_send_enable(pd, false);
+       if (!ret)
+               pd->enabled = false;
+
+       mutex_unlock(&rpmpd_lock);
+
+       return ret;
+}
+
+static int rpmpd_set_performance(struct generic_pm_domain *domain,
+                                unsigned int state)
+{
+       int ret = 0;
+       struct rpmpd *pd = domain_to_rpmpd(domain);
+
+       if (state > MAX_RPMPD_STATE)
+               return 0;
+
+       mutex_lock(&rpmpd_lock);
+
+       pd->corner = state;
+
+       if (!pd->enabled && pd->key != KEY_FLOOR_CORNER)
+               goto out;
+
+       ret = rpmpd_aggregate_corner(pd);
+
+out:
+       mutex_unlock(&rpmpd_lock);
+
+       return ret;
+}
+
+static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd,
+                                         struct dev_pm_opp *opp)
+{
+       return dev_pm_opp_get_level(opp);
+}
+
+static int rpmpd_probe(struct platform_device *pdev)
+{
+       int i;
+       size_t num;
+       struct genpd_onecell_data *data;
+       struct qcom_smd_rpm *rpm;
+       struct rpmpd **rpmpds;
+       const struct rpmpd_desc *desc;
+
+       rpm = dev_get_drvdata(pdev->dev.parent);
+       if (!rpm) {
+               dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+               return -ENODEV;
+       }
+
+       desc = of_device_get_match_data(&pdev->dev);
+       if (!desc)
+               return -EINVAL;
+
+       rpmpds = desc->rpmpds;
+       num = desc->num_pds;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
+                                    GFP_KERNEL);
+       if (!data->domains)
+               return -ENOMEM;
+
+       data->num_domains = num;
+
+       for (i = 0; i < num; i++) {
+               if (!rpmpds[i]) {
+                       dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n",
+                                i);
+                       continue;
+               }
+
+               rpmpds[i]->rpm = rpm;
+               rpmpds[i]->pd.power_off = rpmpd_power_off;
+               rpmpds[i]->pd.power_on = rpmpd_power_on;
+               rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
+               rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance;
+               pm_genpd_init(&rpmpds[i]->pd, NULL, true);
+
+               data->domains[i] = &rpmpds[i]->pd;
+       }
+
+       return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmpd_driver = {
+       .driver = {
+               .name = "qcom-rpmpd",
+               .of_match_table = rpmpd_match_table,
+               .suppress_bind_attrs = true,
+       },
+       .probe = rpmpd_probe,
+};
+
+static int __init rpmpd_init(void)
+{
+       return platform_driver_register(&rpmpd_driver);
+}
+core_initcall(rpmpd_init);
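
A quick worked illustration of the aggregation above, with made-up numbers: if the active-only peer msm8996_vddcx_ao holds corner 5 while msm8996_vddcx holds corner 3, the active set gets max(3, 5) = 5 and the sleep set gets max(3, 0) = 3, because to_active_sleep() zeroes the sleep vote of an active-only domain. A minimal kernel-style sketch of that arithmetic (the function name is made up):

#include <linux/kernel.h>

/* Illustration only: corner aggregation for the vddcx / vddcx_ao pair. */
static void __maybe_unused rpmpd_aggregation_example(void)
{
        unsigned int this_active = 3, this_sleep = 3;   /* vddcx: normal domain  */
        unsigned int peer_active = 5, peer_sleep = 0;   /* vddcx_ao: active-only */

        /* Sent with QCOM_SMD_RPM_ACTIVE_STATE: max(3, 5) == 5 */
        unsigned int active_corner = max(this_active, peer_active);
        /* Sent with QCOM_SMD_RPM_SLEEP_STATE: max(3, 0) == 3 */
        unsigned int sleep_corner = max(this_sleep, peer_sleep);

        pr_debug("active=%u sleep=%u\n", active_corner, sleep_corner);
}
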
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index b8e63724a49d5a9b81acfd01fb50fc6d61310763..9956bb2c63f2770bf2f9fad539a13b6385293daa 100644 (file)
@@ -227,6 +227,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
        { .compatible = "qcom,rpm-msm8974" },
        { .compatible = "qcom,rpm-msm8996" },
        { .compatible = "qcom,rpm-msm8998" },
+       { .compatible = "qcom,rpm-sdm660" },
        { .compatible = "qcom,rpm-qcs404" },
        {}
 };
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index a33ee8ef8b6b99cc6f4b6e450f3dfbb59c850d36..51625703399e4426e16a506d7968ad264dcb36e1 100644 (file)
@@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fuse->phys = res->start;
        fuse->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(fuse->base))
-               return PTR_ERR(fuse->base);
+       if (IS_ERR(fuse->base)) {
+               err = PTR_ERR(fuse->base);
+               fuse->base = base;
+               return err;
+       }
 
        fuse->clk = devm_clk_get(&pdev->dev, "fuse");
        if (IS_ERR(fuse->clk)) {
                dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
                        PTR_ERR(fuse->clk));
+               fuse->base = base;
                return PTR_ERR(fuse->clk);
        }
 
@@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
 
        if (fuse->soc->probe) {
                err = fuse->soc->probe(fuse);
-               if (err < 0)
+               if (err < 0) {
+                       fuse->base = base;
                        return err;
+               }
        }
 
        if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 5373f4c16b54cbaa7cf82a5bb12082bd5f2f334c..8ed35d9851f81d144e40130d7b24daf259fa0edb 100644 (file)
@@ -131,7 +131,7 @@ void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
 
        soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
        soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
-       soc_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+       soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
 
        cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
        soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 7ea3280279ff56cb9d8932d37f16d9c7268f33cf..0df25851869365e4dccd0e10f8042f54d65e99c4 100644 (file)
@@ -20,7 +20,7 @@
 
 #define pr_fmt(fmt) "tegra-pmc: " fmt
 
-#include <linux/kernel.h>
+#include <linux/arm-smccc.h>
 #include <linux/clk.h>
 #include <linux/clk/tegra.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
-#include <linux/irq.h>
 #include <linux/irqdomain.h>
-#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/of_address.h>
 #include <linux/of_clk.h>
+#include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/reboot.h>
 #define WAKE_AOWAKE_CTRL 0x4f4
 #define  WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0)
 
+/* for secure PMC */
+#define TEGRA_SMC_PMC          0xc2fffe00
+#define  TEGRA_SMC_PMC_READ    0xaa
+#define  TEGRA_SMC_PMC_WRITE   0xbb
+
 struct tegra_powergate {
        struct generic_pm_domain genpd;
        struct tegra_pmc *pmc;
@@ -216,6 +222,7 @@ struct tegra_pmc_soc {
        bool has_gpu_clamps;
        bool needs_mbist_war;
        bool has_impl_33v_pwr;
+       bool maybe_tz_only;
 
        const struct tegra_io_pad_soc *io_pads;
        unsigned int num_io_pads;
@@ -273,8 +280,12 @@ static const char * const tegra30_reset_sources[] = {
  * struct tegra_pmc - NVIDIA Tegra PMC
  * @dev: pointer to PMC device structure
  * @base: pointer to I/O remapped register region
+ * @wake: pointer to I/O remapped region for WAKE registers
+ * @aotag: pointer to I/O remapped region for AOTAG registers
+ * @scratch: pointer to I/O remapped region for scratch registers
  * @clk: pointer to pclk clock
  * @soc: pointer to SoC data structure
+ * @tz_only: flag specifying if the PMC can only be accessed via TrustZone
  * @debugfs: pointer to debugfs entry
  * @rate: currently configured rate of pclk
  * @suspend_mode: lowest suspend mode available
@@ -291,6 +302,9 @@ static const char * const tegra30_reset_sources[] = {
  * @lp0_vec_size: size of the LP0 warm boot code
  * @powergates_available: Bitmap of available power gates
  * @powergates_lock: mutex for power gate register access
+ * @pctl_dev: pin controller exposed by the PMC
+ * @domain: IRQ domain provided by the PMC
+ * @irq: chip implementation for the IRQ domain
  */
 struct tegra_pmc {
        struct device *dev;
@@ -302,6 +316,7 @@ struct tegra_pmc {
        struct dentry *debugfs;
 
        const struct tegra_pmc_soc *soc;
+       bool tz_only;
 
        unsigned long rate;
 
@@ -338,30 +353,85 @@ to_powergate(struct generic_pm_domain *domain)
        return container_of(domain, struct tegra_powergate, genpd);
 }
 
-static u32 tegra_pmc_readl(unsigned long offset)
+static u32 tegra_pmc_readl(struct tegra_pmc *pmc, unsigned long offset)
 {
+       struct arm_smccc_res res;
+
+       if (pmc->tz_only) {
+               arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_READ, offset, 0, 0,
+                             0, 0, 0, &res);
+               if (res.a0) {
+                       if (pmc->dev)
+                               dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+                                        __func__, res.a0);
+                       else
+                               pr_warn("%s(): SMC failed: %lu\n", __func__,
+                                       res.a0);
+               }
+
+               return res.a1;
+       }
+
        return readl(pmc->base + offset);
 }
 
-static void tegra_pmc_writel(u32 value, unsigned long offset)
+static void tegra_pmc_writel(struct tegra_pmc *pmc, u32 value,
+                            unsigned long offset)
 {
-       writel(value, pmc->base + offset);
+       struct arm_smccc_res res;
+
+       if (pmc->tz_only) {
+               arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_WRITE, offset,
+                             value, 0, 0, 0, 0, &res);
+               if (res.a0) {
+                       if (pmc->dev)
+                               dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+                                        __func__, res.a0);
+                       else
+                               pr_warn("%s(): SMC failed: %lu\n", __func__,
+                                       res.a0);
+               }
+       } else {
+               writel(value, pmc->base + offset);
+       }
 }
 
+static u32 tegra_pmc_scratch_readl(struct tegra_pmc *pmc, unsigned long offset)
+{
+       if (pmc->tz_only)
+               return tegra_pmc_readl(pmc, offset);
+
+       return readl(pmc->scratch + offset);
+}
+
+static void tegra_pmc_scratch_writel(struct tegra_pmc *pmc, u32 value,
+                                    unsigned long offset)
+{
+       if (pmc->tz_only)
+               tegra_pmc_writel(pmc, value, offset);
+       else
+               writel(value, pmc->scratch + offset);
+}
+
+/*
+ * TODO Figure out a way to call this with the struct tegra_pmc * passed in.
+ * This currently doesn't work because readx_poll_timeout() can only operate
+ * on functions that take a single argument.
+ */
 static inline bool tegra_powergate_state(int id)
 {
        if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
-               return (tegra_pmc_readl(GPU_RG_CNTRL) & 0x1) == 0;
+               return (tegra_pmc_readl(pmc, GPU_RG_CNTRL) & 0x1) == 0;
        else
-               return (tegra_pmc_readl(PWRGATE_STATUS) & BIT(id)) != 0;
+               return (tegra_pmc_readl(pmc, PWRGATE_STATUS) & BIT(id)) != 0;
 }
 
-static inline bool tegra_powergate_is_valid(int id)
+static inline bool tegra_powergate_is_valid(struct tegra_pmc *pmc, int id)
 {
        return (pmc->soc && pmc->soc->powergates[id]);
 }
 
-static inline bool tegra_powergate_is_available(int id)
+static inline bool tegra_powergate_is_available(struct tegra_pmc *pmc, int id)
 {
        return test_bit(id, pmc->powergates_available);
 }
@@ -374,7 +444,7 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
                return -EINVAL;
 
        for (i = 0; i < pmc->soc->num_powergates; i++) {
-               if (!tegra_powergate_is_valid(i))
+               if (!tegra_powergate_is_valid(pmc, i))
                        continue;
 
                if (!strcmp(name, pmc->soc->powergates[i]))
@@ -386,10 +456,12 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
 
 /**
  * tegra_powergate_set() - set the state of a partition
+ * @pmc: power management controller
  * @id: partition ID
  * @new_state: new state of the partition
  */
-static int tegra_powergate_set(unsigned int id, bool new_state)
+static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+                              bool new_state)
 {
        bool status;
        int err;
@@ -404,7 +476,7 @@ static int tegra_powergate_set(unsigned int id, bool new_state)
                return 0;
        }
 
-       tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+       tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
 
        err = readx_poll_timeout(tegra_powergate_state, id, status,
                                 status == new_state, 10, 100000);
@@ -414,7 +486,8 @@ static int tegra_powergate_set(unsigned int id, bool new_state)
        return err;
 }
 
-static int __tegra_powergate_remove_clamping(unsigned int id)
+static int __tegra_powergate_remove_clamping(struct tegra_pmc *pmc,
+                                            unsigned int id)
 {
        u32 mask;
 
@@ -426,7 +499,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id)
         */
        if (id == TEGRA_POWERGATE_3D) {
                if (pmc->soc->has_gpu_clamps) {
-                       tegra_pmc_writel(0, GPU_RG_CNTRL);
+                       tegra_pmc_writel(pmc, 0, GPU_RG_CNTRL);
                        goto out;
                }
        }
@@ -442,7 +515,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id)
        else
                mask = (1 << id);
 
-       tegra_pmc_writel(mask, REMOVE_CLAMPING);
+       tegra_pmc_writel(pmc, mask, REMOVE_CLAMPING);
 
 out:
        mutex_unlock(&pmc->powergates_lock);
@@ -494,7 +567,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
 
        usleep_range(10, 20);
 
-       err = tegra_powergate_set(pg->id, true);
+       err = tegra_powergate_set(pg->pmc, pg->id, true);
        if (err < 0)
                return err;
 
@@ -506,7 +579,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
 
        usleep_range(10, 20);
 
-       err = __tegra_powergate_remove_clamping(pg->id);
+       err = __tegra_powergate_remove_clamping(pg->pmc, pg->id);
        if (err)
                goto disable_clks;
 
@@ -533,7 +606,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
        usleep_range(10, 20);
 
 powergate_off:
-       tegra_powergate_set(pg->id, false);
+       tegra_powergate_set(pg->pmc, pg->id, false);
 
        return err;
 }
@@ -558,7 +631,7 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg)
 
        usleep_range(10, 20);
 
-       err = tegra_powergate_set(pg->id, false);
+       err = tegra_powergate_set(pg->pmc, pg->id, false);
        if (err)
                goto assert_resets;
 
@@ -579,12 +652,13 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg)
 static int tegra_genpd_power_on(struct generic_pm_domain *domain)
 {
        struct tegra_powergate *pg = to_powergate(domain);
+       struct device *dev = pg->pmc->dev;
        int err;
 
        err = tegra_powergate_power_up(pg, true);
        if (err)
-               pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name,
-                      err);
+               dev_err(dev, "failed to turn on PM domain %s: %d\n",
+                       pg->genpd.name, err);
 
        return err;
 }
@@ -592,12 +666,13 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
 static int tegra_genpd_power_off(struct generic_pm_domain *domain)
 {
        struct tegra_powergate *pg = to_powergate(domain);
+       struct device *dev = pg->pmc->dev;
        int err;
 
        err = tegra_powergate_power_down(pg);
        if (err)
-               pr_err("failed to turn off PM domain %s: %d\n",
-                      pg->genpd.name, err);
+               dev_err(dev, "failed to turn off PM domain %s: %d\n",
+                       pg->genpd.name, err);
 
        return err;
 }
@@ -608,10 +683,10 @@ static int tegra_genpd_power_off(struct generic_pm_domain *domain)
  */
 int tegra_powergate_power_on(unsigned int id)
 {
-       if (!tegra_powergate_is_available(id))
+       if (!tegra_powergate_is_available(pmc, id))
                return -EINVAL;
 
-       return tegra_powergate_set(id, true);
+       return tegra_powergate_set(pmc, id, true);
 }
 
 /**
@@ -620,20 +695,21 @@ int tegra_powergate_power_on(unsigned int id)
  */
 int tegra_powergate_power_off(unsigned int id)
 {
-       if (!tegra_powergate_is_available(id))
+       if (!tegra_powergate_is_available(pmc, id))
                return -EINVAL;
 
-       return tegra_powergate_set(id, false);
+       return tegra_powergate_set(pmc, id, false);
 }
 EXPORT_SYMBOL(tegra_powergate_power_off);
 
 /**
  * tegra_powergate_is_powered() - check if partition is powered
+ * @pmc: power management controller
  * @id: partition ID
  */
-int tegra_powergate_is_powered(unsigned int id)
+static int tegra_powergate_is_powered(struct tegra_pmc *pmc, unsigned int id)
 {
-       if (!tegra_powergate_is_valid(id))
+       if (!tegra_powergate_is_valid(pmc, id))
                return -EINVAL;
 
        return tegra_powergate_state(id);
@@ -645,10 +721,10 @@ int tegra_powergate_is_powered(unsigned int id)
  */
 int tegra_powergate_remove_clamping(unsigned int id)
 {
-       if (!tegra_powergate_is_available(id))
+       if (!tegra_powergate_is_available(pmc, id))
                return -EINVAL;
 
-       return __tegra_powergate_remove_clamping(id);
+       return __tegra_powergate_remove_clamping(pmc, id);
 }
 EXPORT_SYMBOL(tegra_powergate_remove_clamping);
 
@@ -666,7 +742,7 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
        struct tegra_powergate *pg;
        int err;
 
-       if (!tegra_powergate_is_available(id))
+       if (!tegra_powergate_is_available(pmc, id))
                return -EINVAL;
 
        pg = kzalloc(sizeof(*pg), GFP_KERNEL);
@@ -681,7 +757,8 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
 
        err = tegra_powergate_power_up(pg, false);
        if (err)
-               pr_err("failed to turn on partition %d: %d\n", id, err);
+               dev_err(pmc->dev, "failed to turn on partition %d: %d\n", id,
+                       err);
 
        kfree(pg);
 
@@ -691,12 +768,14 @@ EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
 
 /**
  * tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID
+ * @pmc: power management controller
  * @cpuid: CPU partition ID
  *
  * Returns the partition ID corresponding to the CPU partition ID or a
  * negative error code on failure.
  */
-static int tegra_get_cpu_powergate_id(unsigned int cpuid)
+static int tegra_get_cpu_powergate_id(struct tegra_pmc *pmc,
+                                     unsigned int cpuid)
 {
        if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
                return pmc->soc->cpu_powergates[cpuid];
@@ -712,11 +791,11 @@ bool tegra_pmc_cpu_is_powered(unsigned int cpuid)
 {
        int id;
 
-       id = tegra_get_cpu_powergate_id(cpuid);
+       id = tegra_get_cpu_powergate_id(pmc, cpuid);
        if (id < 0)
                return false;
 
-       return tegra_powergate_is_powered(id);
+       return tegra_powergate_is_powered(pmc, id);
 }
 
 /**
@@ -727,11 +806,11 @@ int tegra_pmc_cpu_power_on(unsigned int cpuid)
 {
        int id;
 
-       id = tegra_get_cpu_powergate_id(cpuid);
+       id = tegra_get_cpu_powergate_id(pmc, cpuid);
        if (id < 0)
                return id;
 
-       return tegra_powergate_set(id, true);
+       return tegra_powergate_set(pmc, id, true);
 }
 
 /**
@@ -742,7 +821,7 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
 {
        int id;
 
-       id = tegra_get_cpu_powergate_id(cpuid);
+       id = tegra_get_cpu_powergate_id(pmc, cpuid);
        if (id < 0)
                return id;
 
@@ -755,7 +834,7 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
        const char *cmd = data;
        u32 value;
 
-       value = readl(pmc->scratch + pmc->soc->regs->scratch0);
+       value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0);
        value &= ~PMC_SCRATCH0_MODE_MASK;
 
        if (cmd) {
@@ -769,12 +848,12 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
                        value |= PMC_SCRATCH0_MODE_RCM;
        }
 
-       writel(value, pmc->scratch + pmc->soc->regs->scratch0);
+       tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0);
 
        /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
-       value = tegra_pmc_readl(PMC_CNTRL);
+       value = tegra_pmc_readl(pmc, PMC_CNTRL);
        value |= PMC_CNTRL_MAIN_RST;
-       tegra_pmc_writel(value, PMC_CNTRL);
+       tegra_pmc_writel(pmc, value, PMC_CNTRL);
 
        return NOTIFY_DONE;
 }
@@ -793,7 +872,7 @@ static int powergate_show(struct seq_file *s, void *data)
        seq_printf(s, "------------------\n");
 
        for (i = 0; i < pmc->soc->num_powergates; i++) {
-               status = tegra_powergate_is_powered(i);
+               status = tegra_powergate_is_powered(pmc, i);
                if (status < 0)
                        continue;
 
@@ -855,12 +934,13 @@ static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
 static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
                                         struct device_node *np, bool off)
 {
+       struct device *dev = pg->pmc->dev;
        int err;
 
        pg->reset = of_reset_control_array_get_exclusive(np);
        if (IS_ERR(pg->reset)) {
                err = PTR_ERR(pg->reset);
-               pr_err("failed to get device resets: %d\n", err);
+               dev_err(dev, "failed to get device resets: %d\n", err);
                return err;
        }
 
@@ -877,6 +957,7 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
 
 static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
 {
+       struct device *dev = pmc->dev;
        struct tegra_powergate *pg;
        int id, err;
        bool off;
@@ -887,7 +968,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
 
        id = tegra_powergate_lookup(pmc, np->name);
        if (id < 0) {
-               pr_err("powergate lookup failed for %pOFn: %d\n", np, id);
+               dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id);
                goto free_mem;
        }
 
@@ -903,17 +984,17 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
        pg->genpd.power_on = tegra_genpd_power_on;
        pg->pmc = pmc;
 
-       off = !tegra_powergate_is_powered(pg->id);
+       off = !tegra_powergate_is_powered(pmc, pg->id);
 
        err = tegra_powergate_of_get_clks(pg, np);
        if (err < 0) {
-               pr_err("failed to get clocks for %pOFn: %d\n", np, err);
+               dev_err(dev, "failed to get clocks for %pOFn: %d\n", np, err);
                goto set_available;
        }
 
        err = tegra_powergate_of_get_resets(pg, np, off);
        if (err < 0) {
-               pr_err("failed to get resets for %pOFn: %d\n", np, err);
+               dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
                goto remove_clks;
        }
 
@@ -926,19 +1007,19 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
 
        err = pm_genpd_init(&pg->genpd, NULL, off);
        if (err < 0) {
-               pr_err("failed to initialise PM domain %pOFn: %d\n", np,
+               dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
                       err);
                goto remove_resets;
        }
 
        err = of_genpd_add_provider_simple(np, &pg->genpd);
        if (err < 0) {
-               pr_err("failed to add PM domain provider for %pOFn: %d\n",
-                      np, err);
+               dev_err(dev, "failed to add PM domain provider for %pOFn: %d\n",
+                       np, err);
                goto remove_genpd;
        }
 
-       pr_debug("added PM domain %s\n", pg->genpd.name);
+       dev_dbg(dev, "added PM domain %s\n", pg->genpd.name);
 
        return;
 
@@ -994,7 +1075,8 @@ tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
        return NULL;
 }
 
-static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
+static int tegra_io_pad_get_dpd_register_bit(struct tegra_pmc *pmc,
+                                            enum tegra_io_pad id,
                                             unsigned long *request,
                                             unsigned long *status,
                                             u32 *mask)
@@ -1003,7 +1085,7 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
 
        pad = tegra_io_pad_find(pmc, id);
        if (!pad) {
-               pr_err("invalid I/O pad ID %u\n", id);
+               dev_err(pmc->dev, "invalid I/O pad ID %u\n", id);
                return -ENOENT;
        }
 
@@ -1023,43 +1105,44 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
        return 0;
 }
 
-static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request,
-                               unsigned long *status, u32 *mask)
+static int tegra_io_pad_prepare(struct tegra_pmc *pmc, enum tegra_io_pad id,
+                               unsigned long *request, unsigned long *status,
+                               u32 *mask)
 {
        unsigned long rate, value;
        int err;
 
-       err = tegra_io_pad_get_dpd_register_bit(id, request, status, mask);
+       err = tegra_io_pad_get_dpd_register_bit(pmc, id, request, status, mask);
        if (err)
                return err;
 
        if (pmc->clk) {
                rate = clk_get_rate(pmc->clk);
                if (!rate) {
-                       pr_err("failed to get clock rate\n");
+                       dev_err(pmc->dev, "failed to get clock rate\n");
                        return -ENODEV;
                }
 
-               tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
+               tegra_pmc_writel(pmc, DPD_SAMPLE_ENABLE, DPD_SAMPLE);
 
                /* must be at least 200 ns, in APB (PCLK) clock cycles */
                value = DIV_ROUND_UP(1000000000, rate);
                value = DIV_ROUND_UP(200, value);
-               tegra_pmc_writel(value, SEL_DPD_TIM);
+               tegra_pmc_writel(pmc, value, SEL_DPD_TIM);
        }
 
        return 0;
 }
 
-static int tegra_io_pad_poll(unsigned long offset, u32 mask,
-                            u32 val, unsigned long timeout)
+static int tegra_io_pad_poll(struct tegra_pmc *pmc, unsigned long offset,
+                            u32 mask, u32 val, unsigned long timeout)
 {
        u32 value;
 
        timeout = jiffies + msecs_to_jiffies(timeout);
 
        while (time_after(timeout, jiffies)) {
-               value = tegra_pmc_readl(offset);
+               value = tegra_pmc_readl(pmc, offset);
                if ((value & mask) == val)
                        return 0;
 
@@ -1069,10 +1152,10 @@ static int tegra_io_pad_poll(unsigned long offset, u32 mask,
        return -ETIMEDOUT;
 }
 
-static void tegra_io_pad_unprepare(void)
+static void tegra_io_pad_unprepare(struct tegra_pmc *pmc)
 {
        if (pmc->clk)
-               tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
+               tegra_pmc_writel(pmc, DPD_SAMPLE_DISABLE, DPD_SAMPLE);
 }
 
 /**
@@ -1089,21 +1172,21 @@ int tegra_io_pad_power_enable(enum tegra_io_pad id)
 
        mutex_lock(&pmc->powergates_lock);
 
-       err = tegra_io_pad_prepare(id, &request, &status, &mask);
+       err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
        if (err < 0) {
-               pr_err("failed to prepare I/O pad: %d\n", err);
+               dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
                goto unlock;
        }
 
-       tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request);
+       tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_OFF | mask, request);
 
-       err = tegra_io_pad_poll(status, mask, 0, 250);
+       err = tegra_io_pad_poll(pmc, status, mask, 0, 250);
        if (err < 0) {
-               pr_err("failed to enable I/O pad: %d\n", err);
+               dev_err(pmc->dev, "failed to enable I/O pad: %d\n", err);
                goto unlock;
        }
 
-       tegra_io_pad_unprepare();
+       tegra_io_pad_unprepare(pmc);
 
 unlock:
        mutex_unlock(&pmc->powergates_lock);
@@ -1125,21 +1208,21 @@ int tegra_io_pad_power_disable(enum tegra_io_pad id)
 
        mutex_lock(&pmc->powergates_lock);
 
-       err = tegra_io_pad_prepare(id, &request, &status, &mask);
+       err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
        if (err < 0) {
-               pr_err("failed to prepare I/O pad: %d\n", err);
+               dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
                goto unlock;
        }
 
-       tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request);
+       tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_ON | mask, request);
 
-       err = tegra_io_pad_poll(status, mask, mask, 250);
+       err = tegra_io_pad_poll(pmc, status, mask, mask, 250);
        if (err < 0) {
-               pr_err("failed to disable I/O pad: %d\n", err);
+               dev_err(pmc->dev, "failed to disable I/O pad: %d\n", err);
                goto unlock;
        }
 
-       tegra_io_pad_unprepare();
+       tegra_io_pad_unprepare(pmc);
 
 unlock:
        mutex_unlock(&pmc->powergates_lock);
@@ -1147,22 +1230,24 @@ int tegra_io_pad_power_disable(enum tegra_io_pad id)
 }
 EXPORT_SYMBOL(tegra_io_pad_power_disable);
 
-static int tegra_io_pad_is_powered(enum tegra_io_pad id)
+static int tegra_io_pad_is_powered(struct tegra_pmc *pmc, enum tegra_io_pad id)
 {
        unsigned long request, status;
        u32 mask, value;
        int err;
 
-       err = tegra_io_pad_get_dpd_register_bit(id, &request, &status, &mask);
+       err = tegra_io_pad_get_dpd_register_bit(pmc, id, &request, &status,
+                                               &mask);
        if (err)
                return err;
 
-       value = tegra_pmc_readl(status);
+       value = tegra_pmc_readl(pmc, status);
 
        return !(value & mask);
 }
 
-static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
+static int tegra_io_pad_set_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id,
+                                   int voltage)
 {
        const struct tegra_io_pad_soc *pad;
        u32 value;
@@ -1177,29 +1262,29 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
        mutex_lock(&pmc->powergates_lock);
 
        if (pmc->soc->has_impl_33v_pwr) {
-               value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+               value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
 
                if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
                        value &= ~BIT(pad->voltage);
                else
                        value |= BIT(pad->voltage);
 
-               tegra_pmc_writel(value, PMC_IMPL_E_33V_PWR);
+               tegra_pmc_writel(pmc, value, PMC_IMPL_E_33V_PWR);
        } else {
                /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
-               value = tegra_pmc_readl(PMC_PWR_DET);
+               value = tegra_pmc_readl(pmc, PMC_PWR_DET);
                value |= BIT(pad->voltage);
-               tegra_pmc_writel(value, PMC_PWR_DET);
+               tegra_pmc_writel(pmc, value, PMC_PWR_DET);
 
                /* update I/O voltage */
-               value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+               value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
 
                if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
                        value &= ~BIT(pad->voltage);
                else
                        value |= BIT(pad->voltage);
 
-               tegra_pmc_writel(value, PMC_PWR_DET_VALUE);
+               tegra_pmc_writel(pmc, value, PMC_PWR_DET_VALUE);
        }
 
        mutex_unlock(&pmc->powergates_lock);
@@ -1209,7 +1294,7 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
        return 0;
 }
 
-static int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id)
 {
        const struct tegra_io_pad_soc *pad;
        u32 value;
@@ -1222,9 +1307,9 @@ static int tegra_io_pad_get_voltage(enum tegra_io_pad id)
                return -ENOTSUPP;
 
        if (pmc->soc->has_impl_33v_pwr)
-               value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+               value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
        else
-               value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+               value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
 
        if ((value & BIT(pad->voltage)) == 0)
                return TEGRA_IO_PAD_VOLTAGE_1V8;
@@ -1296,21 +1381,21 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
 
                ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
                do_div(ticks, USEC_PER_SEC);
-               tegra_pmc_writel(ticks, PMC_CPUPWRGOOD_TIMER);
+               tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
 
                ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
                do_div(ticks, USEC_PER_SEC);
-               tegra_pmc_writel(ticks, PMC_CPUPWROFF_TIMER);
+               tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
 
                wmb();
 
                pmc->rate = rate;
        }
 
-       value = tegra_pmc_readl(PMC_CNTRL);
+       value = tegra_pmc_readl(pmc, PMC_CNTRL);
        value &= ~PMC_CNTRL_SIDE_EFFECT_LP0;
        value |= PMC_CNTRL_CPU_PWRREQ_OE;
-       tegra_pmc_writel(value, PMC_CNTRL);
+       tegra_pmc_writel(pmc, value, PMC_CNTRL);
 }
 #endif
 
@@ -1432,13 +1517,13 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
        if (of_property_read_u32(np, "nvidia,pinmux-id", &pinmux))
                pinmux = 0;
 
-       value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+       value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
        value |= PMC_SENSOR_CTRL_SCRATCH_WRITE;
-       tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+       tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
 
        value = (reg_data << PMC_SCRATCH54_DATA_SHIFT) |
                (reg_addr << PMC_SCRATCH54_ADDR_SHIFT);
-       tegra_pmc_writel(value, PMC_SCRATCH54);
+       tegra_pmc_writel(pmc, value, PMC_SCRATCH54);
 
        value = PMC_SCRATCH55_RESET_TEGRA;
        value |= ctrl_id << PMC_SCRATCH55_CNTRL_ID_SHIFT;
@@ -1456,11 +1541,11 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
 
        value |= checksum << PMC_SCRATCH55_CHECKSUM_SHIFT;
 
-       tegra_pmc_writel(value, PMC_SCRATCH55);
+       tegra_pmc_writel(pmc, value, PMC_SCRATCH55);
 
-       value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+       value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
        value |= PMC_SENSOR_CTRL_ENABLE_RST;
-       tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+       tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
 
        dev_info(pmc->dev, "emergency thermal reset enabled\n");
 
@@ -1470,12 +1555,16 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
 
 static int tegra_io_pad_pinctrl_get_groups_count(struct pinctrl_dev *pctl_dev)
 {
+       struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
        return pmc->soc->num_io_pads;
 }
 
-static const char *tegra_io_pad_pinctrl_get_group_name(
-               struct pinctrl_dev *pctl, unsigned int group)
+static const char *tegra_io_pad_pinctrl_get_group_name(struct pinctrl_dev *pctl,
+                                                      unsigned int group)
 {
+       struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl);
+
        return pmc->soc->io_pads[group].name;
 }
 
@@ -1484,8 +1573,11 @@ static int tegra_io_pad_pinctrl_get_group_pins(struct pinctrl_dev *pctl_dev,
                                               const unsigned int **pins,
                                               unsigned int *num_pins)
 {
+       struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
        *pins = &pmc->soc->io_pads[group].id;
        *num_pins = 1;
+
        return 0;
 }
 
@@ -1500,27 +1592,33 @@ static const struct pinctrl_ops tegra_io_pad_pinctrl_ops = {
 static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev,
                                    unsigned int pin, unsigned long *config)
 {
-       const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin);
        enum pin_config_param param = pinconf_to_config_param(*config);
+       struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+       const struct tegra_io_pad_soc *pad;
        int ret;
        u32 arg;
 
+       pad = tegra_io_pad_find(pmc, pin);
        if (!pad)
                return -EINVAL;
 
        switch (param) {
        case PIN_CONFIG_POWER_SOURCE:
-               ret = tegra_io_pad_get_voltage(pad->id);
+               ret = tegra_io_pad_get_voltage(pmc, pad->id);
                if (ret < 0)
                        return ret;
+
                arg = ret;
                break;
+
        case PIN_CONFIG_LOW_POWER_MODE:
-               ret = tegra_io_pad_is_powered(pad->id);
+               ret = tegra_io_pad_is_powered(pmc, pad->id);
                if (ret < 0)
                        return ret;
+
                arg = !ret;
                break;
+
        default:
                return -EINVAL;
        }
@@ -1534,12 +1632,14 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
                                    unsigned int pin, unsigned long *configs,
                                    unsigned int num_configs)
 {
-       const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin);
+       struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+       const struct tegra_io_pad_soc *pad;
        enum pin_config_param param;
        unsigned int i;
        int err;
        u32 arg;
 
+       pad = tegra_io_pad_find(pmc, pin);
        if (!pad)
                return -EINVAL;
 
@@ -1560,7 +1660,7 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
                        if (arg != TEGRA_IO_PAD_VOLTAGE_1V8 &&
                            arg != TEGRA_IO_PAD_VOLTAGE_3V3)
                                return -EINVAL;
-                       err = tegra_io_pad_set_voltage(pad->id, arg);
+                       err = tegra_io_pad_set_voltage(pmc, pad->id, arg);
                        if (err)
                                return err;
                        break;
@@ -1585,7 +1685,7 @@ static struct pinctrl_desc tegra_pmc_pctl_desc = {
 
 static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
 {
-       int err = 0;
+       int err;
 
        if (!pmc->soc->num_pin_descs)
                return 0;
@@ -1598,18 +1698,20 @@ static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
                                              pmc);
        if (IS_ERR(pmc->pctl_dev)) {
                err = PTR_ERR(pmc->pctl_dev);
-               dev_err(pmc->dev, "unable to register pinctrl, %d\n", err);
+               dev_err(pmc->dev, "failed to register pin controller: %d\n",
+                       err);
+               return err;
        }
 
-       return err;
+       return 0;
 }
 
 static ssize_t reset_reason_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+                                struct device_attribute *attr, char *buf)
 {
        u32 value, rst_src;
 
-       value = tegra_pmc_readl(pmc->soc->regs->rst_status);
+       value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
        rst_src = (value & pmc->soc->regs->rst_source_mask) >>
                        pmc->soc->regs->rst_source_shift;
 
@@ -1619,11 +1721,11 @@ static ssize_t reset_reason_show(struct device *dev,
 static DEVICE_ATTR_RO(reset_reason);
 
 static ssize_t reset_level_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+                               struct device_attribute *attr, char *buf)
 {
        u32 value, rst_lvl;
 
-       value = tegra_pmc_readl(pmc->soc->regs->rst_status);
+       value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
        rst_lvl = (value & pmc->soc->regs->rst_level_mask) >>
                        pmc->soc->regs->rst_level_shift;
 
@@ -1641,16 +1743,16 @@ static void tegra_pmc_reset_sysfs_init(struct tegra_pmc *pmc)
                err = device_create_file(dev, &dev_attr_reset_reason);
                if (err < 0)
                        dev_warn(dev,
-                               "failed to create attr \"reset_reason\": %d\n",
-                               err);
+                                "failed to create attr \"reset_reason\": %d\n",
+                                err);
        }
 
        if (pmc->soc->reset_levels) {
                err = device_create_file(dev, &dev_attr_reset_level);
                if (err < 0)
                        dev_warn(dev,
-                               "failed to create attr \"reset_level\": %d\n",
-                               err);
+                                "failed to create attr \"reset_level\": %d\n",
+                                err);
        }
 }
 
@@ -1920,6 +2022,8 @@ static int tegra_pmc_probe(struct platform_device *pdev)
        pmc->base = base;
        mutex_unlock(&pmc->powergates_lock);
 
+       platform_set_drvdata(pdev, pmc);
+
        return 0;
 
 cleanup_restart_handler:
@@ -1932,14 +2036,18 @@ static int tegra_pmc_probe(struct platform_device *pdev)
 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
 static int tegra_pmc_suspend(struct device *dev)
 {
-       tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41);
+       struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+       tegra_pmc_writel(pmc, virt_to_phys(tegra_resume), PMC_SCRATCH41);
 
        return 0;
 }
 
 static int tegra_pmc_resume(struct device *dev)
 {
-       tegra_pmc_writel(0x0, PMC_SCRATCH41);
+       struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+       tegra_pmc_writel(pmc, 0x0, PMC_SCRATCH41);
 
        return 0;
 }
@@ -1976,11 +2084,11 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
        u32 value;
 
        /* Always enable CPU power request */
-       value = tegra_pmc_readl(PMC_CNTRL);
+       value = tegra_pmc_readl(pmc, PMC_CNTRL);
        value |= PMC_CNTRL_CPU_PWRREQ_OE;
-       tegra_pmc_writel(value, PMC_CNTRL);
+       tegra_pmc_writel(pmc, value, PMC_CNTRL);
 
-       value = tegra_pmc_readl(PMC_CNTRL);
+       value = tegra_pmc_readl(pmc, PMC_CNTRL);
 
        if (pmc->sysclkreq_high)
                value &= ~PMC_CNTRL_SYSCLK_POLARITY;
@@ -1988,12 +2096,12 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
                value |= PMC_CNTRL_SYSCLK_POLARITY;
 
        /* configure the output polarity while the request is tristated */
-       tegra_pmc_writel(value, PMC_CNTRL);
+       tegra_pmc_writel(pmc, value, PMC_CNTRL);
 
        /* now enable the request */
-       value = tegra_pmc_readl(PMC_CNTRL);
+       value = tegra_pmc_readl(pmc, PMC_CNTRL);
        value |= PMC_CNTRL_SYSCLK_OE;
-       tegra_pmc_writel(value, PMC_CNTRL);
+       tegra_pmc_writel(pmc, value, PMC_CNTRL);
 }
 
 static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
@@ -2002,14 +2110,14 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
 {
        u32 value;
 
-       value = tegra_pmc_readl(PMC_CNTRL);
+       value = tegra_pmc_readl(pmc, PMC_CNTRL);
 
        if (invert)
                value |= PMC_CNTRL_INTR_POLARITY;
        else
                value &= ~PMC_CNTRL_INTR_POLARITY;
 
-       tegra_pmc_writel(value, PMC_CNTRL);
+       tegra_pmc_writel(pmc, value, PMC_CNTRL);
 }
 
 static const struct tegra_pmc_soc tegra20_pmc_soc = {
@@ -2019,6 +2127,9 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
        .cpu_powergates = NULL,
        .has_tsense_reset = false,
        .has_gpu_clamps = false,
+       .needs_mbist_war = false,
+       .has_impl_33v_pwr = false,
+       .maybe_tz_only = false,
        .num_io_pads = 0,
        .io_pads = NULL,
        .num_pin_descs = 0,
@@ -2063,7 +2174,9 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
        .cpu_powergates = tegra30_cpu_powergates,
        .has_tsense_reset = true,
        .has_gpu_clamps = false,
+       .needs_mbist_war = false,
        .has_impl_33v_pwr = false,
+       .maybe_tz_only = false,
        .num_io_pads = 0,
        .io_pads = NULL,
        .num_pin_descs = 0,
@@ -2112,7 +2225,9 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
        .cpu_powergates = tegra114_cpu_powergates,
        .has_tsense_reset = true,
        .has_gpu_clamps = false,
+       .needs_mbist_war = false,
        .has_impl_33v_pwr = false,
+       .maybe_tz_only = false,
        .num_io_pads = 0,
        .io_pads = NULL,
        .num_pin_descs = 0,
@@ -2221,7 +2336,9 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
        .cpu_powergates = tegra124_cpu_powergates,
        .has_tsense_reset = true,
        .has_gpu_clamps = true,
+       .needs_mbist_war = false,
        .has_impl_33v_pwr = false,
+       .maybe_tz_only = false,
        .num_io_pads = ARRAY_SIZE(tegra124_io_pads),
        .io_pads = tegra124_io_pads,
        .num_pin_descs = ARRAY_SIZE(tegra124_pin_descs),
@@ -2325,8 +2442,9 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
        .cpu_powergates = tegra210_cpu_powergates,
        .has_tsense_reset = true,
        .has_gpu_clamps = true,
-       .has_impl_33v_pwr = false,
        .needs_mbist_war = true,
+       .has_impl_33v_pwr = false,
+       .maybe_tz_only = true,
        .num_io_pads = ARRAY_SIZE(tegra210_io_pads),
        .io_pads = tegra210_io_pads,
        .num_pin_descs = ARRAY_SIZE(tegra210_pin_descs),
@@ -2413,7 +2531,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
 
        index = of_property_match_string(np, "reg-names", "wake");
        if (index < 0) {
-               pr_err("failed to find PMC wake registers\n");
+               dev_err(pmc->dev, "failed to find PMC wake registers\n");
                return;
        }
 
@@ -2421,7 +2539,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
 
        wake = ioremap_nocache(regs.start, resource_size(&regs));
        if (!wake) {
-               pr_err("failed to map PMC wake registers\n");
+               dev_err(pmc->dev, "failed to map PMC wake registers\n");
                return;
        }
 
@@ -2438,7 +2556,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
 }
 
 static const struct tegra_wake_event tegra186_wake_events[] = {
-       TEGRA_WAKE_GPIO("power", 29, 1, TEGRA_AON_GPIO(FF, 0)),
+       TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)),
        TEGRA_WAKE_IRQ("rtc", 73, 10),
 };
 
@@ -2449,7 +2567,9 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
        .cpu_powergates = NULL,
        .has_tsense_reset = false,
        .has_gpu_clamps = false,
+       .needs_mbist_war = false,
        .has_impl_33v_pwr = true,
+       .maybe_tz_only = false,
        .num_io_pads = ARRAY_SIZE(tegra186_io_pads),
        .io_pads = tegra186_io_pads,
        .num_pin_descs = ARRAY_SIZE(tegra186_pin_descs),
@@ -2527,6 +2647,9 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
        .cpu_powergates = NULL,
        .has_tsense_reset = false,
        .has_gpu_clamps = false,
+       .needs_mbist_war = false,
+       .has_impl_33v_pwr = false,
+       .maybe_tz_only = false,
        .num_io_pads = ARRAY_SIZE(tegra194_io_pads),
        .io_pads = tegra194_io_pads,
        .regs = &tegra186_pmc_regs,
@@ -2561,6 +2684,32 @@ static struct platform_driver tegra_pmc_driver = {
 };
 builtin_platform_driver(tegra_pmc_driver);
 
+static bool __init tegra_pmc_detect_tz_only(struct tegra_pmc *pmc)
+{
+       u32 value, saved;
+
+       saved = readl(pmc->base + pmc->soc->regs->scratch0);
+       value = saved ^ 0xffffffff;
+
+       if (value == 0xffffffff)
+               value = 0xdeadbeef;
+
+       /* write pattern and read it back */
+       writel(value, pmc->base + pmc->soc->regs->scratch0);
+       value = readl(pmc->base + pmc->soc->regs->scratch0);
+
+       /* if we read all-zeroes, access is restricted to TZ only */
+       if (value == 0) {
+               pr_info("access to PMC is restricted to TZ\n");
+               return true;
+       }
+
+       /* restore original value */
+       writel(saved, pmc->base + pmc->soc->regs->scratch0);
+
+       return false;
+}
+
 /*
  * Early initialization to allow access to registers in the very early boot
  * process.
@@ -2623,6 +2772,9 @@ static int __init tegra_pmc_early_init(void)
        if (np) {
                pmc->soc = match->data;
 
+               if (pmc->soc->maybe_tz_only)
+                       pmc->tz_only = tegra_pmc_detect_tz_only(pmc);
+
                tegra_powergate_init(pmc, np);
 
                /*
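
The TrustZone probe added in this hunk works by writing the complement of the scratch register's contents and reading it back: when the PMC has been claimed by the secure monitor, non-secure writes are silently dropped and read back as zero. A minimal standalone sketch of the same write/read-back probe, with generic base/scratch parameters standing in for the driver's own types:

#include <linux/io.h>
#include <linux/types.h>

static bool scratch_reg_is_tz_only(void __iomem *base, unsigned long scratch)
{
        u32 saved, pattern, readback;

        saved = readl(base + scratch);

        /* invert the current contents, but avoid writing all-ones */
        pattern = saved ^ 0xffffffff;
        if (pattern == 0xffffffff)
                pattern = 0xdeadbeef;

        writel(pattern, base + scratch);
        readback = readl(base + scratch);

        /* dropped (secure-only) writes read back as all-zeroes */
        if (readback == 0)
                return true;

        /* the register is writable from the non-secure side; undo the probe */
        writel(saved, base + scratch);
        return false;
}
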
index e05ab16d9a9e3c10224e93745f57f1d477616a22..6285cd8efb21bb9798249e7db404b5aeb16c1d8e 100644 (file)
@@ -598,7 +598,7 @@ static int pktdma_init_chan(struct knav_dma_device *dma,
 
        INIT_LIST_HEAD(&chan->list);
        chan->dma       = dma;
-       chan->direction = DMA_NONE;
+       chan->direction = DMA_TRANS_NONE;
        atomic_set(&chan->ref_count, 0);
        spin_lock_init(&chan->lock);
 
index 687c8f3cd9552f38a54ddc80a5df01dfde04fbf2..01e76b58dd78af49d10a28d5bec3c71b951385f1 100644 (file)
@@ -17,4 +17,24 @@ config XILINX_VCU
          To compile this driver as a module, choose M here: the
          module will be called xlnx_vcu.
 
+config ZYNQMP_POWER
+       bool "Enable Xilinx Zynq MPSoC Power Management driver"
+       depends on PM && ARCH_ZYNQMP
+       default y
+       help
+         Say yes to enable power management support for the ZynqMP SoC.
+         This driver uses the firmware driver as an interface for power
+         management requests to the firmware. It registers an ISR to
+         handle power management callbacks from the firmware.
+         If in doubt, say N.
+
+config ZYNQMP_PM_DOMAINS
+       bool "Enable Zynq MPSoC generic PM domains"
+       default y
+       depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE
+       select PM_GENERIC_DOMAINS
+       help
+         Say yes to enable device power management through PM domains.
+         If in doubt, say N.
+
 endmenu
index dee8fd51e303281968e2c2f198d403a5ac573473..f66bfea5de175ae2b75f6ec43034e9b6fbb6c275 100644 (file)
@@ -1,2 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_XILINX_VCU)       += xlnx_vcu.o
+obj-$(CONFIG_ZYNQMP_POWER)     += zynqmp_power.o
+obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
new file mode 100644 (file)
index 0000000..354d256
--- /dev/null
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP Generic PM domain support
+ *
+ *  Copyright (C) 2015-2018 Xilinx, Inc.
+ *
+ *  Davorin Mista <davorin.mista@aggios.com>
+ *  Jolly Shah <jollys@xilinx.com>
+ *  Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_NUM_DOMAINS             (100)
+/* Flag stating whether PM nodes mapped to the PM domain have been requested */
+#define ZYNQMP_PM_DOMAIN_REQUESTED     BIT(0)
+
+/**
+ * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
+ * @gpd:               Generic power domain
+ * @node_id:           PM node ID corresponding to device inside PM domain
+ * @flags:             ZynqMP PM domain flags
+ */
+struct zynqmp_pm_domain {
+       struct generic_pm_domain gpd;
+       u32 node_id;
+       u8 flags;
+};
+
+/**
+ * zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source
+ *                                     path
+ * @dev:       Device to check for wakeup source path
+ * @not_used:  Data member (not required)
+ *
+ * This function checks the device's child hierarchy to see whether any
+ * device is set as a wakeup source.
+ *
+ * Return: 1 if device is in wakeup source path else 0
+ */
+static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used)
+{
+       int may_wakeup;
+
+       may_wakeup = device_may_wakeup(dev);
+       if (may_wakeup)
+               return may_wakeup;
+
+       return device_for_each_child(dev, NULL,
+                       zynqmp_gpd_is_active_wakeup_path);
+}
+
+/**
+ * zynqmp_gpd_power_on() - Power on PM domain
+ * @domain:    Generic PM domain
+ *
+ * This function is called before devices inside a PM domain are resumed, to
+ * power on PM domain.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
+{
+       int ret;
+       struct zynqmp_pm_domain *pd;
+       const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+       if (!eemi_ops || !eemi_ops->set_requirement)
+               return -ENXIO;
+
+       pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+       ret = eemi_ops->set_requirement(pd->node_id,
+                                       ZYNQMP_PM_CAPABILITY_ACCESS,
+                                       ZYNQMP_PM_MAX_QOS,
+                                       ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+       if (ret) {
+               pr_err("%s() %s set requirement for node %d failed: %d\n",
+                      __func__, domain->name, pd->node_id, ret);
+               return ret;
+       }
+
+       pr_debug("%s() Powered on %s domain\n", __func__, domain->name);
+       return 0;
+}
+
+/**
+ * zynqmp_gpd_power_off() - Power off PM domain
+ * @domain:    Generic PM domain
+ *
+ * This function is called after devices inside a PM domain are suspended, to
+ * power off PM domain.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
+{
+       int ret;
+       struct pm_domain_data *pdd, *tmp;
+       struct zynqmp_pm_domain *pd;
+       u32 capabilities = 0;
+       bool may_wakeup;
+       const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+       if (!eemi_ops || !eemi_ops->set_requirement)
+               return -ENXIO;
+
+       pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+       /* If domain is already released there is nothing to be done */
+       if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) {
+               pr_debug("%s() %s domain is already released\n",
+                        __func__, domain->name);
+               return 0;
+       }
+
+       list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) {
+               /* If device is in wakeup path, set capability to WAKEUP */
+               may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL);
+               if (may_wakeup) {
+                       dev_dbg(pdd->dev, "device is in wakeup path in %s\n",
+                               domain->name);
+                       capabilities = ZYNQMP_PM_CAPABILITY_WAKEUP;
+                       break;
+               }
+       }
+
+       ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0,
+                                       ZYNQMP_PM_REQUEST_ACK_NO);
+       /*
+        * If powering down any node inside this domain fails,
+        * report and return the error.
+        */
+       if (ret) {
+               pr_err("%s() %s set requirement for node %d failed: %d\n",
+                      __func__, domain->name, pd->node_id, ret);
+               return ret;
+       }
+
+       pr_debug("%s() Powered off %s domain\n", __func__, domain->name);
+       return 0;
+}
+
+/**
+ * zynqmp_gpd_attach_dev() - Attach device to the PM domain
+ * @domain:    Generic PM domain
+ * @dev:       Device to attach
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
+                                struct device *dev)
+{
+       int ret;
+       struct zynqmp_pm_domain *pd;
+       const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+       if (!eemi_ops || !eemi_ops->request_node)
+               return -ENXIO;
+
+       pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+       /* If this is not the first device to attach there is nothing to do */
+       if (domain->device_count)
+               return 0;
+
+       ret = eemi_ops->request_node(pd->node_id, 0, 0,
+                                    ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+       /* If requesting a node fails print and return the error */
+       if (ret) {
+               pr_err("%s() %s request failed for node %d: %d\n",
+                      __func__, domain->name, pd->node_id, ret);
+               return ret;
+       }
+
+       pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED;
+
+       pr_debug("%s() %s attached to %s domain\n", __func__,
+                dev_name(dev), domain->name);
+       return 0;
+}
+
+/**
+ * zynqmp_gpd_detach_dev() - Detach device from the PM domain
+ * @domain:    Generic PM domain
+ * @dev:       Device to detach
+ */
+static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
+                                 struct device *dev)
+{
+       int ret;
+       struct zynqmp_pm_domain *pd;
+       const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+       if (!eemi_ops || !eemi_ops->release_node)
+               return;
+
+       pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+       /* If this is not the last device to detach there is nothing to do */
+       if (domain->device_count)
+               return;
+
+       ret = eemi_ops->release_node(pd->node_id);
+       /* If releasing a node fails print the error and return */
+       if (ret) {
+               pr_err("%s() %s release failed for node %d: %d\n",
+                      __func__, domain->name, pd->node_id, ret);
+               return;
+       }
+
+       pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED;
+
+       pr_debug("%s() %s detached from %s domain\n", __func__,
+                dev_name(dev), domain->name);
+}
+
+static struct generic_pm_domain *zynqmp_gpd_xlate
+                               (struct of_phandle_args *genpdspec, void *data)
+{
+       struct genpd_onecell_data *genpd_data = data;
+       unsigned int i, idx = genpdspec->args[0];
+       struct zynqmp_pm_domain *pd;
+
+       pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd);
+
+       if (genpdspec->args_count != 1)
+               return ERR_PTR(-EINVAL);
+
+       /* Check for existing pm domains */
+       for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
+               if (pd[i].node_id == idx)
+                       goto done;
+       }
+
+       /*
+        * No existing power domain was found for this index, so record the
+        * index in the first free (node_id == 0) slot of the list.
+        */
+       for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
+               if (pd[i].node_id == 0) {
+                       pd[i].node_id = idx;
+                       break;
+               }
+       }
+
+done:
+       if (!genpd_data->domains[i] || i == ZYNQMP_NUM_DOMAINS)
+               return ERR_PTR(-ENOENT);
+
+       return genpd_data->domains[i];
+}
+
+static int zynqmp_gpd_probe(struct platform_device *pdev)
+{
+       int i;
+       struct genpd_onecell_data *zynqmp_pd_data;
+       struct generic_pm_domain **domains;
+       struct zynqmp_pm_domain *pd;
+       struct device *dev = &pdev->dev;
+
+       pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               return -ENOMEM;
+
+       zynqmp_pd_data = devm_kzalloc(dev, sizeof(*zynqmp_pd_data), GFP_KERNEL);
+       if (!zynqmp_pd_data)
+               return -ENOMEM;
+
+       zynqmp_pd_data->xlate = zynqmp_gpd_xlate;
+
+       domains = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*domains),
+                              GFP_KERNEL);
+       if (!domains)
+               return -ENOMEM;
+
+       for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) {
+               pd->node_id = 0;
+               pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i);
+               pd->gpd.power_off = zynqmp_gpd_power_off;
+               pd->gpd.power_on = zynqmp_gpd_power_on;
+               pd->gpd.attach_dev = zynqmp_gpd_attach_dev;
+               pd->gpd.detach_dev = zynqmp_gpd_detach_dev;
+
+               domains[i] = &pd->gpd;
+
+               /* Mark all PM domains as initially powered off */
+               pm_genpd_init(&pd->gpd, NULL, true);
+       }
+
+       zynqmp_pd_data->domains = domains;
+       zynqmp_pd_data->num_domains = ZYNQMP_NUM_DOMAINS;
+       of_genpd_add_provider_onecell(dev->parent->of_node, zynqmp_pd_data);
+
+       return 0;
+}
+
+static int zynqmp_gpd_remove(struct platform_device *pdev)
+{
+       of_genpd_del_provider(pdev->dev.parent->of_node);
+
+       return 0;
+}
+
+static struct platform_driver zynqmp_power_domain_driver = {
+       .driver = {
+               .name = "zynqmp_power_controller",
+       },
+       .probe = zynqmp_gpd_probe,
+       .remove = zynqmp_gpd_remove,
+};
+module_platform_driver(zynqmp_power_domain_driver);
+
+MODULE_ALIAS("platform:zynqmp_power_controller");
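
The provider registered above only exposes the domains; the power_on/power_off callbacks run when a device attached to one of them goes through runtime PM. A hedged sketch of a hypothetical consumer driver probe (all demo_* names are invented for illustration) whose device tree node points at one of these domains via a power-domains phandle:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/*
 * Hypothetical consumer: the genpd core attaches the device to the PM
 * domain named in its "power-domains" property before probe is called.
 */
static int demo_consumer_probe(struct platform_device *pdev)
{
        int ret;

        pm_runtime_enable(&pdev->dev);

        /* first active reference -> the domain's power_on callback runs */
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(&pdev->dev);
                pm_runtime_disable(&pdev->dev);
                return ret;
        }

        /* ... program the now-powered block ... */

        /* last reference dropped -> the domain may be powered off again */
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
}
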
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
new file mode 100644 (file)
index 0000000..771cb59
--- /dev/null
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Power Management
+ *
+ *  Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ *  Davorin Mista <davorin.mista@aggios.com>
+ *  Jolly Shah <jollys@xilinx.com>
+ *  Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+enum pm_suspend_mode {
+       PM_SUSPEND_MODE_FIRST = 0,
+       PM_SUSPEND_MODE_STD = PM_SUSPEND_MODE_FIRST,
+       PM_SUSPEND_MODE_POWER_OFF,
+};
+
+#define PM_SUSPEND_MODE_FIRST  PM_SUSPEND_MODE_STD
+
+static const char *const suspend_modes[] = {
+       [PM_SUSPEND_MODE_STD] = "standard",
+       [PM_SUSPEND_MODE_POWER_OFF] = "power-off",
+};
+
+static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
+
+enum pm_api_cb_id {
+       PM_INIT_SUSPEND_CB = 30,
+       PM_ACKNOWLEDGE_CB,
+       PM_NOTIFY_CB,
+};
+
+static void zynqmp_pm_get_callback_data(u32 *buf)
+{
+       zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+}
+
+static irqreturn_t zynqmp_pm_isr(int irq, void *data)
+{
+       u32 payload[CB_PAYLOAD_SIZE];
+
+       zynqmp_pm_get_callback_data(payload);
+
+       /* First element is callback API ID, others are callback arguments */
+       if (payload[0] == PM_INIT_SUSPEND_CB) {
+               switch (payload[1]) {
+               case SUSPEND_SYSTEM_SHUTDOWN:
+                       orderly_poweroff(true);
+                       break;
+               case SUSPEND_POWER_REQUEST:
+                       pm_suspend(PM_SUSPEND_MEM);
+                       break;
+               default:
+                       pr_err("%s Unsupported InitSuspendCb reason code %d\n",
+                              __func__, payload[1]);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static ssize_t suspend_mode_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       char *s = buf;
+       int md;
+
+       for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+               if (suspend_modes[md]) {
+                       if (md == suspend_mode)
+                               s += sprintf(s, "[%s] ", suspend_modes[md]);
+                       else
+                               s += sprintf(s, "%s ", suspend_modes[md]);
+               }
+
+       /* Convert last space to newline */
+       if (s != buf)
+               *(s - 1) = '\n';
+       return (s - buf);
+}
+
+static ssize_t suspend_mode_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       int md, ret = -EINVAL;
+       const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+       if (!eemi_ops || !eemi_ops->set_suspend_mode)
+               return ret;
+
+       for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+               if (suspend_modes[md] &&
+                   sysfs_streq(suspend_modes[md], buf)) {
+                       ret = 0;
+                       break;
+               }
+
+       if (!ret && md != suspend_mode) {
+               ret = eemi_ops->set_suspend_mode(md);
+               if (likely(!ret))
+                       suspend_mode = md;
+       }
+
+       return ret ? ret : count;
+}
+
+static DEVICE_ATTR_RW(suspend_mode);
+
+static int zynqmp_pm_probe(struct platform_device *pdev)
+{
+       int ret, irq;
+       u32 pm_api_version;
+
+       const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+       if (!eemi_ops || !eemi_ops->get_api_version || !eemi_ops->init_finalize)
+               return -ENXIO;
+
+       eemi_ops->init_finalize();
+       eemi_ops->get_api_version(&pm_api_version);
+
+       /* Check PM API version number */
+       if (pm_api_version < ZYNQMP_PM_VERSION)
+               return -ENODEV;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0)
+               return -ENXIO;
+
+       ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr,
+                                       IRQF_NO_SUSPEND | IRQF_ONESHOT,
+                                       dev_name(&pdev->dev), &pdev->dev);
+       if (ret) {
+               dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed with %d\n",
+                       irq, ret);
+               return ret;
+       }
+
+       ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to create sysfs interface\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int zynqmp_pm_remove(struct platform_device *pdev)
+{
+       sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+
+       return 0;
+}
+
+static const struct of_device_id pm_of_match[] = {
+       { .compatible = "xlnx,zynqmp-power", },
+       { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, pm_of_match);
+
+static struct platform_driver zynqmp_pm_platform_driver = {
+       .probe = zynqmp_pm_probe,
+       .remove = zynqmp_pm_remove,
+       .driver = {
+               .name = "zynqmp_power",
+               .of_match_table = pm_of_match,
+       },
+};
+module_platform_driver(zynqmp_pm_platform_driver);
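
The suspend_mode attribute follows the usual sysfs multi-choice convention: show prints all modes with the active one in brackets, and store accepts one of the listed names. A small userspace sketch, assuming a hypothetical sysfs path (the real path depends on how the platform device is named):

#include <stdio.h>

/* hypothetical path; the real one depends on the platform device name */
#define SUSPEND_MODE_ATTR "/sys/devices/platform/zynqmp_power/suspend_mode"

int main(void)
{
        char line[64];
        FILE *f;

        /* show: the active mode is bracketed, e.g. "[standard] power-off" */
        f = fopen(SUSPEND_MODE_ATTR, "r");
        if (!f) {
                perror("open " SUSPEND_MODE_ATTR);
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                printf("current: %s", line);
        fclose(f);

        /* store: writing one of the listed names selects that mode */
        f = fopen(SUSPEND_MODE_ATTR, "w");
        if (!f) {
                perror("open " SUSPEND_MODE_ATTR " for writing");
                return 1;
        }
        if (fputs("power-off", f) == EOF)
                perror("write suspend_mode");
        fclose(f);
        return 0;
}
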
index a0802de8c3a1dbf54c4bca70d56535ac632026c7..6f5afab7c1a1b8e9ae63222412b163c25c643bfe 100644 (file)
@@ -248,10 +248,10 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
        struct ion_dma_buf_attachment *a = attachment->priv;
        struct ion_buffer *buffer = dmabuf->priv;
 
-       free_duped_table(a->table);
        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);
+       free_duped_table(a->table);
 
        kfree(a);
 }
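
The reordering above unlinks the attachment while holding the buffer lock and only then frees its duplicated table, so the free no longer runs under the mutex and nothing can find the attachment once teardown starts. A minimal sketch of that unlink-under-lock, free-afterwards ordering (generic names, not the ion structures):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_attachment {
        struct list_head list;
        void *table;            /* resource owned by this attachment */
};

/* Unlink under the lock, release resources only after dropping it. */
static void demo_detach(struct mutex *lock, struct demo_attachment *a)
{
        mutex_lock(lock);
        list_del(&a->list);     /* nothing else can find @a after this */
        mutex_unlock(lock);

        kfree(a->table);        /* safe outside the lock: @a is unreachable */
        kfree(a);
}
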
index 28cbd6b3d26c39e09f5b8586756f22d83fd9b97a..dfee6985efa6126cb652fc96bad1bfd352aa764c 100644 (file)
@@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
        {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
        {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
        {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+       {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
        {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
        {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
        {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
index bcc8dfa8e67287b80bd68ae6134133c5b0aac26c..9efb4dcb9d3a8f1d32544b3c8e9b5694da14a63d 100644 (file)
@@ -850,18 +850,18 @@ enum ieee80211_state {
 #define IP_FMT "%pI4"
 #define IP_ARG(x) (x)
 
-extern __inline int is_multicast_mac_addr(const u8 *addr)
+static inline int is_multicast_mac_addr(const u8 *addr)
 {
         return ((addr[0] != 0xff) && (0x01 & addr[0]));
 }
 
-extern __inline int is_broadcast_mac_addr(const u8 *addr)
+static inline int is_broadcast_mac_addr(const u8 *addr)
 {
        return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) &&   \
                (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
 }
 
-extern __inline int is_zero_mac_addr(const u8 *addr)
+static inline int is_zero_mac_addr(const u8 *addr)
 {
        return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) &&   \
                (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
index 9e17ec651bdec040d73f21ceff6685054e6ec337..53f5a1cb4636eb2e195bcb19c84d94588502abc5 100644 (file)
@@ -446,6 +446,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
 static inline void
 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
 {
+       event->fired = 1;
        event->armed = 0;
        wake_up_all(wq);
 }
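
Setting event->fired before wake_up_all() matters because the waiter re-checks its condition after being woken; if the flag were still clear at that point the wakeup could be lost. A generic sketch of the set-condition-then-wake pairing (illustrative types, not the vchiq ones):

#include <linux/wait.h>

struct demo_event {
        wait_queue_head_t wq;
        int fired;
};

static void demo_signal(struct demo_event *ev)
{
        /*
         * Publish the condition before waking: the woken task re-evaluates
         * "ev->fired" inside wait_event() and would go back to sleep if the
         * flag were still clear.
         */
        ev->fired = 1;
        wake_up_all(&ev->wq);
}

static void demo_wait(struct demo_event *ev)
{
        wait_event(ev->wq, ev->fired);
        ev->fired = 0;
}
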
index 70c854d939cee222cda5e66618b6a30aca589999..3d0badc34825f27fb34addab44eec4c4e264455a 100644 (file)
@@ -36,7 +36,7 @@ struct wilc_op_mode {
 struct wilc_reg_frame {
        bool reg;
        u8 reg_id;
-       __le32 frame_type;
+       __le16 frame_type;
 } __packed;
 
 struct wilc_drv_handler {
@@ -1744,7 +1744,6 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
                result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
                                              ARRAY_SIZE(wid_list),
                                              wilc_get_vif_idx(vif));
-               kfree(gtk_key);
        } else if (mode == WILC_STATION_MODE) {
                struct wid wid;
 
@@ -1754,9 +1753,9 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
                wid.val = (u8 *)gtk_key;
                result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
                                              wilc_get_vif_idx(vif));
-               kfree(gtk_key);
        }
 
+       kfree(gtk_key);
        return result;
 }
 
index 3c5e9e030cadcff05cde6f37dc3c63ec5fc6f2b4..489e5a5038f8d10bc786162f7b92106c72b1e764 100644 (file)
@@ -1252,21 +1252,22 @@ static u32 init_chip(struct net_device *dev)
                ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
                if (!ret) {
                        netdev_err(dev, "fail read reg 0x1118\n");
-                       return ret;
+                       goto release;
                }
                reg |= BIT(0);
                ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
                if (!ret) {
                        netdev_err(dev, "fail write reg 0x1118\n");
-                       return ret;
+                       goto release;
                }
                ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
                if (!ret) {
                        netdev_err(dev, "fail write reg 0xc0000\n");
-                       return ret;
+                       goto release;
                }
        }
 
+release:
        release_bus(wilc, WILC_BUS_RELEASE_ONLY);
 
        return ret;
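
The hunk above converts early returns into goto release so the bus reference taken at the top of init_chip() is dropped on every exit path. A sketch of the same single-exit unwind idiom, using conventional 0-on-success return codes (all demo_* names are hypothetical; note that the wilc HIF helpers actually return non-zero on success):

struct demo_dev;
int demo_acquire_bus(struct demo_dev *dev);
void demo_release_bus(struct demo_dev *dev);
int demo_write_reg(struct demo_dev *dev, unsigned int reg, unsigned int val);

/* Every failure path funnels through the label that undoes the acquisition. */
static int demo_init(struct demo_dev *dev)
{
        int ret;

        ret = demo_acquire_bus(dev);
        if (ret)
                return ret;             /* nothing to unwind yet */

        ret = demo_write_reg(dev, 0x1118, 0x1);
        if (ret)
                goto release;

        ret = demo_write_reg(dev, 0xc0000, 0x71);

release:
        demo_release_bus(dev);          /* runs on success and on failure */
        return ret;
}
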
index c34c88ef331996e7bc02783f3957ca88fd451841..5831e0eecea120f9157cb566311839b9273755ce 100644 (file)
@@ -1317,12 +1317,13 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
                 * target_complete_cmd will translate this to LUN COMM FAILURE
                 */
                scsi_status = SAM_STAT_CHECK_CONDITION;
+               list_del_init(&cmd->queue_entry);
        } else {
+               list_del_init(&cmd->queue_entry);
                idr_remove(&udev->commands, id);
                tcmu_free_cmd(cmd);
                scsi_status = SAM_STAT_TASK_SET_FULL;
        }
-       list_del_init(&cmd->queue_entry);
 
        pr_debug("Timing out cmd %u on dev %s that is %s.\n",
                 id, udev->name, is_running ? "inflight" : "queued");
index 5e4938bbef2bebb306d86e46531d8220103e4a03..e3a148521ec1d4016ab65a5ae2ce8fbe1aa6b7ce 100644 (file)
@@ -34,9 +34,12 @@ static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
 static int get_devices(struct tee_context *ctx, u32 session,
                       struct tee_shm *device_shm, u32 *shm_size)
 {
-       u32 ret = 0;
-       struct tee_ioctl_invoke_arg inv_arg = {0};
-       struct tee_param param[4] = {0};
+       int ret = 0;
+       struct tee_ioctl_invoke_arg inv_arg;
+       struct tee_param param[4];
+
+       memset(&inv_arg, 0, sizeof(inv_arg));
+       memset(&param, 0, sizeof(param));
 
        /* Invoke PTA_CMD_GET_DEVICES function */
        inv_arg.func = PTA_CMD_GET_DEVICES;
@@ -89,13 +92,15 @@ int optee_enumerate_devices(void)
        const uuid_t pta_uuid =
                UUID_INIT(0x7011a688, 0xddde, 0x4053,
                          0xa5, 0xa9, 0x7b, 0x3c, 0x4d, 0xdf, 0x13, 0xb8);
-       struct tee_ioctl_open_session_arg sess_arg = {0};
+       struct tee_ioctl_open_session_arg sess_arg;
        struct tee_shm *device_shm = NULL;
        const uuid_t *device_uuid = NULL;
        struct tee_context *ctx = NULL;
        u32 shm_size = 0, idx, num_devices = 0;
        int rc;
 
+       memset(&sess_arg, 0, sizeof(sess_arg));
+
        /* Open context with OP-TEE driver */
        ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
        if (IS_ERR(ctx))
index ecffdd8a29b71e474a5ea35702a88062e565aa35..17c64fccbb10f3fde59114dde5ae4d2c73ece664 100644 (file)
@@ -993,7 +993,9 @@ tee_client_open_context(struct tee_context *start,
         * tee_client_open_session() if any in kernel client requires
         * different behaviour.
         */
-       ctx->supp_nowait = true;
+       if (!IS_ERR(ctx))
+               ctx->supp_nowait = true;
+
        return ctx;
 }
 EXPORT_SYMBOL_GPL(tee_client_open_context);
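
The guard added above avoids dereferencing ctx when the loop falls through with an ERR_PTR value rather than a real context. A short sketch of the ERR_PTR()/IS_ERR() convention the fix relies on (demo_* names are invented):

#include <linux/err.h>
#include <linux/slab.h>

struct demo_ctx {
        bool supp_nowait;
};

/* Error-or-pointer return: errno values are encoded in the pointer itself. */
static struct demo_ctx *demo_open(void)
{
        struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return ERR_PTR(-ENOMEM);
        return ctx;
}

static struct demo_ctx *demo_open_checked(void)
{
        struct demo_ctx *ctx = demo_open();

        /* only touch the object when a real one came back */
        if (!IS_ERR(ctx))
                ctx->supp_nowait = true;

        return ctx;
}
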
index 284cf2c5a8fd92db5bde9705e67d2510fe984731..8e1cf4d789be10df2413e1311bba63dde3545f43 100644 (file)
@@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
        struct pci_dev *pci_dev; \
        struct platform_device *pdev; \
        struct proc_thermal_device *proc_dev; \
-\
+       \
+       if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
+               dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
+               return 0; \
+       } \
+       \
        if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
                pdev = to_platform_device(dev); \
                proc_dev = platform_get_drvdata(pdev); \
@@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev,
        *priv = proc_priv;
 
        ret = proc_thermal_read_ppcc(proc_priv);
-       if (!ret) {
-               ret = sysfs_create_group(&dev->kobj,
-                                        &power_limit_attribute_group);
-
-       }
        if (ret)
                return ret;
 
@@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev,
 
        proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
        if (IS_ERR(proc_priv->int340x_zone)) {
-               ret = PTR_ERR(proc_priv->int340x_zone);
-               goto remove_group;
+               return PTR_ERR(proc_priv->int340x_zone);
        } else
                ret = 0;
 
@@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev,
 
 remove_zone:
        int340x_thermal_zone_remove(proc_priv->int340x_zone);
-remove_group:
-       sysfs_remove_group(&proc_priv->dev->kobj,
-                          &power_limit_attribute_group);
 
        return ret;
 }
@@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev)
        platform_set_drvdata(pdev, proc_priv);
        proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
 
-       return 0;
+       dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
+
+       return sysfs_create_group(&pdev->dev.kobj,
+                                        &power_limit_attribute_group);
 }
 
 static int int3401_remove(struct platform_device *pdev)
@@ -423,7 +422,7 @@ static int  proc_thermal_pci_probe(struct pci_dev *pdev,
                proc_priv->soc_dts = intel_soc_dts_iosf_init(
                                        INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
 
-               if (proc_priv->soc_dts && pdev->irq) {
+               if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
                        ret = pci_enable_msi(pdev);
                        if (!ret) {
                                ret = request_threaded_irq(pdev->irq, NULL,
@@ -441,7 +440,10 @@ static int  proc_thermal_pci_probe(struct pci_dev *pdev,
                        dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
        }
 
-       return 0;
+       dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
+
+       return sysfs_create_group(&pdev->dev.kobj,
+                                        &power_limit_attribute_group);
 }
 
 static void  proc_thermal_pci_remove(struct pci_dev *pdev)
index 4164414d4c64b266dfce58772241cb009363f4c0..8bdf42bc8fc817c9a0ac1278d2e89aeb7377ba0e 100644 (file)
@@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
                                /* too large for caller's buffer */
                                ret = -EOVERFLOW;
                        } else {
+                               __set_current_state(TASK_RUNNING);
                                if (copy_to_user(buf, rbuf->buf, rbuf->count))
                                        ret = -EFAULT;
                                else
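
The added __set_current_state(TASK_RUNNING) puts the task back into the running state before copy_to_user(), which can fault and sleep; leaving the state at TASK_INTERRUPTIBLE from the read loop's wait would trigger the "do not call blocking ops when !TASK_RUNNING" warning. A hedged sketch of the wait-loop pattern involved (simplified, not the n_hdlc data structures):

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

/*
 * The task marks itself sleeping before each condition check, and must be
 * back in TASK_RUNNING before it may block again (e.g. in copy_to_user(),
 * which can fault in user pages).
 */
static ssize_t demo_read(wait_queue_head_t *wq, char __user *buf,
                         const char *data, size_t len, bool *ready)
{
        DECLARE_WAITQUEUE(wait, current);
        ssize_t ret = 0;

        add_wait_queue(wq, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (*ready)
                        break;
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                schedule();
        }
        __set_current_state(TASK_RUNNING);      /* before any blocking call */
        remove_wait_queue(wq, &wait);

        if (!ret && copy_to_user(buf, data, len))
                ret = -EFAULT;

        return ret ? ret : len;
}
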
index 189ab1212d9aa80b219e01c131ddbbe6c4bec6d8..e441221e04b9aa67186bf6784236486d6f30cdce 100644 (file)
@@ -1070,15 +1070,16 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 
                        ret = 0;
                }
-       }
 
-       /* Initialise interrupt backoff work if required */
-       if (up->overrun_backoff_time_ms > 0) {
-               uart->overrun_backoff_time_ms = up->overrun_backoff_time_ms;
-               INIT_DELAYED_WORK(&uart->overrun_backoff,
-                                 serial_8250_overrun_backoff_work);
-       } else {
-               uart->overrun_backoff_time_ms = 0;
+               /* Initialise interrupt backoff work if required */
+               if (up->overrun_backoff_time_ms > 0) {
+                       uart->overrun_backoff_time_ms =
+                               up->overrun_backoff_time_ms;
+                       INIT_DELAYED_WORK(&uart->overrun_backoff,
+                                       serial_8250_overrun_backoff_work);
+               } else {
+                       uart->overrun_backoff_time_ms = 0;
+               }
        }
 
        mutex_unlock(&serial_mutex);
index 241a48e5052c37106acc8f672c37834dd38a74c8..debdd1b9e01ae560d32065d89cb37d6115388879 100644 (file)
@@ -1697,7 +1697,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
        }
 
        /* ask the core to calculate the divisor */
-       baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+       baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
 
        spin_lock_irqsave(&sport->port.lock, flags);
 
index a72d6d9fb98340e6d283876a7f7b2f96c31479e0..38016609c7fa99b1726bcebe3ed04b1fd6a0c724 100644 (file)
@@ -225,7 +225,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
        unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
        u32 geni_ios;
 
-       if (uart_console(uport) || !uart_cts_enabled(uport)) {
+       if (uart_console(uport)) {
                mctrl |= TIOCM_CTS;
        } else {
                geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
@@ -241,7 +241,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
 {
        u32 uart_manual_rfr = 0;
 
-       if (uart_console(uport) || !uart_cts_enabled(uport))
+       if (uart_console(uport))
                return;
 
        if (!(mctrl & TIOCM_RTS))
index d4cca5bdaf1c60d40ba45b6c1815f2a6d98046af..5c01bb6d1c24f7081ce537c5445566266af0c387 100644 (file)
@@ -550,10 +550,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
        int ret = 0;
 
        circ = &state->xmit;
-       if (!circ->buf)
+       port = uart_port_lock(state, flags);
+       if (!circ->buf) {
+               uart_port_unlock(port, flags);
                return 0;
+       }
 
-       port = uart_port_lock(state, flags);
        if (port && uart_circ_chars_free(circ) != 0) {
                circ->buf[circ->head] = c;
                circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
@@ -586,11 +588,13 @@ static int uart_write(struct tty_struct *tty,
                return -EL3HLT;
        }
 
+       port = uart_port_lock(state, flags);
        circ = &state->xmit;
-       if (!circ->buf)
+       if (!circ->buf) {
+               uart_port_unlock(port, flags);
                return 0;
+       }
 
-       port = uart_port_lock(state, flags);
        while (port) {
                c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
                if (count < c)
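
Both hunks move the uart_port_lock() call ahead of the circ->buf test, because the transmit buffer can be torn down concurrently and is only stable while the port lock is held. A generic sketch of checking shared state only under the lock (demo_* names are illustrative):

#include <linux/spinlock.h>

struct demo_port {
        spinlock_t lock;
        char *xmit_buf;         /* freed by shutdown while holding @lock */
};

static int demo_put_char(struct demo_port *port, char c)
{
        unsigned long flags;
        int queued = 0;

        spin_lock_irqsave(&port->lock, flags);
        /* the buffer pointer is only stable while the lock is held */
        if (port->xmit_buf) {
                port->xmit_buf[0] = c;  /* simplified: ignore ring indices */
                queued = 1;
        }
        spin_unlock_irqrestore(&port->lock, flags);

        return queued;
}
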
index 23c6fd23842295de47b5ec328316e5de8c435b76..21ffcce16927164a279cb0afa8300d4430942910 100644 (file)
@@ -2189,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
        ld = tty_ldisc_ref_wait(tty);
        if (!ld)
                return -EIO;
-       ld->ops->receive_buf(tty, &ch, &mbz, 1);
+       if (ld->ops->receive_buf)
+               ld->ops->receive_buf(tty, &ch, &mbz, 1);
        tty_ldisc_deref(ld);
        return 0;
 }
index 41ec8e5010f30a544b82ca439cc5a481fe499b19..bba75560d11e2eca421638aef7a2a928c44878b3 100644 (file)
@@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
        if (con_is_visible(vc))
                update_screen(vc);
        vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
+       notify_update(vc);
        return err;
 }
 
@@ -2764,8 +2765,8 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
        con_flush(vc, draw_from, draw_to, &draw_x);
        vc_uniscr_debug_check(vc);
        console_conditional_schedule();
-       console_unlock();
        notify_update(vc);
+       console_unlock();
        return n;
 }
 
@@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
        unsigned char c;
        static DEFINE_SPINLOCK(printing_lock);
        const ushort *start;
-       ushort cnt = 0;
-       ushort myx;
+       ushort start_x, cnt;
        int kmsg_console;
 
        /* console busy or not yet initialized */
@@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
        if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
                vc = vc_cons[kmsg_console - 1].d;
 
-       /* read `x' only after setting currcons properly (otherwise
-          the `x' macro will read the x of the foreground console). */
-       myx = vc->vc_x;
-
        if (!vc_cons_allocated(fg_console)) {
                /* impossible */
                /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
@@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
                hide_cursor(vc);
 
        start = (ushort *)vc->vc_pos;
-
-       /* Contrived structure to try to emulate original need_wrap behaviour
-        * Problems caused when we have need_wrap set on '\n' character */
+       start_x = vc->vc_x;
+       cnt = 0;
        while (count--) {
                c = *b++;
                if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
-                       if (cnt > 0) {
-                               if (con_is_visible(vc))
-                                       vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
-                               vc->vc_x += cnt;
-                               if (vc->vc_need_wrap)
-                                       vc->vc_x--;
-                               cnt = 0;
-                       }
+                       if (cnt && con_is_visible(vc))
+                               vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
+                       cnt = 0;
                        if (c == 8) {           /* backspace */
                                bs(vc);
                                start = (ushort *)vc->vc_pos;
-                               myx = vc->vc_x;
+                               start_x = vc->vc_x;
                                continue;
                        }
                        if (c != 13)
                                lf(vc);
                        cr(vc);
                        start = (ushort *)vc->vc_pos;
-                       myx = vc->vc_x;
+                       start_x = vc->vc_x;
                        if (c == 10 || c == 13)
                                continue;
                }
+               vc_uniscr_putc(vc, c);
                scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
                notify_write(vc, c);
                cnt++;
-               if (myx == vc->vc_cols - 1) {
-                       vc->vc_need_wrap = 1;
-                       continue;
-               }
-               vc->vc_pos += 2;
-               myx++;
-       }
-       if (cnt > 0) {
-               if (con_is_visible(vc))
-                       vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
-               vc->vc_x += cnt;
-               if (vc->vc_x == vc->vc_cols) {
-                       vc->vc_x--;
+               if (vc->vc_x == vc->vc_cols - 1) {
                        vc->vc_need_wrap = 1;
+               } else {
+                       vc->vc_pos += 2;
+                       vc->vc_x++;
                }
        }
+       if (cnt && con_is_visible(vc))
+               vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
        set_cursor(vc);
        notify_update(vc);
 
index e81de9ca8729e2e7101ce0e6bcb8e15977377ff0..9b45aa422e696e96f6038aed9fadc6a1a1527956 100644 (file)
@@ -316,7 +316,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
        if (IS_ERR(data->usbmisc_data))
                return PTR_ERR(data->usbmisc_data);
 
-       if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) {
+       if ((of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC)
+               && data->usbmisc_data) {
                pdata.flags |= CI_HDRC_IMX_IS_HSIC;
                data->usbmisc_data->hsic = 1;
                data->pinctrl = devm_pinctrl_get(dev);
index dc7f7fd71684cb7d7e2d0f3bc39f49f114618def..c12ac56606c3f681fe8cebe0357a8284ec405f25 100644 (file)
@@ -119,11 +119,6 @@ static const struct attribute_group ports_group = {
        .attrs = ports_attrs,
 };
 
-static const struct attribute_group *ports_groups[] = {
-       &ports_group,
-       NULL
-};
-
 /***************************************
  * Adding & removing ports
  ***************************************/
@@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
 static int usbport_trig_activate(struct led_classdev *led_cdev)
 {
        struct usbport_trig_data *usbport_data;
+       int err;
 
        usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
        if (!usbport_data)
@@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
 
        /* List of ports */
        INIT_LIST_HEAD(&usbport_data->ports);
+       err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
+       if (err)
+               goto err_free;
        usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
        usbport_trig_update_count(usbport_data);
 
@@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
        usbport_data->nb.notifier_call = usbport_trig_notify;
        led_set_trigger_data(led_cdev, usbport_data);
        usb_register_notify(&usbport_data->nb);
-
        return 0;
+
+err_free:
+       kfree(usbport_data);
+       return err;
 }
 
 static void usbport_trig_deactivate(struct led_classdev *led_cdev)
@@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev)
                usbport_trig_remove_port(usbport_data, port);
        }
 
+       sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
+
        usb_unregister_notify(&usbport_data->nb);
 
        kfree(usbport_data);
@@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = {
        .name     = "usbport",
        .activate = usbport_trig_activate,
        .deactivate = usbport_trig_deactivate,
-       .groups = ports_groups,
 };
 
 static int __init usbport_trig_init(void)
index 68ad75a7460dd032cfbe8b5a007f3774f8a8cad6..55ef3cc2701b999aa93d9bca90cc979bfb1cc08a 100644 (file)
@@ -261,7 +261,7 @@ static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
 
        if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
                dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
-               dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
+               dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
                dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
        }
 }
index 07bd31bb2f8a0a6a70d3f4a9e94766abdf857bfe..bed2ff42780b79dd2cdbc166c564e7fcb786d60a 100644 (file)
@@ -177,6 +177,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
        req->started = false;
        list_del(&req->list);
        req->remaining = 0;
+       req->needs_extra_trb = false;
 
        if (req->request.status == -EINPROGRESS)
                req->request.status = status;
@@ -1984,6 +1985,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
 
        /* begin to receive SETUP packets */
        dwc->ep0state = EP0_SETUP_PHASE;
+       dwc->link_state = DWC3_LINK_STATE_SS_DIS;
        dwc3_ep0_out_start(dwc);
 
        dwc3_gadget_enable_irq(dwc);
@@ -3379,6 +3381,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
        dwc3_disconnect_gadget(dwc);
        __dwc3_gadget_stop(dwc);
 
+       synchronize_irq(dwc->irq_gadget);
+
        return 0;
 }
 
index 9cdef108fb1b3da5581c99c8251903702b04c6a8..ed68a4860b7d8702e1a8fd9cca40b04b5065a4ce 100644 (file)
@@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func(
 
        ss = kzalloc(sizeof(*ss), GFP_KERNEL);
        if (!ss)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        ss_opts =  container_of(fi, struct f_ss_opts, func_inst);
 
index f26109eafdbfbc3633a9126c18f41cef27e26e01..66ec1fdf9fe7d9860c3c0d277391b430c516e339 100644 (file)
@@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
 MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
 MODULE_ALIAS("mv-ehci");
 MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids);
index 1ab2a6191013e195d8c4d8f246222f85b064b9c0..77ef4c481f3ce42c7bcd24bd9c271ac64a46703f 100644 (file)
@@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
        int result;
        u16 val;
 
+       result = usb_autopm_get_interface(serial->interface);
+       if (result)
+               return result;
+
        val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
        result = usb_control_msg(serial->dev,
                                 usb_sndctrlpipe(serial->dev, 0),
@@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
                        val, result);
        }
 
+       usb_autopm_put_interface(serial->interface);
+
        return result;
 }
 
@@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
        unsigned char *buf;
        int result;
 
+       result = usb_autopm_get_interface(serial->interface);
+       if (result)
+               return result;
+
        buf = kmalloc(1, GFP_KERNEL);
-       if (!buf)
+       if (!buf) {
+               usb_autopm_put_interface(serial->interface);
                return -ENOMEM;
+       }
 
        result = usb_control_msg(serial->dev,
                                 usb_rcvctrlpipe(serial->dev, 0),
@@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
        }
 
        kfree(buf);
+       usb_autopm_put_interface(serial->interface);
 
        return result;
 }
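
The ftdi_sio changes bracket the synchronous control transfers with usb_autopm_get_interface()/usb_autopm_put_interface() so the device is resumed and kept awake for the I/O, and every exit path drops the reference it took. A sketch of that balanced get/put shape (the request value and demo_read_reg() are hypothetical; the USB core calls are real):

#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Resume the interface for a synchronous transfer; drop the PM reference
 * on every exit path. */
static int demo_read_reg(struct usb_serial *serial, u8 *out)
{
        u8 *buf;
        int ret;

        ret = usb_autopm_get_interface(serial->interface);
        if (ret)
                return ret;

        buf = kmalloc(1, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_put;
        }

        ret = usb_control_msg(serial->dev,
                              usb_rcvctrlpipe(serial->dev, 0),
                              0x90 /* hypothetical request */,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                              0, 0, buf, 1, 1000);
        if (ret == 1)
                *out = buf[0];

        kfree(buf);
out_put:
        usb_autopm_put_interface(serial->interface);
        return ret;
}
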
index 09e21e84fc4e03ed7db638c402bbfd66e02918ef..a68f1fb25b8a996cc2d601c5f72a02cafa1500ad 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
 /*
        usa26msg.h
 
index dee454c4609a2a51a60169b41f3420b5937ddad7..a19f3fe5d98d346719185459d713aefd646119be 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
 /*
        usa28msg.h
 
index 163b2dea2ec5ccd7fba706068031b69a134ad377..8c3970fdd868a407b87f9a80468e67f1ac7c2dff 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
 /*
        usa49msg.h
 
index 20fa3e2f7187a4c84da1fd524eb67afd10e4532e..dcf502fdbb44430c29d2bfc6d1c40257ed483302 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
 /*
        usa67msg.h
 
index 86708ecd87357095629d6f670938b2da30e456ac..c4ca0f631d20a9085294f67e1fe356e55a209b9b 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
 /*
        usa90msg.h
 
index 98e7a5df0f6d8ffcbd28335fea51bb25f294eb1b..bb3f9aa4a9093fe4fcf6ebc192cf1d040d6dc8c3 100644 (file)
@@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
        { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
        { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
+       { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
index 4e2554d553620402ae8d3adf5e7f4eee96c90cff..559941ca884daf353cc022e41755fc9bc6d54567 100644 (file)
@@ -8,6 +8,7 @@
 
 #define PL2303_VENDOR_ID       0x067b
 #define PL2303_PRODUCT_ID      0x2303
+#define PL2303_PRODUCT_ID_TB           0x2304
 #define PL2303_PRODUCT_ID_RSAQ2                0x04bb
 #define PL2303_PRODUCT_ID_DCU11                0x1234
 #define PL2303_PRODUCT_ID_PHAROS       0xaaa0
@@ -20,6 +21,7 @@
 #define PL2303_PRODUCT_ID_MOTOROLA     0x0307
 #define PL2303_PRODUCT_ID_ZTEK         0xe1f1
 
+
 #define ATEN_VENDOR_ID         0x0557
 #define ATEN_VENDOR_ID2                0x0547
 #define ATEN_PRODUCT_ID                0x2008
index 4d0273508043de920cc4868eab9722deabf4ba40..edbbb13d6de6ee39285fef25268be3d08f4e3b0e 100644 (file)
@@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
 /* Motorola Tetra driver */
 #define MOTOROLA_TETRA_IDS()                   \
        { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
-       { USB_DEVICE(0x0cad, 0x9012) }  /* MTP6550 */
+       { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+       { USB_DEVICE(0x0cad, 0x9016) }  /* TPG2200 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
 /* Novatel Wireless GPS driver */
diff --git a/drivers/usb/usbip/README b/drivers/usb/usbip/README
deleted file mode 100644 (file)
index 41a2cf2..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
-       - more discussion about the protocol
-       - testing
-       - review of the userspace interface
-       - document the protocol
-
-Please send patches for this code to Greg Kroah-Hartman <greg@kroah.com>
index 4d13e510590e7b9a56ca8a57ef4a2737200f55ff..b2aa986ab9ed0572fb4fe71b41d1c2b8645d5de1 100644 (file)
@@ -1,13 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * VFIO PCI mmap/mmap_fault tracepoints
  *
  * Copyright (C) 2018 IBM Corp.  All rights reserved.
  *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #undef TRACE_SYSTEM
index 054a2cf9dd8e555a17b7eb11563a1750c7f01cd5..32f695ffe128085b0df8b8c0025e9b420868f6f8 100644 (file)
@@ -1,14 +1,10 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * VFIO PCI NVIDIA Witherspoon GPU support a.k.a. NVLink2.
  *
  * Copyright (C) 2018 IBM Corp.  All rights reserved.
  *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  * Register an on-GPU RAM region for cacheable access.
  *
  * Derived from original vfio_pci_igd.c:
@@ -178,11 +174,11 @@ static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
                struct vfio_pci_region *region, struct vfio_info_cap *caps)
 {
        struct vfio_pci_nvgpu_data *data = region->data;
-       struct vfio_region_info_cap_nvlink2_ssatgt cap = { 0 };
-
-       cap.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT;
-       cap.header.version = 1;
-       cap.tgt = data->gpu_tgt;
+       struct vfio_region_info_cap_nvlink2_ssatgt cap = {
+               .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
+               .header.version = 1,
+               .tgt = data->gpu_tgt
+       };
 
        return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
 }
@@ -365,18 +361,18 @@ static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
                struct vfio_pci_region *region, struct vfio_info_cap *caps)
 {
        struct vfio_pci_npu2_data *data = region->data;
-       struct vfio_region_info_cap_nvlink2_ssatgt captgt = { 0 };
-       struct vfio_region_info_cap_nvlink2_lnkspd capspd = { 0 };
+       struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
+               .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
+               .header.version = 1,
+               .tgt = data->gpu_tgt
+       };
+       struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
+               .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
+               .header.version = 1,
+               .link_speed = data->link_speed
+       };
        int ret;
 
-       captgt.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT;
-       captgt.header.version = 1;
-       captgt.tgt = data->gpu_tgt;
-
-       capspd.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD;
-       capspd.header.version = 1;
-       capspd.link_speed = data->link_speed;
-
        ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
        if (ret)
                return ret;
index 09731b2f6815f98651acba84cf9cc8d679f34f7a..c6b3bdbbdbc9e283d661eb960696681611575489 100644 (file)
@@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
 
 static void vgacon_restore_screen(struct vc_data *c)
 {
+       c->vc_origin = c->vc_visible_origin;
        vgacon_scrollback_cur->save = 0;
 
        if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
@@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
        int start, end, count, soff;
 
        if (!lines) {
-               c->vc_visible_origin = c->vc_origin;
-               vga_set_mem_top(c);
+               vgacon_restore_screen(c);
                return;
        }
 
@@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
        if (!vgacon_scrollback_cur->save) {
                vgacon_cursor(c, CM_ERASE);
                vgacon_save_screen(c);
+               c->vc_origin = (unsigned long)c->vc_screenbuf;
                vgacon_scrollback_cur->save = 1;
        }
 
@@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
                int copysize;
 
                int diff = c->vc_rows - count;
-               void *d = (void *) c->vc_origin;
+               void *d = (void *) c->vc_visible_origin;
                void *s = (void *) c->vc_screenbuf;
 
                count *= c->vc_size_row;
index ed05514cc2dce77f2ce46cd8d941b6b20f5f1be3..1834524ae3734c4d87e64ad3c83e321a407ffd50 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/delay.h>
 #include <linux/types.h>
+#include <linux/mfd/bcm2835-pm.h>
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/watchdog.h>
@@ -47,6 +48,8 @@ struct bcm2835_wdt {
        spinlock_t              lock;
 };
 
+static struct bcm2835_wdt *bcm2835_power_off_wdt;
+
 static unsigned int heartbeat;
 static bool nowayout = WATCHDOG_NOWAYOUT;
 
@@ -148,10 +151,7 @@ static struct watchdog_device bcm2835_wdt_wdd = {
  */
 static void bcm2835_power_off(void)
 {
-       struct device_node *np =
-               of_find_compatible_node(NULL, NULL, "brcm,bcm2835-pm-wdt");
-       struct platform_device *pdev = of_find_device_by_node(np);
-       struct bcm2835_wdt *wdt = platform_get_drvdata(pdev);
+       struct bcm2835_wdt *wdt = bcm2835_power_off_wdt;
        u32 val;
 
        /*
@@ -169,7 +169,7 @@ static void bcm2835_power_off(void)
 
 static int bcm2835_wdt_probe(struct platform_device *pdev)
 {
-       struct resource *res;
+       struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);
        struct device *dev = &pdev->dev;
        struct bcm2835_wdt *wdt;
        int err;
@@ -181,10 +181,7 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
 
        spin_lock_init(&wdt->lock);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       wdt->base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(wdt->base))
-               return PTR_ERR(wdt->base);
+       wdt->base = pm->base;
 
        watchdog_set_drvdata(&bcm2835_wdt_wdd, wdt);
        watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev);
@@ -211,8 +208,10 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
                return err;
        }
 
-       if (pm_power_off == NULL)
+       if (pm_power_off == NULL) {
                pm_power_off = bcm2835_power_off;
+               bcm2835_power_off_wdt = wdt;
+       }
 
        dev_info(dev, "Broadcom BCM2835 watchdog timer");
        return 0;
@@ -226,18 +225,11 @@ static int bcm2835_wdt_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct of_device_id bcm2835_wdt_of_match[] = {
-       { .compatible = "brcm,bcm2835-pm-wdt", },
-       {},
-};
-MODULE_DEVICE_TABLE(of, bcm2835_wdt_of_match);
-
 static struct platform_driver bcm2835_wdt_driver = {
        .probe          = bcm2835_wdt_probe,
        .remove         = bcm2835_wdt_remove,
        .driver = {
                .name =         "bcm2835-wdt",
-               .of_match_table = bcm2835_wdt_of_match,
        },
 };
 module_platform_driver(bcm2835_wdt_driver);
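
After this change the watchdog no longer maps its own registers or matches an OF node: as a child of the new bcm2835-pm MFD driver it picks the shared register base out of the parent's drvdata. A hedged sketch of that parent/child handoff (demo_* names are invented; the MFD cell registration is only indicated by a comment):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_pm {
        void __iomem *base;     /* mapped once by the parent MFD driver */
};

/* Parent probe: map the shared register block and publish it via drvdata. */
static int demo_pm_probe(struct platform_device *pdev)
{
        struct resource *res;
        struct demo_pm *pm;

        pm = devm_kzalloc(&pdev->dev, sizeof(*pm), GFP_KERNEL);
        if (!pm)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pm->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pm->base))
                return PTR_ERR(pm->base);

        platform_set_drvdata(pdev, pm);
        /* ...then register the watchdog child cell (e.g. devm_mfd_add_devices()) */
        return 0;
}

/* Child probe: reach through the parent instead of remapping the registers. */
static int demo_wdt_probe(struct platform_device *pdev)
{
        struct demo_pm *pm = dev_get_drvdata(pdev->dev.parent);

        if (!pm)
                return -ENODEV;
        /* use pm->base ... */
        return 0;
}
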
index 989cf872b98c60398e9394528721de1811151e9a..bb7888429be6b98dec9cd7fcfd13f9827a515d05 100644 (file)
@@ -645,7 +645,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                     void *cpu_addr, dma_addr_t dma_addr, size_t size,
                     unsigned long attrs)
 {
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#ifdef CONFIG_ARM
        if (xen_get_dma_ops(dev)->mmap)
                return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
                                                    dma_addr, size, attrs);
@@ -662,7 +662,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t handle, size_t size,
                        unsigned long attrs)
 {
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#ifdef CONFIG_ARM
        if (xen_get_dma_ops(dev)->get_sgtable) {
 #if 0
        /*
index 94c026bba2c226a4b034ca0db2011aa411726179..bba28a5034ba39e53ce68b32ecad1d03b8d60ba7 100644 (file)
@@ -1035,6 +1035,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci)
        list_del_init(&ci->i_snap_realm_item);
        ci->i_snap_realm_counter++;
        ci->i_snap_realm = NULL;
+       if (realm->ino == ci->i_vino.ino)
+               realm->inode = NULL;
        spin_unlock(&realm->inodes_with_caps_lock);
        ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
                            realm);
index 03f4d24db8fe009dc4384b83162979c34f11d1e0..9455d3aef0c3c1b50f5be96e3a804424e55597b4 100644 (file)
@@ -3,19 +3,6 @@
  * quota.c - CephFS quota
  *
  * Copyright (C) 2017-2018 SUSE
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/statfs.h>
index 593fb422d0f3e63f5e51f95e2f9d7eee25c78c33..e92a2fee3c577bd7fc8c63e3e10edf079ca97686 100644 (file)
@@ -252,6 +252,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
        seq_printf(m, ",ACL");
 #endif
        seq_putc(m, '\n');
+       seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize);
        seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
        seq_printf(m, "Servers:");
 
index e18915415e1337dc22e5fca260e9011ac5b1a37e..bb54ccf8481c33937d3dfa6b0a782971112a74fc 100644 (file)
@@ -1549,18 +1549,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
 }
 
 static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+                    bool malformed)
 {
        int length;
-       struct cifs_readdata *rdata = mid->callback_data;
 
        length = cifs_discard_remaining_data(server);
-       dequeue_mid(mid, rdata->result);
+       dequeue_mid(mid, malformed);
        mid->resp_buf = server->smallbuf;
        server->smallbuf = NULL;
        return length;
 }
 
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+       struct cifs_readdata *rdata = mid->callback_data;
+
+       return  __cifs_readv_discard(server, mid, rdata->result);
+}
+
 int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
@@ -1602,12 +1610,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                return -1;
        }
 
+       /* set up first two iov for signature check and to get credits */
+       rdata->iov[0].iov_base = buf;
+       rdata->iov[0].iov_len = 4;
+       rdata->iov[1].iov_base = buf + 4;
+       rdata->iov[1].iov_len = server->total_read - 4;
+       cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+                rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+       cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+                rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
        /* Was the SMB read successful? */
        rdata->result = server->ops->map_error(buf, false);
        if (rdata->result != 0) {
                cifs_dbg(FYI, "%s: server returned error %d\n",
                         __func__, rdata->result);
-               return cifs_readv_discard(server, mid);
+               /* normal error on read response */
+               return __cifs_readv_discard(server, mid, false);
        }
 
        /* Is there enough to get to the rest of the READ_RSP header? */
@@ -1651,14 +1670,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                server->total_read += length;
        }
 
-       /* set up first iov for signature check */
-       rdata->iov[0].iov_base = buf;
-       rdata->iov[0].iov_len = 4;
-       rdata->iov[1].iov_base = buf + 4;
-       rdata->iov[1].iov_len = server->total_read - 4;
-       cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
-                rdata->iov[0].iov_base, server->total_read);
-
        /* how much data is in the response? */
 #ifdef CONFIG_CIFS_SMB_DIRECT
        use_rdma_mr = rdata->mr;
index 683310f261718d3d07088342fde4b89a27bac4bc..8463c940e0e59779390b210fa3ebba45df86f80b 100644 (file)
@@ -720,6 +720,21 @@ server_unresponsive(struct TCP_Server_Info *server)
        return false;
 }
 
+static inline bool
+zero_credits(struct TCP_Server_Info *server)
+{
+       int val;
+
+       spin_lock(&server->req_lock);
+       val = server->credits + server->echo_credits + server->oplock_credits;
+       if (server->in_flight == 0 && val == 0) {
+               spin_unlock(&server->req_lock);
+               return true;
+       }
+       spin_unlock(&server->req_lock);
+       return false;
+}
+
 static int
 cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
 {
@@ -732,6 +747,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
        for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
                try_to_freeze();
 
+               /* reconnect if no credits and no requests in flight */
+               if (zero_credits(server)) {
+                       cifs_reconnect(server);
+                       return -ECONNABORTED;
+               }
+
                if (server_unresponsive(server))
                        return -ECONNABORTED;
                if (cifs_rdma_enabled(server) && server->smbd_conn)
index f14533da3a9328d459073177e970974a6037f550..01a76bccdb8dfd28c1a0fff2036270bea70f08db 100644 (file)
@@ -293,6 +293,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc;
        struct smb2_file_all_info *smb2_data;
        __u32 create_options = 0;
+       struct cifs_fid fid;
+       bool no_cached_open = tcon->nohandlecache;
 
        *adjust_tz = false;
        *symlink = false;
@@ -301,6 +303,21 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
                            GFP_KERNEL);
        if (smb2_data == NULL)
                return -ENOMEM;
+
+       /* If it is a root and its handle is cached then use it */
+       if (!strlen(full_path) && !no_cached_open) {
+               rc = open_shroot(xid, tcon, &fid);
+               if (rc)
+                       goto out;
+               rc = SMB2_query_info(xid, tcon, fid.persistent_fid,
+                                    fid.volatile_fid, smb2_data);
+               close_shroot(&tcon->crfid);
+               if (rc)
+                       goto out;
+               move_smb2_info_to_cifs(data, smb2_data);
+               goto out;
+       }
+
        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;
 
index 6a9c47541c53d0983a068703106ddacdbc834e04..7b8b58fb4d3fbdcf898d306a585c7332cd2c0ef5 100644 (file)
@@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
        if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
                return false;
 
+       if (rsp->sync_hdr.CreditRequest) {
+               spin_lock(&server->req_lock);
+               server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
+               spin_unlock(&server->req_lock);
+               wake_up(&server->request_q);
+       }
+
        if (rsp->StructureSize !=
                                smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
                if (le16_to_cpu(rsp->StructureSize) == 44)
index cf7eb891804f6f527b7fd4a7512dfc6d66309042..153238fc4fa986ba542778a95d313493d717a1c0 100644 (file)
@@ -34,6 +34,7 @@
 #include "cifs_ioctl.h"
 #include "smbdirect.h"
 
+/* Change credits for different ops and return the total number of credits */
 static int
 change_conf(struct TCP_Server_Info *server)
 {
@@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server)
        server->oplock_credits = server->echo_credits = 0;
        switch (server->credits) {
        case 0:
-               return -1;
+               return 0;
        case 1:
                server->echoes = false;
                server->oplocks = false;
-               cifs_dbg(VFS, "disabling echoes and oplocks\n");
                break;
        case 2:
                server->echoes = true;
                server->oplocks = false;
                server->echo_credits = 1;
-               cifs_dbg(FYI, "disabling oplocks\n");
                break;
        default:
                server->echoes = true;
@@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server)
                server->echo_credits = 1;
        }
        server->credits -= server->echo_credits + server->oplock_credits;
-       return 0;
+       return server->credits + server->echo_credits + server->oplock_credits;
 }
 
 static void
 smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
                 const int optype)
 {
-       int *val, rc = 0;
+       int *val, rc = -1;
+
        spin_lock(&server->req_lock);
        val = server->ops->get_credits_field(server, optype);
 
@@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
        }
        spin_unlock(&server->req_lock);
        wake_up(&server->request_q);
-       if (rc)
-               cifs_reconnect(server);
+
+       if (server->tcpStatus == CifsNeedReconnect)
+               return;
+
+       switch (rc) {
+       case -1:
+               /* change_conf hasn't been executed */
+               break;
+       case 0:
+               cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
+               break;
+       case 1:
+               cifs_dbg(VFS, "disabling echoes and oplocks\n");
+               break;
+       case 2:
+               cifs_dbg(FYI, "disabling oplocks\n");
+               break;
+       default:
+               cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
+       }
 }
 
 static void
@@ -136,7 +154,11 @@ smb2_get_credits(struct mid_q_entry *mid)
 {
        struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
 
-       return le16_to_cpu(shdr->CreditRequest);
+       if (mid->mid_state == MID_RESPONSE_RECEIVED
+           || mid->mid_state == MID_RESPONSE_MALFORMED)
+               return le16_to_cpu(shdr->CreditRequest);
+
+       return 0;
 }
 
 static int
@@ -165,14 +187,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
 
                        scredits = server->credits;
                        /* can deadlock with reopen */
-                       if (scredits == 1) {
+                       if (scredits <= 8) {
                                *num = SMB2_MAX_BUFFER_SIZE;
                                *credits = 0;
                                break;
                        }
 
-                       /* leave one credit for a possible reopen */
-                       scredits--;
+                       /* leave some credits for reopen and other ops */
+                       scredits -= 8;
                        *num = min_t(unsigned int, size,
                                     scredits * SMB2_MAX_BUFFER_SIZE);
 
@@ -3189,11 +3211,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
                        server->ops->is_status_pending(buf, server, 0))
                return -1;
 
-       rdata->result = server->ops->map_error(buf, false);
+       /* set up first two iov to get credits */
+       rdata->iov[0].iov_base = buf;
+       rdata->iov[0].iov_len = 4;
+       rdata->iov[1].iov_base = buf + 4;
+       rdata->iov[1].iov_len =
+               min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
+       cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+                rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+       cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+                rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
+       rdata->result = server->ops->map_error(buf, true);
        if (rdata->result != 0) {
                cifs_dbg(FYI, "%s: server returned error %d\n",
                         __func__, rdata->result);
-               dequeue_mid(mid, rdata->result);
+               /* normal error on read response */
+               dequeue_mid(mid, false);
                return 0;
        }
 
@@ -3266,14 +3300,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
                return 0;
        }
 
-       /* set up first iov for signature check */
-       rdata->iov[0].iov_base = buf;
-       rdata->iov[0].iov_len = 4;
-       rdata->iov[1].iov_base = buf + 4;
-       rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
-       cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
-                rdata->iov[0].iov_base, server->vals->read_rsp_size);
-
        length = rdata->copy_into_pages(server, rdata, &iter);
 
        kfree(bvec);
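
One practical consequence of the smb2_wait_mtu_credits hunk above is the sizing arithmetic: instead of holding back a single credit, eight are now reserved for reopen and other housekeeping before a large read or write is split up. A rough standalone illustration of that arithmetic, assuming SMB2_MAX_BUFFER_SIZE is 64 KiB as in the CIFS headers of this era (plain userspace C, not the kernel function):

#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE (64 * 1024)

/* Sketch: bytes a single request may cover given the available credits,
 * once 8 credits are held back for reopen and other ops. */
static unsigned int max_io_bytes(unsigned int scredits, unsigned int requested)
{
	if (scredits <= 8)
		return SMB2_MAX_BUFFER_SIZE;	/* fall back to one credit's worth */
	scredits -= 8;
	if (requested > scredits * SMB2_MAX_BUFFER_SIZE)
		requested = scredits * SMB2_MAX_BUFFER_SIZE;
	return requested;
}

int main(void)
{
	/* 20 credits leave 12 usable ones: 12 * 64 KiB = 786432 bytes */
	printf("%u\n", max_io_bytes(20, 4 * 1024 * 1024));
	return 0;
}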
index 50811a7dc0e0c6375fa2cdbe5e6c8474da61a8c4..2ff209ec4fabe55d22a342e045b1f4b363bb294d 100644 (file)
@@ -2816,6 +2816,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        int resp_buftype = CIFS_NO_BUFFER;
        struct cifs_ses *ses = tcon->ses;
        int flags = 0;
+       bool allocated = false;
 
        cifs_dbg(FYI, "Query Info\n");
 
@@ -2855,14 +2856,21 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
                                        "Error %d allocating memory for acl\n",
                                        rc);
                                *dlen = 0;
+                               rc = -ENOMEM;
                                goto qinf_exit;
                        }
+                       allocated = true;
                }
        }
 
        rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
                                        le32_to_cpu(rsp->OutputBufferLength),
                                        &rsp_iov, min_len, *data);
+       if (rc && allocated) {
+               kfree(*data);
+               *data = NULL;
+               *dlen = 0;
+       }
 
 qinf_exit:
        SMB2_query_info_free(&rqst);
@@ -2916,9 +2924,10 @@ smb2_echo_callback(struct mid_q_entry *mid)
 {
        struct TCP_Server_Info *server = mid->callback_data;
        struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
-       unsigned int credits_received = 1;
+       unsigned int credits_received = 0;
 
-       if (mid->mid_state == MID_RESPONSE_RECEIVED)
+       if (mid->mid_state == MID_RESPONSE_RECEIVED
+           || mid->mid_state == MID_RESPONSE_MALFORMED)
                credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
 
        DeleteMidQEntry(mid);
@@ -3175,7 +3184,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
        struct TCP_Server_Info *server = tcon->ses->server;
        struct smb2_sync_hdr *shdr =
                                (struct smb2_sync_hdr *)rdata->iov[0].iov_base;
-       unsigned int credits_received = 1;
+       unsigned int credits_received = 0;
        struct smb_rqst rqst = { .rq_iov = rdata->iov,
                                 .rq_nvec = 2,
                                 .rq_pages = rdata->pages,
@@ -3214,6 +3223,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
                task_io_account_read(rdata->got_bytes);
                cifs_stats_bytes_read(tcon, rdata->got_bytes);
                break;
+       case MID_RESPONSE_MALFORMED:
+               credits_received = le16_to_cpu(shdr->CreditRequest);
+               /* fall through */
        default:
                if (rdata->result != -ENODATA)
                        rdata->result = -EIO;
@@ -3399,7 +3411,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
        unsigned int written;
        struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
-       unsigned int credits_received = 1;
+       unsigned int credits_received = 0;
 
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
@@ -3427,6 +3439,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
        case MID_RETRY_NEEDED:
                wdata->result = -EAGAIN;
                break;
+       case MID_RESPONSE_MALFORMED:
+               credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+               /* fall through */
        default:
                wdata->result = -EIO;
                break;
index bd4a546feec1afb25e13b27f564d78577fe5ec76..4654837871934fe8dd099739660f0a49ef9a10af 100644 (file)
@@ -3,16 +3,6 @@
  *   Copyright (C) 2018, Microsoft Corporation.
  *
  *   Author(s): Steve French <stfrench@microsoft.com>
- *
- *   This program is free software;  you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; either version 2 of the License, or
- *   (at your option) any later version.
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
- *   the GNU General Public License for more details.
  */
 #define CREATE_TRACE_POINTS
 #include "trace.h"
index fb049809555fea9b3e2cc072b735ac0adfdfe1ae..59be48206932a6fb3051ef6b62bb424cd048513b 100644 (file)
@@ -3,16 +3,6 @@
  *   Copyright (C) 2018, Microsoft Corporation.
  *
  *   Author(s): Steve French <stfrench@microsoft.com>
- *
- *   This program is free software;  you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; either version 2 of the License, or
- *   (at your option) any later version.
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
- *   the GNU General Public License for more details.
  */
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM cifs
index 202e0e84efdd7dfa7dff0501c1e9d3861f986afd..53532bd3f50d3d66886cd2465f6cbb0fed498e06 100644 (file)
@@ -786,17 +786,8 @@ static void
 cifs_compound_callback(struct mid_q_entry *mid)
 {
        struct TCP_Server_Info *server = mid->server;
-       unsigned int optype = mid->optype;
-       unsigned int credits_received = 0;
 
-       if (mid->mid_state == MID_RESPONSE_RECEIVED) {
-               if (mid->resp_buf)
-                       credits_received = server->ops->get_credits(mid);
-               else
-                       cifs_dbg(FYI, "Bad state for cancelled MID\n");
-       }
-
-       add_credits(server, credits_received, optype);
+       add_credits(server, server->ops->get_credits(mid), mid->optype);
 }
 
 static void
index dbc1a1f080ceb231f0553ff270ee7663a5479bfd..ec2fb6fe6d37c628d5335b7a0971b464824b0623 100644 (file)
@@ -679,6 +679,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
        unsigned long fs_count; /* Number of filesystem-sized blocks */
        int create;
        unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
+       loff_t i_size;
 
        /*
         * If there was a memory error and we've overwritten all the
@@ -708,8 +709,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
                 */
                create = dio->op == REQ_OP_WRITE;
                if (dio->flags & DIO_SKIP_HOLES) {
-                       if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
-                                                       i_blkbits))
+                       i_size = i_size_read(dio->inode);
+                       if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
                                create = 0;
                }
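
The get_more_blocks hunk above is subtle: fs_startblk is an unsigned 64-bit block number while i_size_read() returns a signed loff_t, so on an empty file the old expression compared against (loff_t)-1, which the usual C conversions turn into the largest unsigned value; the test then always fired and block allocation was wrongly disabled for direct writes to an empty file. A tiny standalone demonstration of that conversion trap (plain C, not kernel code; on common compilers -1 >> 12 stays -1 before being converted):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fs_startblk = 0;	/* first block of the write */
	int64_t i_size = 0;		/* empty file */
	int i_blkbits = 12;		/* 4 KiB blocks */

	/* old check: the signed -1 is converted to UINT64_MAX by the comparison */
	if (fs_startblk <= ((i_size - 1) >> i_blkbits))
		printf("old check: hole assumed, allocation skipped\n");

	/* fixed check: guard the empty-file case before doing the comparison */
	if (i_size && fs_startblk <= ((i_size - 1) >> i_blkbits))
		printf("new check: would skip allocation\n");
	else
		printf("new check: allocation allowed\n");

	return 0;
}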
 
index b40168fcc94a6ae6383600b443e67163f820abbb..36855c1f8dafdce42422e0b31ce806d6d9973979 100644 (file)
@@ -331,11 +331,22 @@ struct inode_switch_wbs_context {
        struct work_struct      work;
 };
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+       down_write(&bdi->wb_switch_rwsem);
+}
+
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+       up_write(&bdi->wb_switch_rwsem);
+}
+
 static void inode_switch_wbs_work_fn(struct work_struct *work)
 {
        struct inode_switch_wbs_context *isw =
                container_of(work, struct inode_switch_wbs_context, work);
        struct inode *inode = isw->inode;
+       struct backing_dev_info *bdi = inode_to_bdi(inode);
        struct address_space *mapping = inode->i_mapping;
        struct bdi_writeback *old_wb = inode->i_wb;
        struct bdi_writeback *new_wb = isw->new_wb;
@@ -343,6 +354,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
        struct page *page;
        bool switched = false;
 
+       /*
+        * If @inode switches cgwb membership while sync_inodes_sb() is
+        * being issued, sync_inodes_sb() might miss it.  Synchronize.
+        */
+       down_read(&bdi->wb_switch_rwsem);
+
        /*
         * By the time control reaches here, RCU grace period has passed
         * since I_WB_SWITCH assertion and all wb stat update transactions
@@ -428,6 +445,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
        spin_unlock(&new_wb->list_lock);
        spin_unlock(&old_wb->list_lock);
 
+       up_read(&bdi->wb_switch_rwsem);
+
        if (switched) {
                wb_wakeup(new_wb);
                wb_put(old_wb);
@@ -468,9 +487,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
        if (inode->i_state & I_WB_SWITCH)
                return;
 
+       /*
+        * Avoid starting new switches while sync_inodes_sb() is in
+        * progress.  Otherwise, if the down_write protected issue path
+        * blocks heavily, we might end up starting a large number of
+        * switches which will block on the rwsem.
+        */
+       if (!down_read_trylock(&bdi->wb_switch_rwsem))
+               return;
+
        isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
        if (!isw)
-               return;
+               goto out_unlock;
 
        /* find and pin the new wb */
        rcu_read_lock();
@@ -504,12 +532,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
         * Let's continue after I_WB_SWITCH is guaranteed to be visible.
         */
        call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
-       return;
+       goto out_unlock;
 
 out_free:
        if (isw->new_wb)
                wb_put(isw->new_wb);
        kfree(isw);
+out_unlock:
+       up_read(&bdi->wb_switch_rwsem);
 }
 
 /**
@@ -887,6 +917,9 @@ fs_initcall(cgroup_writeback_init);
 
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+
 static struct bdi_writeback *
 locked_inode_to_wb_and_lock_list(struct inode *inode)
        __releases(&inode->i_lock)
@@ -2413,8 +2446,11 @@ void sync_inodes_sb(struct super_block *sb)
                return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
+       /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
+       bdi_down_write_wb_switch_rwsem(bdi);
        bdi_split_work_to_wbs(bdi, &work, false);
        wb_wait_for_completion(bdi, &done);
+       bdi_up_write_wb_switch_rwsem(bdi);
 
        wait_sb_inodes(sb);
 }
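
The writeback hunks above all serve one locking idiom: sync_inodes_sb(), which must not miss any inode, takes the new wb_switch_rwsem for write around the work-issuing phase; the switch work function takes it for read; and the path that merely starts optional new switches uses down_read_trylock so it backs off instead of piling up behind a writer. A compressed sketch of that idiom with illustrative names (not the kernel's):

#include <linux/types.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(switch_rwsem);

/* Optional background work: back off instead of queueing behind a syncer. */
static bool do_optional_switch(void)
{
	if (!down_read_trylock(&switch_rwsem))
		return false;			/* sync in progress, skip this switch */
	/* ... perform (or schedule and later complete) the switch ... */
	up_read(&switch_rwsem);
	return true;
}

/* Mandatory sync: exclude all switches while writeback work is being issued. */
static void sync_all(void)
{
	down_write(&switch_rwsem);
	/* ... issue writeback work; no inode can change wb membership here ... */
	up_write(&switch_rwsem);
}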
index 105576daca4abc35f861e89fc7bafc2b0229223b..798f1253141aee87fe11a1106ada42d33ccaadd2 100644 (file)
@@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
                return -EBADF;
 
        /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
-       if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
-               return -EINVAL;
+       if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
+               ret = -EINVAL;
+               goto fput_and_out;
+       }
 
        /* verify that this is indeed an inotify instance */
        if (unlikely(f.file->f_op != &inotify_fops)) {
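
The inotify_add_watch hunk converts an early return into a goto so that the struct fd reference taken by fdget() earlier in the syscall is dropped on the new error path as well; returning -EINVAL directly would leak it. A minimal sketch of that single-exit cleanup idiom (illustrative function, not the inotify code):

#include <linux/errno.h>
#include <linux/file.h>

static long do_something_with_fd(int fd, unsigned int mask)
{
	struct fd f = fdget(fd);
	long ret;

	if (!f.file)
		return -EBADF;		/* nothing acquired yet, a plain return is fine */

	if (!(mask & 0xff)) {
		ret = -EINVAL;		/* invalid argument ... */
		goto out_fput;		/* ... but the fd reference must still go */
	}

	ret = 0;			/* real work would go here */

out_fput:
	fdput(f);
	return ret;
}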
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
new file mode 100644 (file)
index 0000000..87d9c66
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H
+#define _DT_BINDINGS_POWER_QCOM_RPMPD_H
+
+/* SDM845 Power Domain Indexes */
+#define SDM845_EBI     0
+#define SDM845_MX      1
+#define SDM845_MX_AO   2
+#define SDM845_CX      3
+#define SDM845_CX_AO   4
+#define SDM845_LMX     5
+#define SDM845_LCX     6
+#define SDM845_GFX     7
+#define SDM845_MSS     8
+
+/* SDM845 Power Domain performance levels */
+#define RPMH_REGULATOR_LEVEL_RETENTION 16
+#define RPMH_REGULATOR_LEVEL_MIN_SVS   48
+#define RPMH_REGULATOR_LEVEL_LOW_SVS   64
+#define RPMH_REGULATOR_LEVEL_SVS       128
+#define RPMH_REGULATOR_LEVEL_SVS_L1    192
+#define RPMH_REGULATOR_LEVEL_NOM       256
+#define RPMH_REGULATOR_LEVEL_NOM_L1    320
+#define RPMH_REGULATOR_LEVEL_NOM_L2    336
+#define RPMH_REGULATOR_LEVEL_TURBO     384
+#define RPMH_REGULATOR_LEVEL_TURBO_L1  416
+
+/* MSM8996 Power Domain Indexes */
+#define MSM8996_VDDCX          0
+#define MSM8996_VDDCX_AO       1
+#define MSM8996_VDDCX_VFC      2
+#define MSM8996_VDDMX          3
+#define MSM8996_VDDMX_AO       4
+#define MSM8996_VDDSSCX                5
+#define MSM8996_VDDSSCX_VFC    6
+
+#endif
diff --git a/include/dt-bindings/power/xlnx-zynqmp-power.h b/include/dt-bindings/power/xlnx-zynqmp-power.h
new file mode 100644 (file)
index 0000000..0d9a412
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_ZYNQMP_POWER_H
+#define _DT_BINDINGS_ZYNQMP_POWER_H
+
+#define                PD_USB_0        22
+#define                PD_USB_1        23
+#define                PD_TTC_0        24
+#define                PD_TTC_1        25
+#define                PD_TTC_2        26
+#define                PD_TTC_3        27
+#define                PD_SATA         28
+#define                PD_ETH_0        29
+#define                PD_ETH_1        30
+#define                PD_ETH_2        31
+#define                PD_ETH_3        32
+#define                PD_UART_0       33
+#define                PD_UART_1       34
+#define                PD_SPI_0        35
+#define                PD_SPI_1        36
+#define                PD_I2C_0        37
+#define                PD_I2C_1        38
+#define                PD_SD_0         39
+#define                PD_SD_1         40
+#define                PD_DP           41
+#define                PD_GDMA         42
+#define                PD_ADMA         43
+#define                PD_NAND         44
+#define                PD_QSPI         45
+#define                PD_GPIO         46
+#define                PD_CAN_0        47
+#define                PD_CAN_1        48
+#define                PD_GPU          58
+#define                PD_PCIE         59
+
+#endif
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
new file mode 100644 (file)
index 0000000..8063e83
--- /dev/null
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_G12A_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_G12A_RESET_H
+
+/*     RESET0                                  */
+#define RESET_HIU                      0
+/*                                     1       */
+#define RESET_DOS                      2
+/*                                     3-4     */
+#define RESET_VIU                      5
+#define RESET_AFIFO                    6
+#define RESET_VID_PLL_DIV              7
+/*                                     8-9     */
+#define RESET_VENC                     10
+#define RESET_ASSIST                   11
+#define RESET_PCIE_CTRL_A              12
+#define RESET_VCBUS                    13
+#define RESET_PCIE_PHY                 14
+#define RESET_PCIE_APB                 15
+#define RESET_GIC                      16
+#define RESET_CAPB3_DECODE             17
+/*                                     18      */
+#define RESET_HDMITX_CAPB3             19
+#define RESET_DVALIN_CAPB3             20
+#define RESET_DOS_CAPB3                        21
+/*                                     22      */
+#define RESET_CBUS_CAPB3               23
+#define RESET_AHB_CNTL                 24
+#define RESET_AHB_DATA                 25
+#define RESET_VCBUS_CLK81              26
+/*                                     27-31   */
+/*     RESET1                                  */
+/*                                     32      */
+#define RESET_DEMUX                    33
+#define RESET_USB                      34
+#define RESET_DDR                      35
+/*                                     36      */
+#define RESET_BT656                    37
+#define RESET_AHB_SRAM                 38
+/*                                     39      */
+#define RESET_PARSER                   40
+/*                                     41      */
+#define RESET_ISA                      42
+#define RESET_ETHERNET                 43
+#define RESET_SD_EMMC_A                        44
+#define RESET_SD_EMMC_B                        45
+#define RESET_SD_EMMC_C                        46
+/*                                     47-60 */
+#define RESET_AUDIO_CODEC              61
+/*                                     62-63   */
+/*     RESET2                                  */
+/*                                     64      */
+#define RESET_AUDIO                    65
+#define RESET_HDMITX_PHY               66
+/*                                     67      */
+#define RESET_MIPI_DSI_HOST            68
+#define RESET_ALOCKER                  69
+#define RESET_GE2D                     70
+#define RESET_PARSER_REG               71
+#define RESET_PARSER_FETCH             72
+#define RESET_CTL                      73
+#define RESET_PARSER_TOP               74
+/*                                     75-77   */
+#define RESET_DVALIN                   78
+#define RESET_HDMITX                   79
+/*                                     80-95   */
+/*     RESET3                                  */
+/*                                     96-104  */
+#define RESET_DEMUX_TOP                        105
+#define RESET_DEMUX_DES_PL             106
+#define RESET_DEMUX_S2P_0              107
+#define RESET_DEMUX_S2P_1              108
+#define RESET_DEMUX_0                  109
+#define RESET_DEMUX_1                  110
+#define RESET_DEMUX_2                  111
+/*                                     112-127 */
+/*     RESET4                                  */
+/*                                     128-129 */
+#define RESET_MIPI_DSI_PHY             130
+/*                                     131-132 */
+#define RESET_RDMA                     133
+#define RESET_VENCI                    134
+#define RESET_VENCP                    135
+/*                                     136     */
+#define RESET_VDAC                     137
+/*                                     138-139 */
+#define RESET_VDI6                     140
+#define RESET_VENCL                    141
+#define RESET_I2C_M1                   142
+#define RESET_I2C_M2                   143
+/*                                     144-159 */
+/*     RESET5                                  */
+/*                                     160-191 */
+/*     RESET6                                  */
+#define RESET_GEN                      192
+#define RESET_SPICC0                   193
+#define RESET_SC                       194
+#define RESET_SANA_3                   195
+#define RESET_I2C_M0                   196
+#define RESET_TS_PLL                   197
+#define RESET_SPICC1                   198
+#define RESET_STREAM                   199
+#define RESET_TS_CPU                   200
+#define RESET_UART0                    201
+#define RESET_UART1_2                  202
+#define RESET_ASYNC0                   203
+#define RESET_ASYNC1                   204
+#define RESET_SPIFC0                   205
+#define RESET_I2C_M3                   206
+/*                                     207-223 */
+/*     RESET7                                  */
+#define RESET_USB_DDR_0                        224
+#define RESET_USB_DDR_1                        225
+#define RESET_USB_DDR_2                        226
+#define RESET_USB_DDR_3                        227
+#define RESET_TS_GPU                   228
+#define RESET_DEVICE_MMC_ARB           229
+#define RESET_DVALIN_DMC_PIPL          230
+#define RESET_VID_LOCK                 231
+#define RESET_NIC_DMC_PIPL             232
+#define RESET_DMC_VPU_PIPL             233
+#define RESET_GE2D_DMC_PIPL            234
+#define RESET_HCODEC_DMC_PIPL          235
+#define RESET_WAVE420_DMC_PIPL         236
+#define RESET_HEVCF_DMC_PIPL           237
+/*                                     238-255 */
+
+#endif
diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h
new file mode 100644 (file)
index 0000000..57c5924
--- /dev/null
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Zodiac Inflight Innovations
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ */
+
+#ifndef DT_BINDING_RESET_IMX8MQ_H
+#define DT_BINDING_RESET_IMX8MQ_H
+
+#define IMX8MQ_RESET_A53_CORE_POR_RESET0       0
+#define IMX8MQ_RESET_A53_CORE_POR_RESET1       1
+#define IMX8MQ_RESET_A53_CORE_POR_RESET2       2
+#define IMX8MQ_RESET_A53_CORE_POR_RESET3       3
+#define IMX8MQ_RESET_A53_CORE_RESET0           4
+#define IMX8MQ_RESET_A53_CORE_RESET1           5
+#define IMX8MQ_RESET_A53_CORE_RESET2           6
+#define IMX8MQ_RESET_A53_CORE_RESET3           7
+#define IMX8MQ_RESET_A53_DBG_RESET0            8
+#define IMX8MQ_RESET_A53_DBG_RESET1            9
+#define IMX8MQ_RESET_A53_DBG_RESET2            10
+#define IMX8MQ_RESET_A53_DBG_RESET3            11
+#define IMX8MQ_RESET_A53_ETM_RESET0            12
+#define IMX8MQ_RESET_A53_ETM_RESET1            13
+#define IMX8MQ_RESET_A53_ETM_RESET2            14
+#define IMX8MQ_RESET_A53_ETM_RESET3            15
+#define IMX8MQ_RESET_A53_SOC_DBG_RESET         16
+#define IMX8MQ_RESET_A53_L2RESET               17
+#define IMX8MQ_RESET_SW_NON_SCLR_M4C_RST       18
+#define IMX8MQ_RESET_OTG1_PHY_RESET            19
+#define IMX8MQ_RESET_OTG2_PHY_RESET            20
+#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N     21
+#define IMX8MQ_RESET_MIPI_DSI_RESET_N          22
+#define IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N      23
+#define IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N      24
+#define IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N     25
+#define IMX8MQ_RESET_PCIEPHY                   26
+#define IMX8MQ_RESET_PCIEPHY_PERST             27
+#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN         28
+#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF    29
+#define IMX8MQ_RESET_HDMI_PHY_APB_RESET                30
+#define IMX8MQ_RESET_DISP_RESET                        31
+#define IMX8MQ_RESET_GPU_RESET                 32
+#define IMX8MQ_RESET_VPU_RESET                 33
+#define IMX8MQ_RESET_PCIEPHY2                  34
+#define IMX8MQ_RESET_PCIEPHY2_PERST            35
+#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN                36
+#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF   37
+#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET      38
+#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET   39
+#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET       40
+#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET      41
+#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET   42
+#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET       43
+#define IMX8MQ_RESET_DDRC1_PRST                        44
+#define IMX8MQ_RESET_DDRC1_CORE_RESET          45
+#define IMX8MQ_RESET_DDRC1_PHY_RESET           46
+#define IMX8MQ_RESET_DDRC2_PRST                        47
+#define IMX8MQ_RESET_DDRC2_CORE_RESET          48
+#define IMX8MQ_RESET_DDRC2_PHY_RESET           49
+
+#define IMX8MQ_RESET_NUM                       50
+
+#endif
diff --git a/include/dt-bindings/reset/xlnx-zynqmp-resets.h b/include/dt-bindings/reset/xlnx-zynqmp-resets.h
new file mode 100644 (file)
index 0000000..d44525b
--- /dev/null
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_ZYNQMP_RESETS_H
+#define _DT_BINDINGS_ZYNQMP_RESETS_H
+
+#define                ZYNQMP_RESET_PCIE_CFG           0
+#define                ZYNQMP_RESET_PCIE_BRIDGE        1
+#define                ZYNQMP_RESET_PCIE_CTRL          2
+#define                ZYNQMP_RESET_DP                 3
+#define                ZYNQMP_RESET_SWDT_CRF           4
+#define                ZYNQMP_RESET_AFI_FM5            5
+#define                ZYNQMP_RESET_AFI_FM4            6
+#define                ZYNQMP_RESET_AFI_FM3            7
+#define                ZYNQMP_RESET_AFI_FM2            8
+#define                ZYNQMP_RESET_AFI_FM1            9
+#define                ZYNQMP_RESET_AFI_FM0            10
+#define                ZYNQMP_RESET_GDMA               11
+#define                ZYNQMP_RESET_GPU_PP1            12
+#define                ZYNQMP_RESET_GPU_PP0            13
+#define                ZYNQMP_RESET_GPU                14
+#define                ZYNQMP_RESET_GT                 15
+#define                ZYNQMP_RESET_SATA               16
+#define                ZYNQMP_RESET_ACPU3_PWRON        17
+#define                ZYNQMP_RESET_ACPU2_PWRON        18
+#define                ZYNQMP_RESET_ACPU1_PWRON        19
+#define                ZYNQMP_RESET_ACPU0_PWRON        20
+#define                ZYNQMP_RESET_APU_L2             21
+#define                ZYNQMP_RESET_ACPU3              22
+#define                ZYNQMP_RESET_ACPU2              23
+#define                ZYNQMP_RESET_ACPU1              24
+#define                ZYNQMP_RESET_ACPU0              25
+#define                ZYNQMP_RESET_DDR                26
+#define                ZYNQMP_RESET_APM_FPD            27
+#define                ZYNQMP_RESET_SOFT               28
+#define                ZYNQMP_RESET_GEM0               29
+#define                ZYNQMP_RESET_GEM1               30
+#define                ZYNQMP_RESET_GEM2               31
+#define                ZYNQMP_RESET_GEM3               32
+#define                ZYNQMP_RESET_QSPI               33
+#define                ZYNQMP_RESET_UART0              34
+#define                ZYNQMP_RESET_UART1              35
+#define                ZYNQMP_RESET_SPI0               36
+#define                ZYNQMP_RESET_SPI1               37
+#define                ZYNQMP_RESET_SDIO0              38
+#define                ZYNQMP_RESET_SDIO1              39
+#define                ZYNQMP_RESET_CAN0               40
+#define                ZYNQMP_RESET_CAN1               41
+#define                ZYNQMP_RESET_I2C0               42
+#define                ZYNQMP_RESET_I2C1               43
+#define                ZYNQMP_RESET_TTC0               44
+#define                ZYNQMP_RESET_TTC1               45
+#define                ZYNQMP_RESET_TTC2               46
+#define                ZYNQMP_RESET_TTC3               47
+#define                ZYNQMP_RESET_SWDT_CRL           48
+#define                ZYNQMP_RESET_NAND               49
+#define                ZYNQMP_RESET_ADMA               50
+#define                ZYNQMP_RESET_GPIO               51
+#define                ZYNQMP_RESET_IOU_CC             52
+#define                ZYNQMP_RESET_TIMESTAMP          53
+#define                ZYNQMP_RESET_RPU_R50            54
+#define                ZYNQMP_RESET_RPU_R51            55
+#define                ZYNQMP_RESET_RPU_AMBA           56
+#define                ZYNQMP_RESET_OCM                57
+#define                ZYNQMP_RESET_RPU_PGE            58
+#define                ZYNQMP_RESET_USB0_CORERESET     59
+#define                ZYNQMP_RESET_USB1_CORERESET     60
+#define                ZYNQMP_RESET_USB0_HIBERRESET    61
+#define                ZYNQMP_RESET_USB1_HIBERRESET    62
+#define                ZYNQMP_RESET_USB0_APB           63
+#define                ZYNQMP_RESET_USB1_APB           64
+#define                ZYNQMP_RESET_IPI                65
+#define                ZYNQMP_RESET_APM_LPD            66
+#define                ZYNQMP_RESET_RTC                67
+#define                ZYNQMP_RESET_SYSMON             68
+#define                ZYNQMP_RESET_AFI_FM6            69
+#define                ZYNQMP_RESET_LPD_SWDT           70
+#define                ZYNQMP_RESET_FPD                71
+#define                ZYNQMP_RESET_RPU_DBG1           72
+#define                ZYNQMP_RESET_RPU_DBG0           73
+#define                ZYNQMP_RESET_DBG_LPD            74
+#define                ZYNQMP_RESET_DBG_FPD            75
+#define                ZYNQMP_RESET_APLL               76
+#define                ZYNQMP_RESET_DPLL               77
+#define                ZYNQMP_RESET_VPLL               78
+#define                ZYNQMP_RESET_IOPLL              79
+#define                ZYNQMP_RESET_RPLL               80
+#define                ZYNQMP_RESET_GPO3_PL_0          81
+#define                ZYNQMP_RESET_GPO3_PL_1          82
+#define                ZYNQMP_RESET_GPO3_PL_2          83
+#define                ZYNQMP_RESET_GPO3_PL_3          84
+#define                ZYNQMP_RESET_GPO3_PL_4          85
+#define                ZYNQMP_RESET_GPO3_PL_5          86
+#define                ZYNQMP_RESET_GPO3_PL_6          87
+#define                ZYNQMP_RESET_GPO3_PL_7          88
+#define                ZYNQMP_RESET_GPO3_PL_8          89
+#define                ZYNQMP_RESET_GPO3_PL_9          90
+#define                ZYNQMP_RESET_GPO3_PL_10         91
+#define                ZYNQMP_RESET_GPO3_PL_11         92
+#define                ZYNQMP_RESET_GPO3_PL_12         93
+#define                ZYNQMP_RESET_GPO3_PL_13         94
+#define                ZYNQMP_RESET_GPO3_PL_14         95
+#define                ZYNQMP_RESET_GPO3_PL_15         96
+#define                ZYNQMP_RESET_GPO3_PL_16         97
+#define                ZYNQMP_RESET_GPO3_PL_17         98
+#define                ZYNQMP_RESET_GPO3_PL_18         99
+#define                ZYNQMP_RESET_GPO3_PL_19         100
+#define                ZYNQMP_RESET_GPO3_PL_20         101
+#define                ZYNQMP_RESET_GPO3_PL_21         102
+#define                ZYNQMP_RESET_GPO3_PL_22         103
+#define                ZYNQMP_RESET_GPO3_PL_23         104
+#define                ZYNQMP_RESET_GPO3_PL_24         105
+#define                ZYNQMP_RESET_GPO3_PL_25         106
+#define                ZYNQMP_RESET_GPO3_PL_26         107
+#define                ZYNQMP_RESET_GPO3_PL_27         108
+#define                ZYNQMP_RESET_GPO3_PL_28         109
+#define                ZYNQMP_RESET_GPO3_PL_29         110
+#define                ZYNQMP_RESET_GPO3_PL_30         111
+#define                ZYNQMP_RESET_GPO3_PL_31         112
+#define                ZYNQMP_RESET_RPU_LS             113
+#define                ZYNQMP_RESET_PS_ONLY            114
+#define                ZYNQMP_RESET_PL                 115
+#define                ZYNQMP_RESET_PS_PL0             116
+#define                ZYNQMP_RESET_PS_PL1             117
+#define                ZYNQMP_RESET_PS_PL2             118
+#define                ZYNQMP_RESET_PS_PL3             119
+
+#endif
diff --git a/include/dt-bindings/soc/bcm2835-pm.h b/include/dt-bindings/soc/bcm2835-pm.h
new file mode 100644 (file)
index 0000000..153d75b
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+
+#ifndef _DT_BINDINGS_ARM_BCM2835_PM_H
+#define _DT_BINDINGS_ARM_BCM2835_PM_H
+
+#define BCM2835_POWER_DOMAIN_GRAFX             0
+#define BCM2835_POWER_DOMAIN_GRAFX_V3D         1
+#define BCM2835_POWER_DOMAIN_IMAGE             2
+#define BCM2835_POWER_DOMAIN_IMAGE_PERI                3
+#define BCM2835_POWER_DOMAIN_IMAGE_ISP         4
+#define BCM2835_POWER_DOMAIN_IMAGE_H264                5
+#define BCM2835_POWER_DOMAIN_USB               6
+#define BCM2835_POWER_DOMAIN_DSI0              7
+#define BCM2835_POWER_DOMAIN_DSI1              8
+#define BCM2835_POWER_DOMAIN_CAM0              9
+#define BCM2835_POWER_DOMAIN_CAM1              10
+#define BCM2835_POWER_DOMAIN_CCP2TX            11
+#define BCM2835_POWER_DOMAIN_HDMI              12
+
+#define BCM2835_POWER_DOMAIN_COUNT             13
+
+#define BCM2835_RESET_V3D                      0
+#define BCM2835_RESET_ISP                      1
+#define BCM2835_RESET_H264                     2
+
+#define BCM2835_RESET_COUNT                    3
+
+#endif /* _DT_BINDINGS_ARM_BCM2835_PM_H */
index c31157135598150332d3aa193764c39e18f247b2..07e02d6df5ad9f24b262fe2d852e21235de8119f 100644 (file)
@@ -190,6 +190,7 @@ struct backing_dev_info {
        struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
        struct rb_root cgwb_congested_tree; /* their congested states */
        struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
+       struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
 #else
        struct bdi_writeback_congested *wb_congested;
 #endif
index 5c7e7f859a2493f58ac65a6f10aee7e611b01458..d66bf5f32610adce133e522b7f3852bd08817ff7 100644 (file)
@@ -287,7 +287,7 @@ enum req_opf {
        REQ_OP_DISCARD          = 3,
        /* securely erase sectors */
        REQ_OP_SECURE_ERASE     = 5,
-       /* seset a zone write pointer */
+       /* reset a zone write pointer */
        REQ_OP_ZONE_RESET       = 6,
        /* write the same sector many times */
        REQ_OP_WRITE_SAME       = 7,
index e21c49aba92fe3cd0b0c1d29b8fb3c0de44910e2..031dd4d3c766b12f571b074c8579c7bac9445ca9 100644 (file)
@@ -52,4 +52,7 @@ int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
 int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
                            u8 ctrl, u32 *val);
 
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+                       bool enable, u64 phys_addr);
+
 #endif /* _SC_MISC_API_H */
index 3c3c28eff56a386201fee57477d92df99a21609b..642dab10f65d7340d88eba039f28a44d23274d88 100644 (file)
 /* SMC SIP service Call Function Identifier Prefix */
 #define PM_SIP_SVC                     0xC2000000
 #define PM_GET_TRUSTZONE_VERSION       0xa03
+#define PM_SET_SUSPEND_MODE            0xa02
+#define GET_CALLBACK_DATA              0xa01
 
 /* Number of 32bits values in payload */
 #define PAYLOAD_ARG_CNT        4U
 
+/* Number of arguments for a callback */
+#define CB_ARG_CNT     4
+
+/* Payload size (consists of callback API ID + arguments) */
+#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1)
+
+#define ZYNQMP_PM_MAX_QOS              100U
+
+/* Node capabilities */
+#define        ZYNQMP_PM_CAPABILITY_ACCESS     0x1U
+#define        ZYNQMP_PM_CAPABILITY_CONTEXT    0x2U
+#define        ZYNQMP_PM_CAPABILITY_WAKEUP     0x4U
+#define        ZYNQMP_PM_CAPABILITY_POWER      0x8U
+
 enum pm_api_id {
        PM_GET_API_VERSION = 1,
+       PM_REQUEST_NODE = 13,
+       PM_RELEASE_NODE,
+       PM_SET_REQUIREMENT,
+       PM_RESET_ASSERT = 17,
+       PM_RESET_GET_STATUS,
+       PM_PM_INIT_FINALIZE = 21,
+       PM_GET_CHIPID = 24,
        PM_IOCTL = 34,
        PM_QUERY_DATA,
        PM_CLOCK_ENABLE,
@@ -75,6 +98,149 @@ enum pm_query_id {
        PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
 };
 
+enum zynqmp_pm_reset_action {
+       PM_RESET_ACTION_RELEASE,
+       PM_RESET_ACTION_ASSERT,
+       PM_RESET_ACTION_PULSE,
+};
+
+enum zynqmp_pm_reset {
+       ZYNQMP_PM_RESET_START = 1000,
+       ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START,
+       ZYNQMP_PM_RESET_PCIE_BRIDGE,
+       ZYNQMP_PM_RESET_PCIE_CTRL,
+       ZYNQMP_PM_RESET_DP,
+       ZYNQMP_PM_RESET_SWDT_CRF,
+       ZYNQMP_PM_RESET_AFI_FM5,
+       ZYNQMP_PM_RESET_AFI_FM4,
+       ZYNQMP_PM_RESET_AFI_FM3,
+       ZYNQMP_PM_RESET_AFI_FM2,
+       ZYNQMP_PM_RESET_AFI_FM1,
+       ZYNQMP_PM_RESET_AFI_FM0,
+       ZYNQMP_PM_RESET_GDMA,
+       ZYNQMP_PM_RESET_GPU_PP1,
+       ZYNQMP_PM_RESET_GPU_PP0,
+       ZYNQMP_PM_RESET_GPU,
+       ZYNQMP_PM_RESET_GT,
+       ZYNQMP_PM_RESET_SATA,
+       ZYNQMP_PM_RESET_ACPU3_PWRON,
+       ZYNQMP_PM_RESET_ACPU2_PWRON,
+       ZYNQMP_PM_RESET_ACPU1_PWRON,
+       ZYNQMP_PM_RESET_ACPU0_PWRON,
+       ZYNQMP_PM_RESET_APU_L2,
+       ZYNQMP_PM_RESET_ACPU3,
+       ZYNQMP_PM_RESET_ACPU2,
+       ZYNQMP_PM_RESET_ACPU1,
+       ZYNQMP_PM_RESET_ACPU0,
+       ZYNQMP_PM_RESET_DDR,
+       ZYNQMP_PM_RESET_APM_FPD,
+       ZYNQMP_PM_RESET_SOFT,
+       ZYNQMP_PM_RESET_GEM0,
+       ZYNQMP_PM_RESET_GEM1,
+       ZYNQMP_PM_RESET_GEM2,
+       ZYNQMP_PM_RESET_GEM3,
+       ZYNQMP_PM_RESET_QSPI,
+       ZYNQMP_PM_RESET_UART0,
+       ZYNQMP_PM_RESET_UART1,
+       ZYNQMP_PM_RESET_SPI0,
+       ZYNQMP_PM_RESET_SPI1,
+       ZYNQMP_PM_RESET_SDIO0,
+       ZYNQMP_PM_RESET_SDIO1,
+       ZYNQMP_PM_RESET_CAN0,
+       ZYNQMP_PM_RESET_CAN1,
+       ZYNQMP_PM_RESET_I2C0,
+       ZYNQMP_PM_RESET_I2C1,
+       ZYNQMP_PM_RESET_TTC0,
+       ZYNQMP_PM_RESET_TTC1,
+       ZYNQMP_PM_RESET_TTC2,
+       ZYNQMP_PM_RESET_TTC3,
+       ZYNQMP_PM_RESET_SWDT_CRL,
+       ZYNQMP_PM_RESET_NAND,
+       ZYNQMP_PM_RESET_ADMA,
+       ZYNQMP_PM_RESET_GPIO,
+       ZYNQMP_PM_RESET_IOU_CC,
+       ZYNQMP_PM_RESET_TIMESTAMP,
+       ZYNQMP_PM_RESET_RPU_R50,
+       ZYNQMP_PM_RESET_RPU_R51,
+       ZYNQMP_PM_RESET_RPU_AMBA,
+       ZYNQMP_PM_RESET_OCM,
+       ZYNQMP_PM_RESET_RPU_PGE,
+       ZYNQMP_PM_RESET_USB0_CORERESET,
+       ZYNQMP_PM_RESET_USB1_CORERESET,
+       ZYNQMP_PM_RESET_USB0_HIBERRESET,
+       ZYNQMP_PM_RESET_USB1_HIBERRESET,
+       ZYNQMP_PM_RESET_USB0_APB,
+       ZYNQMP_PM_RESET_USB1_APB,
+       ZYNQMP_PM_RESET_IPI,
+       ZYNQMP_PM_RESET_APM_LPD,
+       ZYNQMP_PM_RESET_RTC,
+       ZYNQMP_PM_RESET_SYSMON,
+       ZYNQMP_PM_RESET_AFI_FM6,
+       ZYNQMP_PM_RESET_LPD_SWDT,
+       ZYNQMP_PM_RESET_FPD,
+       ZYNQMP_PM_RESET_RPU_DBG1,
+       ZYNQMP_PM_RESET_RPU_DBG0,
+       ZYNQMP_PM_RESET_DBG_LPD,
+       ZYNQMP_PM_RESET_DBG_FPD,
+       ZYNQMP_PM_RESET_APLL,
+       ZYNQMP_PM_RESET_DPLL,
+       ZYNQMP_PM_RESET_VPLL,
+       ZYNQMP_PM_RESET_IOPLL,
+       ZYNQMP_PM_RESET_RPLL,
+       ZYNQMP_PM_RESET_GPO3_PL_0,
+       ZYNQMP_PM_RESET_GPO3_PL_1,
+       ZYNQMP_PM_RESET_GPO3_PL_2,
+       ZYNQMP_PM_RESET_GPO3_PL_3,
+       ZYNQMP_PM_RESET_GPO3_PL_4,
+       ZYNQMP_PM_RESET_GPO3_PL_5,
+       ZYNQMP_PM_RESET_GPO3_PL_6,
+       ZYNQMP_PM_RESET_GPO3_PL_7,
+       ZYNQMP_PM_RESET_GPO3_PL_8,
+       ZYNQMP_PM_RESET_GPO3_PL_9,
+       ZYNQMP_PM_RESET_GPO3_PL_10,
+       ZYNQMP_PM_RESET_GPO3_PL_11,
+       ZYNQMP_PM_RESET_GPO3_PL_12,
+       ZYNQMP_PM_RESET_GPO3_PL_13,
+       ZYNQMP_PM_RESET_GPO3_PL_14,
+       ZYNQMP_PM_RESET_GPO3_PL_15,
+       ZYNQMP_PM_RESET_GPO3_PL_16,
+       ZYNQMP_PM_RESET_GPO3_PL_17,
+       ZYNQMP_PM_RESET_GPO3_PL_18,
+       ZYNQMP_PM_RESET_GPO3_PL_19,
+       ZYNQMP_PM_RESET_GPO3_PL_20,
+       ZYNQMP_PM_RESET_GPO3_PL_21,
+       ZYNQMP_PM_RESET_GPO3_PL_22,
+       ZYNQMP_PM_RESET_GPO3_PL_23,
+       ZYNQMP_PM_RESET_GPO3_PL_24,
+       ZYNQMP_PM_RESET_GPO3_PL_25,
+       ZYNQMP_PM_RESET_GPO3_PL_26,
+       ZYNQMP_PM_RESET_GPO3_PL_27,
+       ZYNQMP_PM_RESET_GPO3_PL_28,
+       ZYNQMP_PM_RESET_GPO3_PL_29,
+       ZYNQMP_PM_RESET_GPO3_PL_30,
+       ZYNQMP_PM_RESET_GPO3_PL_31,
+       ZYNQMP_PM_RESET_RPU_LS,
+       ZYNQMP_PM_RESET_PS_ONLY,
+       ZYNQMP_PM_RESET_PL,
+       ZYNQMP_PM_RESET_PS_PL0,
+       ZYNQMP_PM_RESET_PS_PL1,
+       ZYNQMP_PM_RESET_PS_PL2,
+       ZYNQMP_PM_RESET_PS_PL3,
+       ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
+};
+
+enum zynqmp_pm_suspend_reason {
+       SUSPEND_POWER_REQUEST = 201,
+       SUSPEND_ALERT,
+       SUSPEND_SYSTEM_SHUTDOWN,
+};
+
+enum zynqmp_pm_request_ack {
+       ZYNQMP_PM_REQUEST_ACK_NO = 1,
+       ZYNQMP_PM_REQUEST_ACK_BLOCKING,
+       ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
+};
+
 /**
  * struct zynqmp_pm_query_data - PM query data
  * @qid:       query ID
@@ -91,6 +257,7 @@ struct zynqmp_pm_query_data {
 
 struct zynqmp_eemi_ops {
        int (*get_api_version)(u32 *version);
+       int (*get_chipid)(u32 *idcode, u32 *version);
        int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
        int (*clock_enable)(u32 clock_id);
        int (*clock_disable)(u32 clock_id);
@@ -102,8 +269,25 @@ struct zynqmp_eemi_ops {
        int (*clock_setparent)(u32 clock_id, u32 parent_id);
        int (*clock_getparent)(u32 clock_id, u32 *parent_id);
        int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out);
+       int (*reset_assert)(const enum zynqmp_pm_reset reset,
+                           const enum zynqmp_pm_reset_action assert_flag);
+       int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status);
+       int (*init_finalize)(void);
+       int (*set_suspend_mode)(u32 mode);
+       int (*request_node)(const u32 node,
+                           const u32 capabilities,
+                           const u32 qos,
+                           const enum zynqmp_pm_request_ack ack);
+       int (*release_node)(const u32 node);
+       int (*set_requirement)(const u32 node,
+                              const u32 capabilities,
+                              const u32 qos,
+                              const enum zynqmp_pm_request_ack ack);
 };
 
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
+                       u32 arg2, u32 arg3, u32 *ret_payload);
+
 #if IS_REACHABLE(CONFIG_ARCH_ZYNQMP)
 const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
 #else
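
The zynqmp_eemi_ops additions above route reset, suspend-mode and node power requests through the single ops table returned by zynqmp_pm_get_eemi_ops(). A hedged sketch of how a consumer driver might use the new request_node hook, assuming the header lives at include/linux/firmware/xlnx-zynqmp.h as in mainline; the node and capability macros come from the bindings and hunks added earlier in this diff:

#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <dt-bindings/power/xlnx-zynqmp-power.h>

static int enable_sata_power(void)
{
	const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();

	if (!ops || !ops->request_node)
		return -ENODEV;

	/* Ask the PMU firmware for the SATA power domain with full access
	 * capability, maximum QoS, and a blocking acknowledgement. */
	return ops->request_node(PD_SATA, ZYNQMP_PM_CAPABILITY_ACCESS,
				 ZYNQMP_PM_MAX_QOS,
				 ZYNQMP_PM_REQUEST_ACK_BLOCKING);
}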
index 741f567253ef5f91c663b4f37229666c0fb055c2..975553a9f75d9490df6ad2c827790c324388dbb0 100644 (file)
@@ -193,6 +193,7 @@ struct fsl_mc_device {
        struct resource *regions;
        struct fsl_mc_device_irq **irqs;
        struct fsl_mc_resource *resource;
+       struct device_link *consumer_link;
 };
 
 #define to_fsl_mc_device(_dev) \
index d99287327ef23f630e321d4705da2ef0248a257a..f9707d1dcb584a8bf9cac4d614fef4659bb00b12 100644 (file)
@@ -430,7 +430,7 @@ struct hid_local {
  */
 
 struct hid_collection {
-       struct hid_collection *parent;
+       int parent_idx; /* device->collection */
        unsigned type;
        unsigned usage;
        unsigned level;
@@ -658,7 +658,6 @@ struct hid_parser {
        unsigned int         *collection_stack;
        unsigned int          collection_stack_ptr;
        unsigned int          collection_stack_size;
-       struct hid_collection *active_collection;
        struct hid_device    *device;
        unsigned int          scan_flags;
 };
index f0885cc01db66ba8ff42ac8a02d30eb211865069..dcb6977afce931d2c3407f38c0ed2565b778bdc1 100644 (file)
@@ -1159,8 +1159,9 @@ struct hv_ring_buffer_debug_info {
        u32 bytes_avail_towrite;
 };
 
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-                           struct hv_ring_buffer_debug_info *debug_info);
+
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+                               struct hv_ring_buffer_debug_info *debug_info);
 
 /* Vmbus interface */
 #define vmbus_driver_register(driver)  \
index 6756fea18b69f6176bf73c749a977ba15e37d840..e44746de95cdf9193c470d78382797465f39c5ba 100644 (file)
@@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
        case ARPHRD_IPGRE:
        case ARPHRD_VOID:
        case ARPHRD_NONE:
+       case ARPHRD_RAWIP:
                return false;
        default:
                return true;
index c672f34235e74bd47334f5bc97586ca0c42d20f5..4a728dba02e2272d8292bb8e828ab19bbddf16b6 100644 (file)
@@ -260,6 +260,7 @@ struct irq_affinity {
 /**
  * struct irq_affinity_desc - Interrupt affinity descriptor
  * @mask:      cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
  */
 struct irq_affinity_desc {
        struct cpumask  mask;
index 7315977b64dacc4bf02b6cc925d5c80b3055d7f3..ad609617aeb87063ecb4fa8090cdced8df8dc74d 100644 (file)
@@ -235,7 +235,6 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
                        cmd_mask, num_flush, flush_wpq, NULL, NULL);
 }
 
-int nvdimm_security_setup_events(struct nvdimm *nvdimm);
 const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
 const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
 u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h
new file mode 100644 (file)
index 0000000..ed37dc4
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef BCM2835_MFD_PM_H
+#define BCM2835_MFD_PM_H
+
+#include <linux/regmap.h>
+
+struct bcm2835_pm {
+       struct device *dev;
+       void __iomem *base;
+       void __iomem *asb;
+};
+
+#endif /* BCM2835_MFD_PM_H */
index b895f4e798683b4c7d29c171aebe8b8c4026c966..9003593429652ba5a007c3c468dab97112a84900 100644 (file)
@@ -86,6 +86,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
 
 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
 
+unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp);
+
 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
 
 int dev_pm_opp_get_opp_count(struct device *dev);
@@ -158,6 +160,11 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
        return 0;
 }
 
+static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
+{
+       return 0;
+}
+
 static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
 {
        return false;
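dev_pm_opp_get_level() adds a per-OPP performance level alongside the existing frequency and voltage getters, and the stub above keeps !CONFIG_PM_OPP builds working by returning 0; a brief usage sketch:

static unsigned int example_opp_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	unsigned int level;

	if (IS_ERR(opp))
		return 0;

	level = dev_pm_opp_get_level(opp);
	dev_pm_opp_put(opp);
	return level;
}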
diff --git a/include/linux/reset/socfpga.h b/include/linux/reset/socfpga.h
new file mode 100644 (file)
index 0000000..b11a204
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_RESET_SOCFPGA_H__
+#define __LINUX_RESET_SOCFPGA_H__
+
+void __init socfpga_reset_init(void);
+
+#endif /* __LINUX_RESET_SOCFPGA_H__ */
diff --git a/include/linux/reset/sunxi.h b/include/linux/reset/sunxi.h
new file mode 100644 (file)
index 0000000..1ad7fff
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_RESET_SUNXI_H__
+#define __LINUX_RESET_SUNXI_H__
+
+void __init sun6i_reset_init(void);
+
+#endif /* __LINUX_RESET_SUNXI_H__ */
index 10b19a192b2d000121eff7dc91e8f224918d5fec..545f371380574e785abbb0ab3a09eeff9657ce94 100644 (file)
  * called near the end of a function. Otherwise, the list can be
  * re-initialized for later re-use by wake_q_init().
  *
- * Note that this can cause spurious wakeups. schedule() callers
+ * NOTE that this can cause spurious wakeups. schedule() callers
  * must ensure the call is done inside a loop, confirming that the
  * wakeup condition has in fact occurred.
+ *
+ * NOTE that there is no guarantee the wakeup will happen any later than the
+ * wake_q_add() location. Therefore the task must be ready to be woken at the
+ * location of the wake_q_add().
  */
 
 #include <linux/sched.h>
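The two notes above boil down to the usual wait-loop discipline: the wakeup may arrive early or spuriously relative to the wake_q_add() call, so the sleeper must re-check its condition every time it runs. A minimal sketch, with struct example and its done flag as placeholders:

static void example_wait(struct example *e)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(e->done))		/* placeholder condition */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}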
index 69c285b1c9905496ff8b4f1d05c6b5221278075e..eb71a50b8afc8b5da200173980d5bf120c83678c 100644 (file)
@@ -162,6 +162,12 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc);
  */
 int qcom_llcc_probe(struct platform_device *pdev,
                      const struct llcc_slice_config *table, u32 sz);
+
+/**
+ * qcom_llcc_remove - remove the sct table
+ * @pdev: Platform device pointer
+ */
+int qcom_llcc_remove(struct platform_device *pdev);
 #else
 static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid)
 {
index f492e21c4aa2c81f5301116408f84d0703a61a18..5d9d318bcf7a1af4379195ed048f3ddeea1fa0ed 100644 (file)
@@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry)
  */
 static inline bool xa_is_err(const void *entry)
 {
-       return unlikely(xa_is_internal(entry));
+       return unlikely(xa_is_internal(entry) &&
+                       entry >= xa_mk_internal(-MAX_ERRNO));
 }
 
 /**
@@ -286,7 +287,6 @@ struct xarray {
  */
 #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
 
-void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *xa_erase(struct xarray *, unsigned long index);
@@ -303,6 +303,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
                unsigned long max, unsigned int n, xa_mark_t);
 void xa_destroy(struct xarray *);
 
+/**
+ * xa_init_flags() - Initialise an empty XArray with flags.
+ * @xa: XArray.
+ * @flags: XA_FLAG values.
+ *
+ * If you need to initialise an XArray with special flags (eg you need
+ * to take the lock from interrupt context), use this function instead
+ * of xa_init().
+ *
+ * Context: Any context.
+ */
+static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+       spin_lock_init(&xa->xa_lock);
+       xa->xa_flags = flags;
+       xa->xa_head = NULL;
+}
+
 /**
  * xa_init() - Initialise an empty XArray.
  * @xa: XArray.
@@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
 }
 
 /**
- * xa_for_each() - Iterate over a portion of an XArray.
+ * xa_for_each_start() - Iterate over a portion of an XArray.
  * @xa: XArray.
+ * @index: Index of @entry.
  * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index.  You may modify @index during the iteration if you
+ * want to skip or reprocess indices.  It is safe to modify the array
+ * during the iteration.  At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n).  You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_start() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_start().
+ *
+ * Context: Any context.  Takes and releases the RCU lock.
+ */
+#define xa_for_each_start(xa, index, entry, start)                     \
+       for (index = start,                                             \
+            entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);        \
+            entry;                                                     \
+            entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+
+/**
+ * xa_for_each() - Iterate over present entries in an XArray.
+ * @xa: XArray.
  * @index: Index of @entry.
- * @max: Maximum index to retrieve from array.
- * @filter: Selection criterion.
+ * @entry: Entry retrieved from array.
  *
- * Initialise @index to the lowest index you want to retrieve from the
- * array.  During the iteration, @entry will have the value of the entry
- * stored in @xa at @index.  The iteration will skip all entries in the
- * array which do not match @filter.  You may modify @index during the
- * iteration if you want to skip or reprocess indices.  It is safe to modify
- * the array during the iteration.  At the end of the iteration, @entry will
- * be set to NULL and @index will have a value less than or equal to max.
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index.  You may modify @index during the iteration if you want
+ * to skip or reprocess indices.  It is safe to modify the array during the
+ * iteration.  At the end of the iteration, @entry will be set to NULL and
+ * @index will have a value less than or equal to max.
  *
  * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n).  You have
  * to handle your own locking with xas_for_each(), and if you have to unlock
@@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  *
  * Context: Any context.  Takes and releases the RCU lock.
  */
-#define xa_for_each(xa, entry, index, max, filter) \
-       for (entry = xa_find(xa, &index, max, filter); entry; \
-            entry = xa_find_after(xa, &index, max, filter))
+#define xa_for_each(xa, index, entry) \
+       xa_for_each_start(xa, index, entry, 0)
+
+/**
+ * xa_for_each_marked() - Iterate over marked entries in an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @filter: Selection criterion.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index.  The iteration will skip all entries in the array
+ * which do not match @filter.  You may modify @index during the iteration
+ * if you want to skip or reprocess indices.  It is safe to modify the array
+ * during the iteration.  At the end of the iteration, @entry will be set to
+ * NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
+ * You have to handle your own locking with xas_for_each(), and if you have
+ * to unlock after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each_marked() iterator
+ * instead.  The xas_for_each_marked() iterator will expand into more inline
+ * code than xa_for_each_marked().
+ *
+ * Context: Any context.  Takes and releases the RCU lock.
+ */
+#define xa_for_each_marked(xa, index, entry, filter) \
+       for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
+            entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
 
 #define xa_trylock(xa)         spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)            spin_lock(&(xa)->xa_lock)
@@ -393,39 +463,12 @@ void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
                void *entry, gfp_t);
+int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
 int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
 int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
 
-/**
- * __xa_insert() - Store this entry in the XArray unless another entry is
- *                     already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use __xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Any context.  Expects xa_lock to be held on entry.  May
- *         release and reacquire xa_lock if the @gfp flags permit.
- * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int __xa_insert(struct xarray *xa, unsigned long index,
-               void *entry, gfp_t gfp)
-{
-       void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
-       if (!curr)
-               return 0;
-       if (xa_is_err(curr))
-               return xa_err(curr);
-       return -EEXIST;
-}
-
 /**
  * xa_store_bh() - Store this entry in the XArray.
  * @xa: XArray.
@@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
 }
 
 /**
- * xa_store_irq() - Erase this entry from the XArray.
+ * xa_store_irq() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
  * @entry: New entry.
@@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
  * @entry: New entry.
  * @gfp: Memory allocation flags.
  *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present.  Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
  *
- * Context: Process context.  Takes and releases the xa_lock.
- *         May sleep if the @gfp flags permit.
+ * Context: Any context.  Takes and releases the xa_lock.  May sleep if
+ * the @gfp flags permit.
  * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
 static inline int xa_insert(struct xarray *xa, unsigned long index,
                void *entry, gfp_t gfp)
 {
-       void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
-       if (!curr)
-               return 0;
-       if (xa_is_err(curr))
-               return xa_err(curr);
-       return -EEXIST;
+       int err;
+
+       xa_lock(xa);
+       err = __xa_insert(xa, index, entry, gfp);
+       xa_unlock(xa);
+
+       return err;
+}
+
+/**
+ * xa_insert_bh() - Store this entry in the XArray unless another entry is
+ *                     already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present.  Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context.  Takes and releases the xa_lock while
+ * disabling softirqs.  May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
+               void *entry, gfp_t gfp)
+{
+       int err;
+
+       xa_lock_bh(xa);
+       err = __xa_insert(xa, index, entry, gfp);
+       xa_unlock_bh(xa);
+
+       return err;
+}
+
+/**
+ * xa_insert_irq() - Store this entry in the XArray unless another entry is
+ *                     already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present.  Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Process context.  Takes and releases the xa_lock while
+ * disabling interrupts.  May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
+               void *entry, gfp_t gfp)
+{
+       int err;
+
+       xa_lock_irq(xa);
+       err = __xa_insert(xa, index, entry, gfp);
+       xa_unlock_irq(xa);
+
+       return err;
 }
 
 /**
@@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry)
                (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
 }
 
-#define XA_ZERO_ENTRY          xa_mk_internal(256)
-#define XA_RETRY_ENTRY         xa_mk_internal(257)
+#define XA_RETRY_ENTRY         xa_mk_internal(256)
+#define XA_ZERO_ENTRY          xa_mk_internal(257)
 
 /**
  * xa_is_zero() - Is the entry a zero entry?
@@ -995,6 +1097,17 @@ static inline bool xa_is_retry(const void *entry)
        return unlikely(entry == XA_RETRY_ENTRY);
 }
 
+/**
+ * xa_is_advanced() - Is the entry only permitted for the advanced API?
+ * @entry: Entry to be stored in the XArray.
+ *
+ * Return: %true if the entry cannot be stored by the normal API.
+ */
+static inline bool xa_is_advanced(const void *entry)
+{
+       return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
+}
+
 /**
  * typedef xa_update_node_t - A callback function from the XArray.
  * @node: The node which is being processed
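Taken together, the xarray.h changes above rework the normal API: xa_for_each() loses its max/filter arguments and always walks present entries from index 0, xa_for_each_marked() takes over the filtered case, and xa_insert() (now built on the out-of-lined __xa_insert()) treats a reserved slot as occupied. A short usage sketch of the new signatures:

static void example_xarray_usage(struct xarray *xa)
{
	unsigned long index;
	void *entry;
	int err;

	err = xa_insert(xa, 0, xa_mk_value(1), GFP_KERNEL);
	if (err == -EEXIST)
		pr_debug("index 0 already occupied or reserved\n");

	xa_for_each(xa, index, entry)
		pr_debug("index %lu is present\n", index);

	xa_for_each_marked(xa, index, entry, XA_MARK_0)
		pr_debug("index %lu is marked\n", index);
}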
index 3f9aea8087e3c823cdd5a22530ed4b25dd7621e1..8b7eb46ad72d8804c1ffaa3943bb2816113239d8 100644 (file)
@@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
 
 void __ax25_put_route(ax25_route *ax25_rt);
 
+extern rwlock_t ax25_route_lock;
+
+static inline void ax25_route_lock_use(void)
+{
+       read_lock(&ax25_route_lock);
+}
+
+static inline void ax25_route_lock_unuse(void)
+{
+       read_unlock(&ax25_route_lock);
+}
+
 static inline void ax25_put_route(ax25_route *ax25_rt)
 {
        if (refcount_dec_and_test(&ax25_rt->refcount))
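The new lock_use/lock_unuse helpers encode the calling convention spelled out later in ax25_route.c: ax25_get_route() no longer takes a reference, so the returned route is only valid while the read lock is held. A minimal sketch:

static void example_route_lookup(ax25_address *dst)
{
	ax25_route *route;

	ax25_route_lock_use();
	route = ax25_get_route(dst, NULL);
	if (route) {
		/* route->dev, route->digipeat etc. are only valid here */
	}
	ax25_route_lock_unuse();
}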
index 4be1aa4435ae5e97a6b0b1ac42fcc808102e80b8..7800e12ee042ced0669b14c8b3cedb777bb43440 100644 (file)
@@ -73,6 +73,8 @@ enum rpi_firmware_property_tag {
        RPI_FIRMWARE_GET_CUSTOMER_OTP =                       0x00030021,
        RPI_FIRMWARE_GET_DOMAIN_STATE =                       0x00030030,
        RPI_FIRMWARE_GET_THROTTLED =                          0x00030046,
+       RPI_FIRMWARE_GET_CLOCK_MEASURED =                     0x00030047,
+       RPI_FIRMWARE_NOTIFY_REBOOT =                          0x00030048,
        RPI_FIRMWARE_SET_CLOCK_STATE =                        0x00038001,
        RPI_FIRMWARE_SET_CLOCK_RATE =                         0x00038002,
        RPI_FIRMWARE_SET_VOLTAGE =                            0x00038003,
@@ -86,6 +88,8 @@ enum rpi_firmware_property_tag {
        RPI_FIRMWARE_SET_GPIO_CONFIG =                        0x00038043,
        RPI_FIRMWARE_GET_PERIPH_REG =                         0x00030045,
        RPI_FIRMWARE_SET_PERIPH_REG =                         0x00038045,
+       RPI_FIRMWARE_GET_POE_HAT_VAL =                        0x00030049,
+       RPI_FIRMWARE_SET_POE_HAT_VAL =                        0x00030050,
 
 
        /* Dispmanx TAGS */
index 3fbd71c27ba30f6e5c1b5adc6e1d64ac75707128..672cfb58046f8d54ef352029226d2f93b22a230a 100644 (file)
@@ -57,7 +57,8 @@ struct dpaa2_io_desc {
        u32 qman_version;
 };
 
-struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
+                                struct device *dev);
 
 void dpaa2_io_down(struct dpaa2_io *d);
 
@@ -90,10 +91,14 @@ struct dpaa2_io_notification_ctx {
        void *dpio_private;
 };
 
+int dpaa2_io_get_cpu(struct dpaa2_io *d);
+
 int dpaa2_io_service_register(struct dpaa2_io *service,
-                             struct dpaa2_io_notification_ctx *ctx);
+                             struct dpaa2_io_notification_ctx *ctx,
+                             struct device *dev);
 void dpaa2_io_service_deregister(struct dpaa2_io *service,
-                                struct dpaa2_io_notification_ctx *ctx);
+                                struct dpaa2_io_notification_ctx *ctx,
+                                struct device *dev);
 int dpaa2_io_service_rearm(struct dpaa2_io *service,
                           struct dpaa2_io_notification_ctx *ctx);
 
@@ -106,9 +111,9 @@ int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
                                const struct dpaa2_fd *fd);
 int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
                                u16 qdbin, const struct dpaa2_fd *fd);
-int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
+int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid,
                             const u64 *buffers, unsigned int num_buffers);
-int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
+int dpaa2_io_service_acquire(struct dpaa2_io *d, u16 bpid,
                             u64 *buffers, unsigned int num_buffers);
 
 struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
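With the consumer's struct device now threaded through the service calls above, registration and teardown are expected to pass the same device pointer, roughly as sketched below (notification context setup abbreviated):

static int example_dpio_register(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx,
				 struct device *consumer)
{
	int err;

	err = dpaa2_io_service_register(service, ctx, consumer);
	if (err)
		return err;

	/* ... use the service, then tear down with the same device ... */
	dpaa2_io_service_deregister(service, ctx, consumer);
	return 0;
}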
index b02f926a02166af960c25da152ca457c905d4451..45960aa08f4aa18374f49866094325407dbae626 100644 (file)
@@ -23,6 +23,7 @@
 #include <soc/tegra/bpmp-abi.h>
 
 struct tegra_bpmp_clk;
+struct tegra_bpmp_ops;
 
 struct tegra_bpmp_soc {
        struct {
@@ -32,6 +33,8 @@ struct tegra_bpmp_soc {
                        unsigned int timeout;
                } cpu_tx, thread, cpu_rx;
        } channels;
+
+       const struct tegra_bpmp_ops *ops;
        unsigned int num_resets;
 };
 
@@ -47,6 +50,7 @@ struct tegra_bpmp_channel {
        struct tegra_bpmp_mb_data *ob;
        struct completion completion;
        struct tegra_ivc *ivc;
+       unsigned int index;
 };
 
 typedef void (*tegra_bpmp_mrq_handler_t)(unsigned int mrq,
@@ -63,12 +67,7 @@ struct tegra_bpmp_mrq {
 struct tegra_bpmp {
        const struct tegra_bpmp_soc *soc;
        struct device *dev;
-
-       struct {
-               struct gen_pool *pool;
-               dma_addr_t phys;
-               void *virt;
-       } tx, rx;
+       void *priv;
 
        struct {
                struct mbox_client client;
@@ -173,6 +172,8 @@ static inline bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp,
 }
 #endif
 
+void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp);
+
 #if IS_ENABLED(CONFIG_CLK_TEGRA_BPMP)
 int tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp);
 #else
index a9db1b501de1f668b356170aa1a8d80652c2f190..b32ee5d82dd4348cba4ad3719df0953bb9980827 100644 (file)
@@ -161,7 +161,6 @@ enum tegra_io_pad {
 #define TEGRA_IO_RAIL_LVDS     TEGRA_IO_PAD_LVDS
 
 #ifdef CONFIG_SOC_TEGRA_PMC
-int tegra_powergate_is_powered(unsigned int id);
 int tegra_powergate_power_on(unsigned int id);
 int tegra_powergate_power_off(unsigned int id);
 int tegra_powergate_remove_clamping(unsigned int id);
@@ -182,11 +181,6 @@ void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
 void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
 
 #else
-static inline int tegra_powergate_is_powered(unsigned int id)
-{
-       return -ENOSYS;
-}
-
 static inline int tegra_powergate_power_on(unsigned int id)
 {
        return -ENOSYS;
index 8ec1de856ee7e17f0d080623544a17e09d8746bd..e665f111b0d27548015b2b35e8f4c7bfab098df1 100644 (file)
@@ -985,6 +985,12 @@ struct snd_soc_dai_link {
        /* Do not create a PCM for this DAI link (Backend link) */
        unsigned int ignore:1;
 
+       /*
+        * This driver uses legacy platform naming. Set by the core, machine
+        * drivers should not modify this value.
+        */
+       unsigned int legacy_platform:1;
+
        struct list_head list; /* DAI link list of the soc card */
        struct snd_soc_dobj dobj; /* For topology */
 };
similarity index 83%
rename from include/uapi/linux/android/binder_ctl.h
rename to include/uapi/linux/android/binderfs.h
index 65b2efd1a0a542bfa17deeb2f1ee7aff5f91e6de..87410477aea91cdccf57af137c74b2c8adc08187 100644 (file)
@@ -4,8 +4,8 @@
  *
  */
 
-#ifndef _UAPI_LINUX_BINDER_CTL_H
-#define _UAPI_LINUX_BINDER_CTL_H
+#ifndef _UAPI_LINUX_BINDERFS_H
+#define _UAPI_LINUX_BINDERFS_H
 
 #include <linux/android/binder.h>
 #include <linux/types.h>
@@ -22,8 +22,8 @@
  */
 struct binderfs_device {
        char name[BINDERFS_MAX_NAME + 1];
-       __u8 major;
-       __u8 minor;
+       __u32 major;
+       __u32 minor;
 };
 
 /**
@@ -31,5 +31,5 @@ struct binderfs_device {
  */
 #define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)
 
-#endif /* _UAPI_LINUX_BINDER_CTL_H */
+#endif /* _UAPI_LINUX_BINDERFS_H */
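From userspace, the widened major/minor fields are filled in by the kernel on BINDER_CTL_ADD; a hedged sketch, assuming binderfs is mounted at /dev/binderfs (the mount point is an administrator's choice, not fixed by this header):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binderfs.h>

int main(void)
{
	struct binderfs_device dev;
	int fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		return 1;

	memset(&dev, 0, sizeof(dev));
	strcpy(dev.name, "example-binder");	/* illustrative device name */
	if (ioctl(fd, BINDER_CTL_ADD, &dev) == 0)
		printf("created %s (%u:%u)\n", dev.name, dev.major, dev.minor);
	return 0;
}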
 
index 6fa38d001d84ff5af90fe6014e7874d18f8e2bc1..498eec813494c6ccbef5d939211088b90bb534d9 100644 (file)
@@ -138,6 +138,7 @@ struct blk_zone_range {
  * @BLKRESETZONE: Reset the write pointer of the zones in the specified
  *                sector range. The sector range must be zone aligned.
  * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
+ * @BLKGETNRZONES: Get the total number of zones of the device.
  */
 #define BLKREPORTZONE  _IOWR(0x12, 130, struct blk_zone_report)
 #define BLKRESETZONE   _IOW(0x12, 131, struct blk_zone_range)
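The new BLKGETNRZONES ioctl complements BLKGETZONESZ by reporting the zone count directly; a small userspace sketch (the device path is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	__u32 nr_zones = 0;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* example zoned device */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKGETNRZONES, &nr_zones) == 0)
		printf("%u zones\n", (unsigned int)nr_zones);
	return 0;
}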
index fb78f6f500f37d19bf5dc531ba90c1e5f23746a5..f056b2a00d5c7695a69826786b5e2336c79c3c31 100644 (file)
  */
 
 struct input_event {
-#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
+#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
        struct timeval time;
 #define input_event_sec time.tv_sec
 #define input_event_usec time.tv_usec
 #else
        __kernel_ulong_t __sec;
+#if defined(__sparc__) && defined(__arch64__)
+       unsigned int __usec;
+#else
        __kernel_ulong_t __usec;
+#endif
 #define input_event_sec  __sec
 #define input_event_usec __usec
 #endif
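Because the layout of struct input_event now differs between ABIs (sparc64 in particular), portable userspace should read the timestamp through the accessor macros rather than the raw members; a minimal sketch:

#include <stdio.h>
#include <linux/input.h>

static void example_print_timestamp(const struct input_event *ev)
{
	printf("%lld.%06lld\n",
	       (long long)ev->input_event_sec,
	       (long long)ev->input_event_usec);
}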
index 59a260712a5666d19cc617578bde3d4a273bf04d..2ca9164a79bfdcc1c48844692c874b823e6f87a0 100644 (file)
@@ -1,17 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <asm/dma-mapping.h>
-#include <linux/dma-mapping.h>
-
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
-       if (dev && dev->archdata.dev_dma_ops)
-               return dev->archdata.dev_dma_ops;
-       return get_arch_dma_ops(NULL);
-}
+#ifndef _XEN_ARM_PAGE_COHERENT_H
+#define _XEN_ARM_PAGE_COHERENT_H
 
 void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
@@ -21,87 +10,7 @@ void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                unsigned long attrs);
 void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
 void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-       return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-               void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-       xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-            dma_addr_t dev_addr, unsigned long offset, size_t size,
-            enum dma_data_direction dir, unsigned long attrs)
-{
-       unsigned long page_pfn = page_to_xen_pfn(page);
-       unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-       unsigned long compound_pages =
-               (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-       bool local = (page_pfn <= dev_pfn) &&
-               (dev_pfn - page_pfn < compound_pages);
-
-       /*
-        * Dom0 is mapped 1:1, while the Linux page can span across
-        * multiple Xen pages, it's not possible for it to contain a
-        * mix of local and foreign Xen pages. So if the first xen_pfn
-        * == mfn the page is local otherwise it's a foreign page
-        * grant-mapped in dom0. If the page is local we can safely
-        * call the native dma_ops function, otherwise we call the xen
-        * specific function.
-        */
-       if (local)
-               xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-       else
-               __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-       unsigned long pfn = PFN_DOWN(handle);
-       /*
-        * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-        * multiple Xen page, it's not possible to have a mix of local and
-        * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-        * foreign mfn will always return false. If the page is local we can
-        * safely call the native dma_ops function, otherwise we call the xen
-        * specific function.
-        */
-       if (pfn_valid(pfn)) {
-               if (xen_get_dma_ops(hwdev)->unmap_page)
-                       xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-       } else
-               __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       unsigned long pfn = PFN_DOWN(handle);
-       if (pfn_valid(pfn)) {
-               if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
-                       xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-       } else
-               __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       unsigned long pfn = PFN_DOWN(handle);
-       if (pfn_valid(pfn)) {
-               if (xen_get_dma_ops(hwdev)->sync_single_for_device)
-                       xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-       } else
-               __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#endif /* _XEN_ARM_PAGE_COHERENT_H */
index 284f2fe9a2933fb541c4491e00792b514d40fa10..3fb7be0019644e136a6c31e5bfd47884777ced02 100644 (file)
@@ -307,7 +307,7 @@ void rcuwait_wake_up(struct rcuwait *w)
         *        MB (A)              MB (B)
         *    [L] cond            [L] tsk
         */
-       smp_rmb(); /* (B) */
+       smp_mb(); /* (B) */
 
        /*
         * Avoid using task_rcu_dereference() magic as long as we are careful,
index be3bff2315ff75c46565a42c126f05641c753b98..fdd312da09927ad43e2b6dabd7b089d3c2e8393b 100644 (file)
@@ -1452,11 +1452,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
        if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
                return;
 
-       /*
-        * Queue the task for later wakeup for after we've released
-        * the hb->lock. wake_q_add() grabs reference to p.
-        */
-       wake_q_add(wake_q, p);
+       get_task_struct(p);
        __unqueue_futex(q);
        /*
         * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
@@ -1466,6 +1462,13 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
         * plist_del in __unqueue_futex().
         */
        smp_store_release(&q->lock_ptr, NULL);
+
+       /*
+        * Queue the task for later wakeup for after we've released
+        * the hb->lock. wake_q_add() grabs reference to p.
+        */
+       wake_q_add(wake_q, p);
+       put_task_struct(p);
 }
 
 /*
index ee062b7939d3fce9f2813e2bb8f37c5159448631..ef8ad36cadcf0e0b048416b219632a9edcc80636 100644 (file)
@@ -457,7 +457,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
 
        /* Validate affinity mask(s) */
        if (affinity) {
-               for (i = 0; i < cnt; i++, i++) {
+               for (i = 0; i < cnt; i++) {
                        if (cpumask_empty(&affinity[i].mask))
                                return -EINVAL;
                }
index a4888ce4667a489ccd67afc1e6de40762936b3b3..84b54a17b95d3f3a1b4f2148c58e958f0d534dec 100644 (file)
@@ -393,6 +393,9 @@ int irq_setup_affinity(struct irq_desc *desc)
        }
 
        cpumask_and(&mask, cpu_online_mask, set);
+       if (cpumask_empty(&mask))
+               cpumask_copy(&mask, cpu_online_mask);
+
        if (node != NUMA_NO_NODE) {
                const struct cpumask *nodemask = cpumask_of_node(node);
 
index 09b180063ee11681f30a0f7e9b71a01acb3e7cc0..50d9af615dc49850acb3e0dd263cebf14130a8b6 100644 (file)
@@ -198,15 +198,22 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
                woken++;
                tsk = waiter->task;
 
-               wake_q_add(wake_q, tsk);
+               get_task_struct(tsk);
                list_del(&waiter->list);
                /*
-                * Ensure that the last operation is setting the reader
+                * Ensure calling get_task_struct() before setting the reader
                 * waiter to nil such that rwsem_down_read_failed() cannot
                 * race with do_exit() by always holding a reference count
                 * to the task to wakeup.
                 */
                smp_store_release(&waiter->task, NULL);
+               /*
+                * Ensure issuing the wakeup (either by us or someone else)
+                * after setting the reader waiter to nil.
+                */
+               wake_q_add(wake_q, tsk);
+               /* wake_q_add() already takes the task ref */
+               put_task_struct(tsk);
        }
 
        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
index a674c7db2f29db1e35af199e09db84aeb65cde66..d8d76a65cfdd55538378b4725b6c1219d25f97a9 100644 (file)
@@ -396,6 +396,18 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 #endif
 
+/**
+ * wake_q_add() - queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ */
 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
 {
        struct wake_q_node *node = &task->wake_q;
@@ -405,10 +417,11 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
         * its already queued (either by us or someone else) and will get the
         * wakeup due to that.
         *
-        * This cmpxchg() executes a full barrier, which pairs with the full
-        * barrier executed by the wakeup in wake_up_q().
+        * In order to ensure that a pending wakeup will observe our pending
+        * state, even in the failed case, an explicit smp_mb() must be used.
         */
-       if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+       smp_mb__before_atomic();
+       if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
                return;
 
        get_task_struct(task);
index 8f0644af40be7e5869f8f664775183bdb07a0f56..80f955210861a591fd383028ba6b045903be26fd 100644 (file)
@@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
+       timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
 
        /*
         * This acts as a modification timestamp for the timer,
index 4676c0a1eeca0f7f7c559176b2c580cedcccce6d..c596a957f7643e56b45956a61ed999980568ad53 100644 (file)
@@ -199,7 +199,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
                XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
                xa_set_mark(xa, index + 1, XA_MARK_0);
                XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
-               xa_set_mark(xa, index + 2, XA_MARK_1);
+               xa_set_mark(xa, index + 2, XA_MARK_2);
                XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
                xa_store_order(xa, index, order, xa_mk_index(index),
                                GFP_KERNEL);
@@ -209,8 +209,8 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
                        void *entry;
 
                        XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
-                       XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
-                       XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
+                       XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
+                       XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
 
                        /* We should see two elements in the array */
                        rcu_read_lock();
@@ -357,7 +357,7 @@ static noinline void check_cmpxchg(struct xarray *xa)
 static noinline void check_reserve(struct xarray *xa)
 {
        void *entry;
-       unsigned long index = 0;
+       unsigned long index;
 
        /* An array with a reserved entry is not empty */
        XA_BUG_ON(xa, !xa_empty(xa));
@@ -382,10 +382,12 @@ static noinline void check_reserve(struct xarray *xa)
        xa_erase_index(xa, 12345678);
        XA_BUG_ON(xa, !xa_empty(xa));
 
-       /* And so does xa_insert */
+       /* But xa_insert does not */
        xa_reserve(xa, 12345678, GFP_KERNEL);
-       XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
-       xa_erase_index(xa, 12345678);
+       XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
+                       -EEXIST);
+       XA_BUG_ON(xa, xa_empty(xa));
+       XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
        XA_BUG_ON(xa, !xa_empty(xa));
 
        /* Can iterate through a reserved entry */
@@ -393,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa)
        xa_reserve(xa, 6, GFP_KERNEL);
        xa_store_index(xa, 7, GFP_KERNEL);
 
-       xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+       xa_for_each(xa, index, entry) {
                XA_BUG_ON(xa, index != 5 && index != 7);
        }
        xa_destroy(xa);
@@ -812,17 +814,16 @@ static noinline void check_find_1(struct xarray *xa)
 static noinline void check_find_2(struct xarray *xa)
 {
        void *entry;
-       unsigned long i, j, index = 0;
+       unsigned long i, j, index;
 
-       xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+       xa_for_each(xa, index, entry) {
                XA_BUG_ON(xa, true);
        }
 
        for (i = 0; i < 1024; i++) {
                xa_store_index(xa, index, GFP_KERNEL);
                j = 0;
-               index = 0;
-               xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+               xa_for_each(xa, index, entry) {
                        XA_BUG_ON(xa, xa_mk_index(index) != entry);
                        XA_BUG_ON(xa, index != j++);
                }
@@ -839,6 +840,7 @@ static noinline void check_find_3(struct xarray *xa)
 
        for (i = 0; i < 100; i++) {
                for (j = 0; j < 100; j++) {
+                       rcu_read_lock();
                        for (k = 0; k < 100; k++) {
                                xas_set(&xas, j);
                                xas_for_each_marked(&xas, entry, k, XA_MARK_0)
@@ -847,6 +849,7 @@ static noinline void check_find_3(struct xarray *xa)
                                        XA_BUG_ON(xa,
                                                xas.xa_node != XAS_RESTART);
                        }
+                       rcu_read_unlock();
                }
                xa_store_index(xa, i, GFP_KERNEL);
                xa_set_mark(xa, i, XA_MARK_0);
@@ -1183,6 +1186,35 @@ static noinline void check_store_range(struct xarray *xa)
        }
 }
 
+static void check_align_1(struct xarray *xa, char *name)
+{
+       int i;
+       unsigned int id;
+       unsigned long index;
+       void *entry;
+
+       for (i = 0; i < 8; i++) {
+               id = 0;
+               XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL)
+                               != 0);
+               XA_BUG_ON(xa, id != i);
+       }
+       xa_for_each(xa, index, entry)
+               XA_BUG_ON(xa, xa_is_err(entry));
+       xa_destroy(xa);
+}
+
+static noinline void check_align(struct xarray *xa)
+{
+       char name[] = "Motorola 68000";
+
+       check_align_1(xa, name);
+       check_align_1(xa, name + 1);
+       check_align_1(xa, name + 2);
+       check_align_1(xa, name + 3);
+//     check_align_2(xa, name);
+}
+
 static LIST_HEAD(shadow_nodes);
 
 static void test_update_node(struct xa_node *node)
@@ -1332,6 +1364,7 @@ static int xarray_checks(void)
        check_create_range(&array);
        check_store_range(&array);
        check_store_iter(&array);
+       check_align(&xa0);
 
        check_workingset(&array, 0);
        check_workingset(&array, 64);
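The rcu_read_lock()/rcu_read_unlock() pairing added around the xas loop in check_find_3() above reflects the rule the xa_* documentation keeps pointing at: the advanced xas_* interface leaves locking to the caller, which for a read-only walk usually means the RCU read lock plus an explicit retry check, roughly:

static void example_xas_walk(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		/* use entry under RCU */
	}
	rcu_read_unlock();
}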
index 5f3f9311de893a2975990060f5dfae6a6fb3d462..81c3171ddde9cce9d6e4898352b5addf5393de4a 100644 (file)
@@ -232,6 +232,8 @@ void *xas_load(struct xa_state *xas)
                if (xas->xa_shift > node->shift)
                        break;
                entry = xas_descend(xas, node);
+               if (node->shift == 0)
+                       break;
        }
        return entry;
 }
@@ -506,7 +508,7 @@ static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
        for (;;) {
                void *entry = xa_entry_locked(xas->xa, node, offset);
 
-               if (xa_is_node(entry)) {
+               if (node->shift && xa_is_node(entry)) {
                        node = xa_to_node(entry);
                        offset = 0;
                        continue;
@@ -604,6 +606,7 @@ static int xas_expand(struct xa_state *xas, void *head)
 /*
  * xas_create() - Create a slot to store an entry in.
  * @xas: XArray operation state.
+ * @allow_root: %true if we can store the entry in the root directly
  *
  * Most users will not need to call this function directly, as it is called
  * by xas_store().  It is useful for doing conditional store operations
@@ -613,7 +616,7 @@ static int xas_expand(struct xa_state *xas, void *head)
  * If the slot was newly created, returns %NULL.  If it failed to create the
  * slot, returns %NULL and indicates the error in @xas.
  */
-static void *xas_create(struct xa_state *xas)
+static void *xas_create(struct xa_state *xas, bool allow_root)
 {
        struct xarray *xa = xas->xa;
        void *entry;
@@ -628,6 +631,8 @@ static void *xas_create(struct xa_state *xas)
                shift = xas_expand(xas, entry);
                if (shift < 0)
                        return NULL;
+               if (!shift && !allow_root)
+                       shift = XA_CHUNK_SHIFT;
                entry = xa_head_locked(xa);
                slot = &xa->xa_head;
        } else if (xas_error(xas)) {
@@ -687,7 +692,7 @@ void xas_create_range(struct xa_state *xas)
        xas->xa_sibs = 0;
 
        for (;;) {
-               xas_create(xas);
+               xas_create(xas, true);
                if (xas_error(xas))
                        goto restore;
                if (xas->xa_index <= (index | XA_CHUNK_MASK))
@@ -754,7 +759,7 @@ void *xas_store(struct xa_state *xas, void *entry)
        bool value = xa_is_value(entry);
 
        if (entry)
-               first = xas_create(xas);
+               first = xas_create(xas, !xa_is_node(entry));
        else
                first = xas_load(xas);
 
@@ -1250,35 +1255,6 @@ void *xas_find_conflict(struct xa_state *xas)
 }
 EXPORT_SYMBOL_GPL(xas_find_conflict);
 
-/**
- * xa_init_flags() - Initialise an empty XArray with flags.
- * @xa: XArray.
- * @flags: XA_FLAG values.
- *
- * If you need to initialise an XArray with special flags (eg you need
- * to take the lock from interrupt context), use this function instead
- * of xa_init().
- *
- * Context: Any context.
- */
-void xa_init_flags(struct xarray *xa, gfp_t flags)
-{
-       unsigned int lock_type;
-       static struct lock_class_key xa_lock_irq;
-       static struct lock_class_key xa_lock_bh;
-
-       spin_lock_init(&xa->xa_lock);
-       xa->xa_flags = flags;
-       xa->xa_head = NULL;
-
-       lock_type = xa_lock_type(xa);
-       if (lock_type == XA_LOCK_IRQ)
-               lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
-       else if (lock_type == XA_LOCK_BH)
-               lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
-}
-EXPORT_SYMBOL(xa_init_flags);
-
 /**
  * xa_load() - Load an entry from an XArray.
  * @xa: XArray.
@@ -1308,7 +1284,6 @@ static void *xas_result(struct xa_state *xas, void *curr)
 {
        if (xa_is_zero(curr))
                return NULL;
-       XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
        if (xas_error(xas))
                curr = xas->xa_node;
        return curr;
@@ -1378,7 +1353,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
        XA_STATE(xas, xa, index);
        void *curr;
 
-       if (WARN_ON_ONCE(xa_is_internal(entry)))
+       if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return XA_ERROR(-EINVAL);
        if (xa_track_free(xa) && !entry)
                entry = XA_ZERO_ENTRY;
@@ -1444,7 +1419,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
        XA_STATE(xas, xa, index);
        void *curr;
 
-       if (WARN_ON_ONCE(xa_is_internal(entry)))
+       if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return XA_ERROR(-EINVAL);
        if (xa_track_free(xa) && !entry)
                entry = XA_ZERO_ENTRY;
@@ -1464,6 +1439,47 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 }
 EXPORT_SYMBOL(__xa_cmpxchg);
 
+/**
+ * __xa_insert() - Store this entry in the XArray if no entry is present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present.  Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context.  Expects xa_lock to be held on entry.  May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+       XA_STATE(xas, xa, index);
+       void *curr;
+
+       if (WARN_ON_ONCE(xa_is_advanced(entry)))
+               return -EINVAL;
+       if (!entry)
+               entry = XA_ZERO_ENTRY;
+
+       do {
+               curr = xas_load(&xas);
+               if (!curr) {
+                       xas_store(&xas, entry);
+                       if (xa_track_free(xa))
+                               xas_clear_mark(&xas, XA_FREE_MARK);
+               } else {
+                       xas_set_err(&xas, -EEXIST);
+               }
+       } while (__xas_nomem(&xas, gfp));
+
+       return xas_error(&xas);
+}
+EXPORT_SYMBOL(__xa_insert);
+
 /**
  * __xa_reserve() - Reserve this index in the XArray.
  * @xa: XArray.
@@ -1567,7 +1583,7 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
                        if (last + 1)
                                order = __ffs(last + 1);
                        xas_set_order(&xas, last, order);
-                       xas_create(&xas);
+                       xas_create(&xas, true);
                        if (xas_error(&xas))
                                goto unlock;
                }
@@ -1609,7 +1625,7 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
        XA_STATE(xas, xa, 0);
        int err;
 
-       if (WARN_ON_ONCE(xa_is_internal(entry)))
+       if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return -EINVAL;
        if (WARN_ON_ONCE(!xa_track_free(xa)))
                return -EINVAL;
index 8a8bb8796c6c43cb711c0f618c488df188853623..72e6d0c55cfad9b51b173cca10c34011b035879e 100644 (file)
@@ -689,6 +689,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
        mutex_init(&bdi->cgwb_release_mutex);
+       init_rwsem(&bdi->wb_switch_rwsem);
 
        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
index f0f91461a9f45c350fd8fccc0ccaf6c9578e4b3a..218099b5ed31d1e971d8d64b0951a31b8e310c01 100644 (file)
@@ -42,14 +42,72 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
        return 0;
 }
 
-static int mincore_unmapped_range(unsigned long addr, unsigned long end,
-                                  struct mm_walk *walk)
+/*
+ * Later we can get more picky about what "in core" means precisely.
+ * For now, simply check to see if the page is in the page cache,
+ * and is up to date; i.e. that no page-in operation would be required
+ * at this time if an application were to map and access this page.
+ */
+static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
+{
+       unsigned char present = 0;
+       struct page *page;
+
+       /*
+        * When tmpfs swaps out a page from a file, any process mapping that
+        * file will not get a swp_entry_t in its pte, but rather it is like
+        * any other file mapping (ie. marked !present and faulted in with
+        * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
+        */
+#ifdef CONFIG_SWAP
+       if (shmem_mapping(mapping)) {
+               page = find_get_entry(mapping, pgoff);
+               /*
+                * shmem/tmpfs may return swap: account for swapcache
+                * page too.
+                */
+               if (xa_is_value(page)) {
+                       swp_entry_t swp = radix_to_swp_entry(page);
+                       page = find_get_page(swap_address_space(swp),
+                                            swp_offset(swp));
+               }
+       } else
+               page = find_get_page(mapping, pgoff);
+#else
+       page = find_get_page(mapping, pgoff);
+#endif
+       if (page) {
+               present = PageUptodate(page);
+               put_page(page);
+       }
+
+       return present;
+}
+
+static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
+                               struct vm_area_struct *vma, unsigned char *vec)
 {
-       unsigned char *vec = walk->private;
        unsigned long nr = (end - addr) >> PAGE_SHIFT;
+       int i;
 
-       memset(vec, 0, nr);
-       walk->private += nr;
+       if (vma->vm_file) {
+               pgoff_t pgoff;
+
+               pgoff = linear_page_index(vma, addr);
+               for (i = 0; i < nr; i++, pgoff++)
+                       vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+       } else {
+               for (i = 0; i < nr; i++)
+                       vec[i] = 0;
+       }
+       return nr;
+}
+
+static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+                                  struct mm_walk *walk)
+{
+       walk->private += __mincore_unmapped_range(addr, end,
+                                                 walk->vma, walk->private);
        return 0;
 }
 
@@ -69,9 +127,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                goto out;
        }
 
-       /* We'll consider a THP page under construction to be there */
        if (pmd_trans_unstable(pmd)) {
-               memset(vec, 1, nr);
+               __mincore_unmapped_range(addr, end, vma, vec);
                goto out;
        }
 
@@ -80,17 +137,28 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                pte_t pte = *ptep;
 
                if (pte_none(pte))
-                       *vec = 0;
+                       __mincore_unmapped_range(addr, addr + PAGE_SIZE,
+                                                vma, vec);
                else if (pte_present(pte))
                        *vec = 1;
                else { /* pte is a swap entry */
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       /*
-                        * migration or hwpoison entries are always
-                        * uptodate
-                        */
-                       *vec = !!non_swap_entry(entry);
+                       if (non_swap_entry(entry)) {
+                               /*
+                                * migration or hwpoison entries are always
+                                * uptodate
+                                */
+                               *vec = 1;
+                       } else {
+#ifdef CONFIG_SWAP
+                               *vec = mincore_page(swap_address_space(entry),
+                                                   swp_offset(entry));
+#else
+                               WARN_ON(1);
+                               *vec = 1;
+#endif
+                       }
                }
                vec++;
        }
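With the walk above consulting the page cache for file-backed pages that are not present in the page tables, userspace again sees cache residency rather than a blanket answer; the calling convention of mincore() itself is unchanged, as in this brief sketch:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	unsigned char vec[4];
	void *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (mincore(p, 4 * page, vec) == 0) {
		for (int i = 0; i < 4; i++)
			printf("page %d: %s\n", i,
			       (vec[i] & 1) ? "resident" : "not resident");
	}
	return 0;
}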
index 70417e9b932ddcc2d7e5eb8ff1652a0d6ae6d457..314bbc8010fbedaa779d8b3eb772cc5a0fb26eda 100644 (file)
@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
        dst = (ax25_address *)(bp + 1);
        src = (ax25_address *)(bp + 8);
 
+       ax25_route_lock_use();
        route = ax25_get_route(dst, NULL);
        if (route) {
                digipeat = route->digipeat;
@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
        ax25_queue_xmit(skb, dev);
 
 put:
-       if (route)
-               ax25_put_route(route);
 
+       ax25_route_lock_unuse();
        return NETDEV_TX_OK;
 }
 
index a0eff323af12c027ea13a70bfbfffa68b5e48324..66f74c85cf6bd1487a13fbfeeca9eabcdf58fe11 100644 (file)
@@ -40,7 +40,7 @@
 #include <linux/export.h>
 
 static ax25_route *ax25_route_list;
-static DEFINE_RWLOCK(ax25_route_lock);
+DEFINE_RWLOCK(ax25_route_lock);
 
 void ax25_rt_device_down(struct net_device *dev)
 {
@@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = {
  *     Find AX.25 route
  *
  *     Only routes with a reference count of zero can be destroyed.
+ *     Must be called with ax25_route_lock read locked.
  */
 ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
 {
@@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
        ax25_route *ax25_def_rt = NULL;
        ax25_route *ax25_rt;
 
-       read_lock(&ax25_route_lock);
        /*
         *      Bind to the physical interface we heard them on, or the default
         *      route if none is found;
@@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
        if (ax25_spe_rt != NULL)
                ax25_rt = ax25_spe_rt;
 
-       if (ax25_rt != NULL)
-               ax25_hold_route(ax25_rt);
-
-       read_unlock(&ax25_route_lock);
-
        return ax25_rt;
 }
 
@@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
        ax25_route *ax25_rt;
        int err = 0;
 
-       if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
+       ax25_route_lock_use();
+       ax25_rt = ax25_get_route(addr, NULL);
+       if (!ax25_rt) {
+               ax25_route_lock_unuse();
                return -EHOSTUNREACH;
-
+       }
        if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
                err = -EHOSTUNREACH;
                goto put;
@@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
        }
 
 put:
-       ax25_put_route(ax25_rt);
-
+       ax25_route_lock_unuse();
        return err;
 }
 
index 0af8f0db892a3311fb5a1a898ab0bff5696adf00..79bb8afa9c0c0990729a781acc7220a78ee317af 100644 (file)
@@ -67,6 +67,9 @@
  */
 #define MAX_NFRAMES 256
 
+/* limit timers to 400 days for sending/timeouts */
+#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
+
 /* use of last_frames[index].flags */
 #define RX_RECV    0x40 /* received data for this element */
 #define RX_THR     0x80 /* element not been sent due to throttle feature */
@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
        return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
 }
 
+/* check limitations for timeval provided by user */
+static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
+{
+       if ((msg_head->ival1.tv_sec < 0) ||
+           (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
+           (msg_head->ival1.tv_usec < 0) ||
+           (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
+           (msg_head->ival2.tv_sec < 0) ||
+           (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
+           (msg_head->ival2.tv_usec < 0) ||
+           (msg_head->ival2.tv_usec >= USEC_PER_SEC))
+               return true;
+
+       return false;
+}
+
 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
 #define OPSIZ sizeof(struct bcm_op)
 #define MHSIZ sizeof(struct bcm_msg_head)
@@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
        if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
                return -EINVAL;
 
+       /* check timeval limitations */
+       if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+               return -EINVAL;
+
        /* check the given can_id */
        op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
        if (op) {
@@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
             (!(msg_head->can_id & CAN_RTR_FLAG))))
                return -EINVAL;
 
+       /* check timeval limitations */
+       if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+               return -EINVAL;
+
        /* check the given can_id */
        op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
        if (op) {
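
The new bcm_is_invalid_tv() helper above rejects negative fields, tv_usec values of a full second or more, and tv_sec beyond the 400-day cap, and both tx and rx setup now apply it whenever SETTIMER is requested. A stand-alone sketch of the same range check (the constants mirror the hunk; the helper name is made up):

#include <stdbool.h>
#include <stdio.h>
#include <sys/time.h>

#define TIMER_SEC_MAX (400 * 24 * 60 * 60)   /* 400 days, as in the hunk */
#define USEC_PER_SEC  1000000L

static bool tv_is_invalid(const struct timeval *tv)
{
        return tv->tv_sec < 0 || tv->tv_sec > TIMER_SEC_MAX ||
               tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC;
}

int main(void)
{
        struct timeval ok  = { .tv_sec = 5, .tv_usec = 250000 };
        struct timeval bad = { .tv_sec = 0, .tv_usec = USEC_PER_SEC }; /* one second expressed in usec */

        printf("ok:  %s\n", tv_is_invalid(&ok)  ? "rejected" : "accepted");
        printf("bad: %s\n", tv_is_invalid(&bad) ? "rejected" : "accepted");
        return 0;
}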
index d5718284db5721dfe96554b48cca5d38bdc40942..3661cdd927f15fc78a8b1436822924be5d4c8a17 100644 (file)
@@ -3206,9 +3206,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
        dout("con_keepalive %p\n", con);
        mutex_lock(&con->mutex);
        clear_standby(con);
+       con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
        mutex_unlock(&con->mutex);
-       if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
-           con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
+
+       if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
                queue_con(con);
 }
 EXPORT_SYMBOL(ceph_con_keepalive);
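
With the change above, the KEEPALIVE_PENDING flag is recorded while the connection mutex is still held, and only the 0 -> 1 transition of WRITE_PENDING decides whether the connection gets queued. A rough userspace analogue of that pattern with C11 atomics (the flag and function names are illustrative, not the ceph messenger's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_KEEPALIVE_PENDING (1u << 0)
#define FLAG_WRITE_PENDING     (1u << 1)

struct conn {
        pthread_mutex_t mutex;
        atomic_uint flags;
};

static void queue_con(struct conn *c) { (void)c; printf("queued\n"); }

static void con_keepalive(struct conn *c)
{
        pthread_mutex_lock(&c->mutex);
        /* mark the keepalive while still serialized against other state changes */
        atomic_fetch_or(&c->flags, FLAG_KEEPALIVE_PENDING);
        pthread_mutex_unlock(&c->mutex);

        /* queue work only when WRITE_PENDING goes from 0 to 1 */
        if (!(atomic_fetch_or(&c->flags, FLAG_WRITE_PENDING) & FLAG_WRITE_PENDING))
                queue_con(c);
}

int main(void)
{
        struct conn c = { .mutex = PTHREAD_MUTEX_INITIALIZER, .flags = 0 };

        con_keepalive(&c);   /* queues */
        con_keepalive(&c);   /* WRITE_PENDING already set: no second queue */
        return 0;
}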
index a4bf22ee3aedb746c999a20a4e6f0bd9e0ccbfe0..7c4a41dc04bbe13fa7d955d40b15fe963623186a 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 #include <net/gre.h>
+#include <net/erspan.h>
 
 #include <net/icmp.h>
 #include <net/route.h>
@@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                        hdr_len += 4;
        }
        tpi->hdr_len = hdr_len;
+
+       /* ERSPAN ver 1 and 2 protocol sets GRE key field
+        * to 0 and sets the configured key in the
+        * inner erspan header field
+        */
+       if (greh->protocol == htons(ETH_P_ERSPAN) ||
+           greh->protocol == htons(ETH_P_ERSPAN2)) {
+               struct erspan_base_hdr *ershdr;
+
+               if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
+                       return -EINVAL;
+
+               ershdr = (struct erspan_base_hdr *)options;
+               tpi->key = cpu_to_be32(get_session_id(ershdr));
+       }
+
        return hdr_len;
 }
 EXPORT_SYMBOL(gre_parse_header);
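
After this hunk, gre_parse_header() validates that the ERSPAN base header is actually present and folds its 10-bit session ID into tpi->key, so the erspan_rcv()/ip6erspan_rcv() hunks below can drop their own pull and key assignment. As a simplified, self-contained illustration of extracting a session ID and storing it in network byte order (the two-field header below is an illustration only, not the real struct erspan_base_hdr layout):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative header: second 16-bit word carries a 10-bit session ID */
struct ershdr_demo {
        uint16_t word0;
        uint16_t word1;
};

static uint16_t demo_session_id(const struct ershdr_demo *h)
{
        return ntohs(h->word1) & 0x3ff;          /* low 10 bits */
}

int main(void)
{
        struct ershdr_demo h = { .word0 = 0, .word1 = htons(0x1234) };
        uint32_t key = htonl(demo_session_id(&h)); /* like tpi->key = cpu_to_be32(...) */

        printf("session id 0x%03x, key (host order) 0x%08x\n",
               demo_session_id(&h), ntohl(key));
        return 0;
}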
index b1a74d80d86891e6cdcd088ba708616925c93af6..20a64fe6254b0c134edfcc2faebd3eac00934e0a 100644 (file)
@@ -268,20 +268,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
        int len;
 
        itn = net_generic(net, erspan_net_id);
-       len = gre_hdr_len + sizeof(*ershdr);
-
-       /* Check based hdr len */
-       if (unlikely(!pskb_may_pull(skb, len)))
-               return PACKET_REJECT;
 
        iph = ip_hdr(skb);
        ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
        ver = ershdr->ver;
 
-       /* The original GRE header does not have key field,
-        * Use ERSPAN 10-bit session ID as key.
-        */
-       tpi->key = cpu_to_be32(get_session_id(ershdr));
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
                                  tpi->flags | TUNNEL_KEY,
                                  iph->saddr, iph->daddr, tpi->key);
index c4f5602308edca064297fe8764764f65ebe84569..054d01c16dc6add2e8754f1d0c3c8f9e9772f863 100644 (file)
@@ -644,13 +644,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        dst = tnl_params->daddr;
        if (dst == 0) {
                /* NBMA tunnel */
+               struct ip_tunnel_info *tun_info;
 
                if (!skb_dst(skb)) {
                        dev->stats.tx_fifo_errors++;
                        goto tx_error;
                }
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               tun_info = skb_tunnel_info(skb);
+               if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
+                   ip_tunnel_info_af(tun_info) == AF_INET &&
+                   tun_info->key.u.ipv4.dst)
+                       dst = tun_info->key.u.ipv4.dst;
+               else if (skb->protocol == htons(ETH_P_IP)) {
                        rt = skb_rtable(skb);
                        dst = rt_nexthop(rt, inner_iph->daddr);
                }
index 93d5ad2b1a69790384bd9cefa8e0b024e1714c97..84c358804355332c354c222a6d43096adc5c2f6d 100644 (file)
@@ -3495,8 +3495,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 
                        if (!addrconf_link_ready(dev)) {
                                /* device is not ready yet. */
-                               pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
-                                       dev->name);
+                               pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
+                                        dev->name);
                                break;
                        }
 
@@ -5120,6 +5120,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
                        if (idev) {
                                err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
                                                     &fillargs);
+                               if (err > 0)
+                                       err = 0;
                        }
                        goto put_tgt_net;
                }
index b1be67ca6768c1adf6be40c93023dac40cee5bec..4416368dbd49fe07d928104a759a58fb9cfedf15 100644 (file)
@@ -534,13 +534,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
        struct ip6_tnl *tunnel;
        u8 ver;
 
-       if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
-               return PACKET_REJECT;
-
        ipv6h = ipv6_hdr(skb);
        ershdr = (struct erspan_base_hdr *)skb->data;
        ver = ershdr->ver;
-       tpi->key = cpu_to_be32(get_session_id(ershdr));
 
        tunnel = ip6gre_tunnel_lookup(skb->dev,
                                      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
index de65fe3ed9cc66e9d6373c4df22d356e8b36579d..2493c74c2d3789a2e504f91b53fc93a02a7234c0 100644 (file)
@@ -1490,6 +1490,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
                sta->sta.tdls = true;
 
+       if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !sdata->u.mgd.associated)
+               return -EINVAL;
+
        err = sta_apply_parameters(local, sta, params);
        if (err) {
                sta_info_free(local, sta);
index 45aad3d3108cccce9626c2682ae390a8b0991568..bb4d71efb6fb87f462887312ed631a4f11a16148 100644 (file)
@@ -231,7 +231,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
                struct ieee80211_hdr_3addr hdr;
                u8 category;
                u8 action_code;
-       } __packed action;
+       } __packed __aligned(2) action;
 
        if (!sdata)
                return;
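
The on-stack action frame copy above is declared __packed so it has no padding, but plain __packed also drops the structure's alignment to 1, which leaves the 16-bit fields of the embedded 802.11 header unaligned. Adding __aligned(2) keeps the packed layout while restoring 2-byte alignment. A small demonstration of the difference with GNU C attributes (the types are simplified stand-ins):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

struct hdr { uint16_t frame_control; uint8_t addr[6]; };

struct packed_only {
        struct hdr hdr;
        uint8_t category;
        uint8_t action_code;
} __attribute__((packed));

struct packed_aligned {
        struct hdr hdr;
        uint8_t category;
        uint8_t action_code;
} __attribute__((packed, aligned(2)));

int main(void)
{
        printf("packed:            size %zu, align %zu\n",
               sizeof(struct packed_only), alignof(struct packed_only));
        printf("packed+aligned(2): size %zu, align %zu\n",
               sizeof(struct packed_aligned), alignof(struct packed_aligned));
        return 0;
}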
@@ -2723,7 +2723,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        skb_set_queue_mapping(skb, q);
 
        if (!--mesh_hdr->ttl) {
-               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+               if (!is_multicast_ether_addr(hdr->addr1))
+                       IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
+                                                    dropped_frames_ttl);
                goto out;
        }
 
index ed8e006dae85734784d146858478f5cd5265c803..6200cd2b4b996b156aad38e3d94f0e87335f9cad 100644 (file)
@@ -280,7 +280,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 
        if (saddr) {
                fl6->saddr = saddr->v6.sin6_addr;
-               fl6->fl6_sport = saddr->v6.sin6_port;
+               if (!fl6->fl6_sport)
+                       fl6->fl6_sport = saddr->v6.sin6_port;
 
                pr_debug("src=%pI6 - ", &fl6->saddr);
        }
index 4e0eeb113ef57528692d53544eb7c074e1538332..6abc8b274270730e482730bbc3ef735a7ffd2e52 100644 (file)
@@ -440,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        }
        if (saddr) {
                fl4->saddr = saddr->v4.sin_addr.s_addr;
-               fl4->fl4_sport = saddr->v4.sin_port;
+               if (!fl4->fl4_sport)
+                       fl4->fl4_sport = saddr->v4.sin_port;
        }
 
        pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
index f4ac6c592e1396e136311defe312be22ece411d8..d05c57664e36e073304e0dc083a9c1d4a342002d 100644 (file)
@@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
         *
         * [INIT ACK back to where the INIT came from.]
         */
-       retval->transport = chunk->transport;
+       if (chunk->transport)
+               retval->transport =
+                       sctp_assoc_lookup_paddr(asoc,
+                                               &chunk->transport->ipaddr);
 
        retval->subh.init_hdr =
                sctp_addto_chunk(retval, sizeof(initack), &initack);
@@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
         *
         * [COOKIE ACK back to where the COOKIE ECHO came from.]
         */
-       if (retval && chunk)
-               retval->transport = chunk->transport;
+       if (retval && chunk && chunk->transport)
+               retval->transport =
+                       sctp_assoc_lookup_paddr(asoc,
+                                               &chunk->transport->ipaddr);
 
        return retval;
 }
index 3892e7630f3adf6d42b57244b761c9e1aa558e95..80e0ae5534ecb0e134d609097271967d87f6ba4f 100644 (file)
@@ -585,9 +585,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
        struct sctp_strreset_outreq *outreq = param.v;
        struct sctp_stream *stream = &asoc->stream;
        __u32 result = SCTP_STRRESET_DENIED;
-       __u16 i, nums, flags = 0;
        __be16 *str_p = NULL;
        __u32 request_seq;
+       __u16 i, nums;
 
        request_seq = ntohl(outreq->request_seq);
 
@@ -615,6 +615,15 @@ struct sctp_chunk *sctp_process_strreset_outreq(
        if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
                goto out;
 
+       nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
+       str_p = outreq->list_of_streams;
+       for (i = 0; i < nums; i++) {
+               if (ntohs(str_p[i]) >= stream->incnt) {
+                       result = SCTP_STRRESET_ERR_WRONG_SSN;
+                       goto out;
+               }
+       }
+
        if (asoc->strreset_chunk) {
                if (!sctp_chunk_lookup_strreset_param(
                                asoc, outreq->response_seq,
@@ -637,32 +646,19 @@ struct sctp_chunk *sctp_process_strreset_outreq(
                        sctp_chunk_put(asoc->strreset_chunk);
                        asoc->strreset_chunk = NULL;
                }
-
-               flags = SCTP_STREAM_RESET_INCOMING_SSN;
        }
 
-       nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
-       if (nums) {
-               str_p = outreq->list_of_streams;
-               for (i = 0; i < nums; i++) {
-                       if (ntohs(str_p[i]) >= stream->incnt) {
-                               result = SCTP_STRRESET_ERR_WRONG_SSN;
-                               goto out;
-                       }
-               }
-
+       if (nums)
                for (i = 0; i < nums; i++)
                        SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
-       } else {
+       else
                for (i = 0; i < stream->incnt; i++)
                        SCTP_SI(stream, i)->mid = 0;
-       }
 
        result = SCTP_STRRESET_PERFORMED;
 
        *evp = sctp_ulpevent_make_stream_reset_event(asoc,
-               flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p,
-               GFP_ATOMIC);
+               SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
 
 out:
        sctp_update_strreset_result(asoc, result);
@@ -738,9 +734,6 @@ struct sctp_chunk *sctp_process_strreset_inreq(
 
        result = SCTP_STRRESET_PERFORMED;
 
-       *evp = sctp_ulpevent_make_stream_reset_event(asoc,
-               SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
-
 out:
        sctp_update_strreset_result(asoc, result);
 err:
@@ -873,6 +866,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
        if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
                goto out;
 
+       in = ntohs(addstrm->number_of_streams);
+       incnt = stream->incnt + in;
+       if (!in || incnt > SCTP_MAX_STREAM)
+               goto out;
+
+       if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
+               goto out;
+
        if (asoc->strreset_chunk) {
                if (!sctp_chunk_lookup_strreset_param(
                        asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
@@ -896,14 +897,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
                }
        }
 
-       in = ntohs(addstrm->number_of_streams);
-       incnt = stream->incnt + in;
-       if (!in || incnt > SCTP_MAX_STREAM)
-               goto out;
-
-       if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
-               goto out;
-
        stream->incnt = incnt;
 
        result = SCTP_STRRESET_PERFORMED;
@@ -973,9 +966,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
 
        result = SCTP_STRRESET_PERFORMED;
 
-       *evp = sctp_ulpevent_make_stream_change_event(asoc,
-               0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
-
 out:
        sctp_update_strreset_result(asoc, result);
 err:
@@ -1036,10 +1026,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
                                        sout->mid_uo = 0;
                                }
                        }
-
-                       flags = SCTP_STREAM_RESET_OUTGOING_SSN;
                }
 
+               flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
+
                for (i = 0; i < stream->outcnt; i++)
                        SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
 
@@ -1058,6 +1048,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
                nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
                       sizeof(__u16);
 
+               flags |= SCTP_STREAM_RESET_INCOMING_SSN;
+
                *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
                        nums, str_p, GFP_ATOMIC);
        } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
index 5e49492d5911d816c85bdec83af843e0a5235c1c..74150ad958239f631e34662fd1995eba148013ce 100644 (file)
@@ -555,7 +555,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        },
        [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1),
        [NL80211_ATTR_PEER_MEASUREMENTS] =
-               NLA_POLICY_NESTED(NL80211_PMSR_FTM_REQ_ATTR_MAX,
+               NLA_POLICY_NESTED(NL80211_PMSR_ATTR_MAX,
                                  nl80211_pmsr_attr_policy),
 };
 
index ecfb1a06dbb2be08cfc99028bb9817deaa5fc3a7..dd58b9909ac999ae9683684d05f6744c8a6def9e 100644 (file)
@@ -1024,8 +1024,13 @@ static void regdb_fw_cb(const struct firmware *fw, void *context)
        }
 
        rtnl_lock();
-       if (WARN_ON(regdb && !IS_ERR(regdb))) {
-               /* just restore and free new db */
+       if (regdb && !IS_ERR(regdb)) {
+               /* negative case - a bug
+                * positive case - can happen due to race in case of multiple cb's in
+                * queue, due to usage of asynchronous callback
+                *
+                * Either case, just restore and free new db.
+                */
        } else if (set_error) {
                regdb = ERR_PTR(set_error);
        } else if (fw) {
@@ -1255,7 +1260,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
  * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
  * however it is safe for now to assume that a frequency rule should not be
  * part of a frequency's band if the start freq or end freq are off by more
- * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
+ * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
  * 60 GHz band.
  * This resolution can be lowered and should be considered as we add
  * regulatory rule support for other "bands".
@@ -1270,7 +1275,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
         * with the Channel starting frequency above 45 GHz.
         */
        u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
-                       10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+                       20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
        if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
                return true;
        if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
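
The widened slack above keeps the rule-to-band heuristic usable for 60 GHz channels: a frequency is treated as belonging to a rule's band when it lies within 2 GHz of either rule edge below 45 GHz, and within 20 GHz above it. The same arithmetic as a stand-alone check (values in kHz, as in the driver; the sample frequencies are only examples):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define ONE_GHZ_IN_KHZ 1000000L

static bool freq_in_rule_band(long start_khz, long end_khz, long freq_khz)
{
        long limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
                        20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;

        return labs(freq_khz - start_khz) <= limit ||
               labs(freq_khz - end_khz) <= limit;
}

int main(void)
{
        /* 58.32 GHz channel against a rule ending at 64 GHz: within the 20 GHz slack */
        printf("%d\n", freq_in_rule_band(57 * ONE_GHZ_IN_KHZ,
                                         64 * ONE_GHZ_IN_KHZ, 58320000L));
        /* 2.412 GHz channel against a 5 GHz rule: outside the 2 GHz slack */
        printf("%d\n", freq_in_rule_band(5150000L, 5350000L, 2412000L));
        return 0;
}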
index a5b09e75e7874bb6a1facc9a9fd70e6fdac10791..f7d2b373da0aaebd869b06773c24b7994c0cb472 100644 (file)
@@ -541,7 +541,8 @@ static int snd_compress_check_input(struct snd_compr_params *params)
 {
        /* first let's check the buffer parameter's */
        if (params->buffer.fragment_size == 0 ||
-           params->buffer.fragments > INT_MAX / params->buffer.fragment_size)
+           params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
+           params->buffer.fragments == 0)
                return -EINVAL;
 
        /* now codec parameters */
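
The extra fragments == 0 test above completes the buffer sanity check: with both fields non-zero, fragments <= INT_MAX / fragment_size guarantees that fragments * fragment_size cannot overflow an int. A stand-alone version of the same overflow-safe guard (the function name is invented):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool buffer_params_ok(unsigned int fragment_size, unsigned int fragments)
{
        if (fragment_size == 0 || fragments == 0)
                return false;
        if (fragments > INT_MAX / fragment_size)
                return false;           /* fragments * fragment_size would overflow */
        return true;
}

int main(void)
{
        printf("%d\n", buffer_params_ok(4096, 8));           /* 1: fine */
        printf("%d\n", buffer_params_ok(4096, 0));           /* 0: zero fragments */
        printf("%d\n", buffer_params_ok(1 << 20, 1 << 12));   /* 0: product overflows int */
        return 0;
}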
index 51cc6589443f5f12e21c5ea6e66a6e30a6a445cc..152f54137082321f66dea8bb0563a39f299e61d6 100644 (file)
@@ -931,6 +931,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
index 0b3e7a18ca78b07548aadcc7ea15a4add9515a30..b4f472157ebdf6a49198672b2fb04dd0953a39ab 100644 (file)
@@ -6926,7 +6926,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
        {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
        {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
-       {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"},
+       {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
        {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
        {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
        {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
index 022a8912c8a2554eaec61b712fd9f83e474fd6dc..3d58338fa3cf79b3d9c28351cfd9411167fe51a5 100644 (file)
@@ -611,14 +611,16 @@ static int acp3x_audio_probe(struct platform_device *pdev)
        }
        irqflags = *((unsigned int *)(pdev->dev.platform_data));
 
-       adata = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dev_data),
-                            GFP_KERNEL);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n");
                        return -ENODEV;
        }
 
+       adata = devm_kzalloc(&pdev->dev, sizeof(*adata), GFP_KERNEL);
+       if (!adata)
+               return -ENOMEM;
+
        adata->acp3x_base = devm_ioremap(&pdev->dev, res->start,
                                         resource_size(res));
 
index 3ab2949c1dfa454f2df8ebf277dd60d82e56631e..b19d7a3e7a2cc07827808ac2fe92797deac56a37 100644 (file)
@@ -1890,51 +1890,31 @@ static void hdmi_codec_remove(struct snd_soc_component *component)
        pm_runtime_disable(&hdev->dev);
 }
 
-#ifdef CONFIG_PM
-static int hdmi_codec_prepare(struct device *dev)
-{
-       struct hdac_device *hdev = dev_to_hdac_dev(dev);
-
-       pm_runtime_get_sync(&hdev->dev);
-
-       /*
-        * Power down afg.
-        * codec_read is preferred over codec_write to set the power state.
-        * This way verb is send to set the power state and response
-        * is received. So setting power state is ensured without using loop
-        * to read the state.
-        */
-       snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
-                                                       AC_PWRST_D3);
-
-       return 0;
-}
-
-static void hdmi_codec_complete(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int hdmi_codec_resume(struct device *dev)
 {
        struct hdac_device *hdev = dev_to_hdac_dev(dev);
        struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
+       int ret;
 
-       /* Power up afg */
-       snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
-                                                       AC_PWRST_D0);
-
-       hdac_hdmi_skl_enable_all_pins(hdev);
-       hdac_hdmi_skl_enable_dp12(hdev);
-
+       ret = pm_runtime_force_resume(dev);
+       if (ret < 0)
+               return ret;
        /*
         * As the ELD notify callback request is not entertained while the
         * device is in suspend state. Need to manually check detection of
         * all pins here. pin capability change is not supported, so use the
         * already set pin caps.
+        *
+        * NOTE: this is safe to call even if the codec doesn't actually resume.
+        * The pin check involves only the DRM audio component hooks, so it
+        * works even if the HD-audio side is still dreaming peacefully.
         */
        hdac_hdmi_present_sense_all_pins(hdev, hdmi, false);
-
-       pm_runtime_put_sync(&hdev->dev);
+       return 0;
 }
 #else
-#define hdmi_codec_prepare NULL
-#define hdmi_codec_complete NULL
+#define hdmi_codec_resume NULL
 #endif
 
 static const struct snd_soc_component_driver hdmi_hda_codec = {
@@ -2135,75 +2115,6 @@ static int hdac_hdmi_dev_remove(struct hdac_device *hdev)
 }
 
 #ifdef CONFIG_PM
-/*
- * Power management sequences
- * ==========================
- *
- * The following explains the PM handling of HDAC HDMI with its parent
- * device SKL and display power usage
- *
- * Probe
- * -----
- * In SKL probe,
- * 1. skl_probe_work() powers up the display (refcount++ -> 1)
- * 2. enumerates the codecs on the link
- * 3. powers down the display  (refcount-- -> 0)
- *
- * In HDAC HDMI probe,
- * 1. hdac_hdmi_dev_probe() powers up the display (refcount++ -> 1)
- * 2. probe the codec
- * 3. put the HDAC HDMI device to runtime suspend
- * 4. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
- *
- * Once children are runtime suspended, SKL device also goes to runtime
- * suspend
- *
- * HDMI Playback
- * -------------
- * Open HDMI device,
- * 1. skl_runtime_resume() invoked
- * 2. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
- *
- * Close HDMI device,
- * 1. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
- * 2. skl_runtime_suspend() invoked
- *
- * S0/S3 Cycle with playback in progress
- * -------------------------------------
- * When the device is opened for playback, the device is runtime active
- * already and the display refcount is 1 as explained above.
- *
- * Entering to S3,
- * 1. hdmi_codec_prepare() invoke the runtime resume of codec which just
- *    increments the PM runtime usage count of the codec since the device
- *    is in use already
- * 2. skl_suspend() powers down the display (refcount-- -> 0)
- *
- * Wakeup from S3,
- * 1. skl_resume() powers up the display (refcount++ -> 1)
- * 2. hdmi_codec_complete() invokes the runtime suspend of codec which just
- *    decrements the PM runtime usage count of the codec since the device
- *    is in use already
- *
- * Once playback is stopped, the display refcount is set to 0 as explained
- * above in the HDMI playback sequence. The PM handlings are designed in
- * such way that to balance the refcount of display power when the codec
- * device put to S3 while playback is going on.
- *
- * S0/S3 Cycle without playback in progress
- * ----------------------------------------
- * Entering to S3,
- * 1. hdmi_codec_prepare() invoke the runtime resume of codec
- * 2. skl_runtime_resume() invoked
- * 3. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
- * 4. skl_suspend() powers down the display (refcount-- -> 0)
- *
- * Wakeup from S3,
- * 1. skl_resume() powers up the display (refcount++ -> 1)
- * 2. hdmi_codec_complete() invokes the runtime suspend of codec
- * 3. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
- * 4. skl_runtime_suspend() invoked
- */
 static int hdac_hdmi_runtime_suspend(struct device *dev)
 {
        struct hdac_device *hdev = dev_to_hdac_dev(dev);
@@ -2277,8 +2188,7 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
 
 static const struct dev_pm_ops hdac_hdmi_pm = {
        SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
-       .prepare = hdmi_codec_prepare,
-       .complete = hdmi_codec_complete,
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, hdmi_codec_resume)
 };
 
 static const struct hda_device_id hdmi_list[] = {
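
The replacement above drops the hand-rolled prepare/complete pair in favour of pm_runtime_force_suspend()/pm_runtime_force_resume(), so the runtime-PM callbacks double as the system-sleep path. A hedged skeleton of that wiring for a generic platform driver (a sketch only: the driver and callback names are placeholders, not from this patch):

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int __maybe_unused demo_runtime_suspend(struct device *dev)
{
        /* save context, gate clocks, power down the block */
        return 0;
}

static int __maybe_unused demo_runtime_resume(struct device *dev)
{
        /* power up the block, restore context */
        return 0;
}

static int __maybe_unused demo_resume(struct device *dev)
{
        int ret = pm_runtime_force_resume(dev);

        if (ret < 0)
                return ret;
        /* work that only system resume needs (e.g. re-checking pins) goes here */
        return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
        SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, demo_resume)
};

static int demo_probe(struct platform_device *pdev)
{
        pm_runtime_enable(&pdev->dev);
        return 0;
}

static int demo_remove(struct platform_device *pdev)
{
        pm_runtime_disable(&pdev->dev);
        return 0;
}

static struct platform_driver demo_driver = {
        .probe  = demo_probe,
        .remove = demo_remove,
        .driver = {
                .name = "demo-pm",
                .pm   = &demo_pm_ops,
        },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");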
index 6cb1653be80417ae65ceddbc52791dcc92a4a367..4cc24a5d5c3167cf625a4135e068b6798b12a842 100644 (file)
@@ -1400,24 +1400,20 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
                if (ret != 0) {
                        dev_err(component->dev,
                                "Failed to set digital mute: %d\n", ret);
-                       mutex_unlock(&pcm512x->mutex);
-                       return ret;
+                       goto unlock;
                }
 
                regmap_read_poll_timeout(pcm512x->regmap,
                                         PCM512x_ANALOG_MUTE_DET,
                                         mute_det, (mute_det & 0x3) == 0,
                                         200, 10000);
-
-               mutex_unlock(&pcm512x->mutex);
        } else {
                pcm512x->mute &= ~0x1;
                ret = pcm512x_update_mute(pcm512x);
                if (ret != 0) {
                        dev_err(component->dev,
                                "Failed to update digital mute: %d\n", ret);
-                       mutex_unlock(&pcm512x->mutex);
-                       return ret;
+                       goto unlock;
                }
 
                regmap_read_poll_timeout(pcm512x->regmap,
@@ -1428,9 +1424,10 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
                                         200, 10000);
        }
 
+unlock:
        mutex_unlock(&pcm512x->mutex);
 
-       return 0;
+       return ret;
 }
 
 static const struct snd_soc_dai_ops pcm512x_dai_ops = {
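
The rewrite above replaces the per-error mutex_unlock()/return pairs with a single unlock label, so every path out of the function drops pcm512x->mutex exactly once and returns the last error code. The same idiom in a stand-alone form (names invented):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int update_mute(int mute)
{
        return mute < 0 ? -EINVAL : 0;   /* stand-in for the register update */
}

static int digital_mute(int mute)
{
        int ret;

        pthread_mutex_lock(&lock);

        ret = update_mute(mute);
        if (ret != 0) {
                fprintf(stderr, "failed to update mute: %d\n", ret);
                goto unlock;             /* no early return with the lock held */
        }

        /* ... poll for the mute-detect bits here ... */

unlock:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("%d\n", digital_mute(1));    /* 0 */
        printf("%d\n", digital_mute(-1));   /* -22 */
        return 0;
}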
index 0ef966d56bac300f00f1fac7aa92f54fa9949e7e..e2855ab9a2c6b5f84a4e179d254564d62cb6efff 100644 (file)
@@ -1128,8 +1128,11 @@ static int rt274_i2c_probe(struct i2c_client *i2c,
                return ret;
        }
 
-       regmap_read(rt274->regmap,
+       ret = regmap_read(rt274->regmap,
                RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
+       if (ret)
+               return ret;
+
        if (val != RT274_VENDOR_ID) {
                dev_err(&i2c->dev,
                        "Device with ID register %#x is not rt274\n", val);
index 4d46f4567c3a8c69cab55ca45ab1bbbc686b8945..bec2eefa8b0f03efcd518de0c047ac5064221115 100644 (file)
@@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
 
        rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
                        GFP_KERNEL);
+       if (!rt5514_dsp)
+               return -ENOMEM;
 
        rt5514_dsp->dev = &rt5514_spi->dev;
        mutex_init(&rt5514_dsp->dma_lock);
index 34cfaf8f6f3452b9839614a03c4b72c9cb82cd17..89c43b26c3790814645172e8165e1ac971972319 100644 (file)
@@ -2512,6 +2512,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
        regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000);
        regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000);
        regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
+       regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
 
        mutex_unlock(&rt5682->calibrate_mutex);
 
index d82a8301fd745c1206923b3363d1e51ce66746bd..96944cff0ed730b04406036e6b38ddb5499d7726 100644 (file)
 #define RT5682_SCLK_SRC_PLL2                   (0x2 << 13)
 #define RT5682_SCLK_SRC_SDW                    (0x3 << 13)
 #define RT5682_SCLK_SRC_RCCLK                  (0x4 << 13)
-#define RT5682_PLL1_SRC_MASK                   (0x3 << 10)
-#define RT5682_PLL1_SRC_SFT                    10
-#define RT5682_PLL1_SRC_MCLK                   (0x0 << 10)
-#define RT5682_PLL1_SRC_BCLK1                  (0x1 << 10)
-#define RT5682_PLL1_SRC_SDW                    (0x2 << 10)
-#define RT5682_PLL1_SRC_RC                     (0x3 << 10)
-#define RT5682_PLL2_SRC_MASK                   (0x3 << 8)
-#define RT5682_PLL2_SRC_SFT                    8
-#define RT5682_PLL2_SRC_MCLK                   (0x0 << 8)
-#define RT5682_PLL2_SRC_BCLK1                  (0x1 << 8)
-#define RT5682_PLL2_SRC_SDW                    (0x2 << 8)
-#define RT5682_PLL2_SRC_RC                     (0x3 << 8)
+#define RT5682_PLL2_SRC_MASK                   (0x3 << 10)
+#define RT5682_PLL2_SRC_SFT                    10
+#define RT5682_PLL2_SRC_MCLK                   (0x0 << 10)
+#define RT5682_PLL2_SRC_BCLK1                  (0x1 << 10)
+#define RT5682_PLL2_SRC_SDW                    (0x2 << 10)
+#define RT5682_PLL2_SRC_RC                     (0x3 << 10)
+#define RT5682_PLL1_SRC_MASK                   (0x3 << 8)
+#define RT5682_PLL1_SRC_SFT                    8
+#define RT5682_PLL1_SRC_MCLK                   (0x0 << 8)
+#define RT5682_PLL1_SRC_BCLK1                  (0x1 << 8)
+#define RT5682_PLL1_SRC_SDW                    (0x2 << 8)
+#define RT5682_PLL1_SRC_RC                     (0x3 << 8)
 
 
 
index e2b5a11b16d1901775ff0fe8cedce8c2d5c7e5f2..f03195d2ab2ea773b9efa52783c6b5cd7c5f368d 100644 (file)
@@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component,
        case SND_SOC_BIAS_PREPARE:
                break;
        case SND_SOC_BIAS_STANDBY:
+               /* Initial cold start */
+               if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
+                       break;
+
                /* Switch off BCLK_N Divider */
                snd_soc_component_update_bits(component, AIC32X4_BCLKN,
                                    AIC32X4_BCLKEN, 0);
index 392d5eef356d3a8a90cb2b4cb52c07d5520ebbc0..99e07b01a2ce9d5fa3d8e1d5bbc72823d399ef4b 100644 (file)
@@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
-       ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
+       ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
                       pdcr, ptcr);
 
        if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR)
-               ret += snprintf(buf + ret, PAGE_SIZE - ret,
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                "TxFS output from %s, ",
                                audmux_port_string((ptcr >> 27) & 0x7));
        else
-               ret += snprintf(buf + ret, PAGE_SIZE - ret,
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                "TxFS input, ");
 
        if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR)
-               ret += snprintf(buf + ret, PAGE_SIZE - ret,
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                "TxClk output from %s",
                                audmux_port_string((ptcr >> 22) & 0x7));
        else
-               ret += snprintf(buf + ret, PAGE_SIZE - ret,
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                "TxClk input");
 
-       ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+       ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
 
        if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) {
-               ret += snprintf(buf + ret, PAGE_SIZE - ret,
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                "Port is symmetric");
        } else {
                if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR)
-                       ret += snprintf(buf + ret, PAGE_SIZE - ret,
+                       ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                        "RxFS output from %s, ",
                                        audmux_port_string((ptcr >> 17) & 0x7));
                else
-                       ret += snprintf(buf + ret, PAGE_SIZE - ret,
+                       ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                        "RxFS input, ");
 
                if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR)
-                       ret += snprintf(buf + ret, PAGE_SIZE - ret,
+                       ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                        "RxClk output from %s",
                                        audmux_port_string((ptcr >> 12) & 0x7));
                else
-                       ret += snprintf(buf + ret, PAGE_SIZE - ret,
+                       ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                        "RxClk input");
        }
 
-       ret += snprintf(buf + ret, PAGE_SIZE - ret,
+       ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                        "\nData received from %s\n",
                        audmux_port_string((pdcr >> 13) & 0x7));
 
index 99a62ba409df83424bc84031f067e4e70a0db03d..bd9fd2035c554b9479500113816483b4d128b2c9 100644 (file)
@@ -91,7 +91,7 @@ config SND_SST_ATOM_HIFI2_PLATFORM_PCI
 config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
        tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
        default ACPI
-       depends on X86 && ACPI
+       depends on X86 && ACPI && PCI
        select SND_SST_IPC_ACPI
        select SND_SST_ATOM_HIFI2_PLATFORM
        select SND_SOC_ACPI_INTEL_MATCH
index afc5598660955a462f1f2bfdf45fe9905a23e339..91a2436ce9525aea53c537ba1f879456050fcc66 100644 (file)
@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
                                struct snd_pcm_hw_params *params,
                                struct snd_soc_dai *dai)
 {
-       snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+       int ret;
+
+       ret =
+               snd_pcm_lib_malloc_pages(substream,
+                               params_buffer_bytes(params));
+       if (ret)
+               return ret;
        memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
        return 0;
 }
index 68e6543e6cb026aa20b3a339c9f7c3f24c3820f5..99f2a0156ae88cb509b70994cc6784902901161d 100644 (file)
@@ -192,7 +192,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
                .stream_name = "Loopback",
                .cpu_dai_name = "Loopback Pin",
                .platform_name = "haswell-pcm-audio",
-               .dynamic = 0,
+               .dynamic = 1,
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
index c74c4f17316fe898ff29e16a2be4982499d7f612..8f83b182c4f95d283acf52eb04cc70095778d4f4 100644 (file)
@@ -55,39 +55,6 @@ enum {
        GLK_DPCM_AUDIO_HDMI3_PB,
 };
 
-static int platform_clock_control(struct snd_soc_dapm_widget *w,
-                                       struct snd_kcontrol *k, int  event)
-{
-       struct snd_soc_dapm_context *dapm = w->dapm;
-       struct snd_soc_card *card = dapm->card;
-       struct snd_soc_dai *codec_dai;
-       int ret = 0;
-
-       codec_dai = snd_soc_card_get_codec_dai(card, GLK_REALTEK_CODEC_DAI);
-       if (!codec_dai) {
-               dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
-               return -EIO;
-       }
-
-       if (SND_SOC_DAPM_EVENT_OFF(event)) {
-               ret = snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0);
-               if (ret)
-                       dev_err(card->dev, "failed to stop sysclk: %d\n", ret);
-       } else if (SND_SOC_DAPM_EVENT_ON(event)) {
-               ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK,
-                                       GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ);
-               if (ret < 0) {
-                       dev_err(card->dev, "can't set codec pll: %d\n", ret);
-                       return ret;
-               }
-       }
-
-       if (ret)
-               dev_err(card->dev, "failed to start internal clk: %d\n", ret);
-
-       return ret;
-}
-
 static const struct snd_kcontrol_new geminilake_controls[] = {
        SOC_DAPM_PIN_SWITCH("Headphone Jack"),
        SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -102,14 +69,10 @@ static const struct snd_soc_dapm_widget geminilake_widgets[] = {
        SND_SOC_DAPM_SPK("HDMI1", NULL),
        SND_SOC_DAPM_SPK("HDMI2", NULL),
        SND_SOC_DAPM_SPK("HDMI3", NULL),
-       SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
-                       platform_clock_control, SND_SOC_DAPM_PRE_PMU |
-                       SND_SOC_DAPM_POST_PMD),
 };
 
 static const struct snd_soc_dapm_route geminilake_map[] = {
        /* HP jack connectors - unknown if we have jack detection */
-       { "Headphone Jack", NULL, "Platform Clock" },
        { "Headphone Jack", NULL, "HPOL" },
        { "Headphone Jack", NULL, "HPOR" },
 
@@ -117,7 +80,6 @@ static const struct snd_soc_dapm_route geminilake_map[] = {
        { "Spk", NULL, "Speaker" },
 
        /* other jacks */
-       { "Headset Mic", NULL, "Platform Clock" },
        { "IN1P", NULL, "Headset Mic" },
 
        /* digital mics */
@@ -177,6 +139,13 @@ static int geminilake_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
        struct snd_soc_jack *jack;
        int ret;
 
+       ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK,
+                                       GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ);
+       if (ret < 0) {
+               dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
+               return ret;
+       }
+
        /* Configure sysclk for codec */
        ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1,
                                        RT5682_PLL_FREQ, SND_SOC_CLOCK_IN);
index eab1f439dd3f1ad9a4553d03fa2190d673ec94a3..a4022983a7ce0050b3a5d857b0edd6ed691e0636 100644 (file)
@@ -146,7 +146,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
                .stream_name = "Loopback",
                .cpu_dai_name = "Loopback Pin",
                .platform_name = "haswell-pcm-audio",
-               .dynamic = 0,
+               .dynamic = 1,
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
index 60c94836bf5bc93bb3dde46a237c21abf77e5b6c..4ed5b7e17d44aad3e09e6d6a8726ae3742bc92a1 100644 (file)
@@ -336,9 +336,6 @@ static int skl_suspend(struct device *dev)
                skl->skl_sst->fw_loaded = false;
        }
 
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
-               snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
-
        return 0;
 }
 
@@ -350,10 +347,6 @@ static int skl_resume(struct device *dev)
        struct hdac_ext_link *hlink = NULL;
        int ret;
 
-       /* Turned OFF in HDMI codec driver after codec reconfiguration */
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
-               snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
-
        /*
         * resume only when we are not in suspend active, otherwise need to
         * restore the device
@@ -446,8 +439,10 @@ static int skl_free(struct hdac_bus *bus)
        snd_hdac_ext_bus_exit(bus);
 
        cancel_work_sync(&skl->probe_work);
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+               snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
                snd_hdac_i915_exit(bus);
+       }
 
        return 0;
 }
@@ -814,7 +809,7 @@ static void skl_probe_work(struct work_struct *work)
        err = skl_platform_register(bus->dev);
        if (err < 0) {
                dev_err(bus->dev, "platform register failed: %d\n", err);
-               return;
+               goto out_err;
        }
 
        err = skl_machine_device_register(skl);
index 5b986b74dd36f96d0a4a37bc8714f0b741c39486..548eb4fa2da64415ccf7f9db83f84575a251093f 100644 (file)
@@ -570,10 +570,10 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
        prtd->audio_client = q6asm_audio_client_alloc(dev,
                                        (q6asm_cb)compress_event_handler,
                                        prtd, stream_id, LEGACY_PCM_MODE);
-       if (!prtd->audio_client) {
+       if (IS_ERR(prtd->audio_client)) {
                dev_err(dev, "Could not allocate memory\n");
-               kfree(prtd);
-               return -ENOMEM;
+               ret = PTR_ERR(prtd->audio_client);
+               goto free_prtd;
        }
 
        size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE *
@@ -582,7 +582,7 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
                                  &prtd->dma_buffer);
        if (ret) {
                dev_err(dev, "Cannot allocate buffer(s)\n");
-               return ret;
+               goto free_client;
        }
 
        if (pdata->sid < 0)
@@ -595,6 +595,13 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
        runtime->private_data = prtd;
 
        return 0;
+
+free_client:
+       q6asm_audio_client_free(prtd->audio_client);
+free_prtd:
+       kfree(prtd);
+
+       return ret;
 }
 
 static int q6asm_dai_compr_free(struct snd_compr_stream *stream)
@@ -874,7 +881,7 @@ static int of_q6asm_parse_dai_data(struct device *dev,
 
        for_each_child_of_node(dev->of_node, node) {
                ret = of_property_read_u32(node, "reg", &id);
-               if (ret || id > MAX_SESSIONS || id < 0) {
+               if (ret || id >= MAX_SESSIONS || id < 0) {
                        dev_err(dev, "valid dai id not found:%d\n", ret);
                        continue;
                }
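
The comparison above tightens id > MAX_SESSIONS to id >= MAX_SESSIONS: a table with MAX_SESSIONS entries only has valid indices 0 .. MAX_SESSIONS - 1, so the old test let an id equal to the array size slip through. A minimal illustration of the bounds check:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SESSIONS 8

static int dai_data[MAX_SESSIONS];

static bool id_valid(int id)
{
        return id >= 0 && id < MAX_SESSIONS;   /* id == MAX_SESSIONS is out of bounds */
}

int main(void)
{
        printf("%d\n", id_valid(MAX_SESSIONS - 1)); /* 1: last valid slot */
        printf("%d\n", id_valid(MAX_SESSIONS));     /* 0: one past the end */
        (void)dai_data;
        return 0;
}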
index 1db8ef6682233feaf38145efb07af877c28a1ef2..6f66a58e23caa1178bec669a14eb54c4dec72a95 100644 (file)
@@ -158,17 +158,24 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
        return ret;
 }
 
+static void sdm845_jack_free(struct snd_jack *jack)
+{
+       struct snd_soc_component *component = jack->private_data;
+
+       snd_soc_component_set_jack(component, NULL, NULL);
+}
+
 static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_component *component;
-       struct snd_soc_dai_link *dai_link = rtd->dai_link;
        struct snd_soc_card *card = rtd->card;
+       struct snd_soc_dai *codec_dai = rtd->codec_dai;
+       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
        struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card);
-       int i, rval;
+       struct snd_jack *jack;
+       int rval;
 
        if (!pdata->jack_setup) {
-               struct snd_jack *jack;
-
                rval = snd_soc_card_jack_new(card, "Headset Jack",
                                SND_JACK_HEADSET |
                                SND_JACK_HEADPHONE |
@@ -190,16 +197,22 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
                pdata->jack_setup = true;
        }
 
-       for (i = 0 ; i < dai_link->num_codecs; i++) {
-               struct snd_soc_dai *dai = rtd->codec_dais[i];
+       switch (cpu_dai->id) {
+       case PRIMARY_MI2S_RX:
+               jack  = pdata->jack.jack;
+               component = codec_dai->component;
 
-               component = dai->component;
-               rval = snd_soc_component_set_jack(
-                               component, &pdata->jack, NULL);
+               jack->private_data = component;
+               jack->private_free = sdm845_jack_free;
+               rval = snd_soc_component_set_jack(component,
+                                                 &pdata->jack, NULL);
                if (rval != 0 && rval != -ENOTSUPP) {
                        dev_warn(card->dev, "Failed to set jack: %d\n", rval);
                        return rval;
                }
+               break;
+       default:
+               break;
        }
 
        return 0;
index 922fb6aa3ed191c5047ae21ecb5d708287a81f35..5aee11c94f2a749f13c2957fb4eeb35c263ed482 100644 (file)
@@ -202,7 +202,7 @@ static int camelot_prepare(struct snd_pcm_substream *substream)
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
 
-       pr_debug("PCM data: addr 0x%08ulx len %d\n",
+       pr_debug("PCM data: addr 0x%08lx len %d\n",
                 (u32)runtime->dma_addr, runtime->dma_bytes);
  
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
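
"%08ulx" is parsed as the conversion "%08u" followed by the literal characters "lx", so the old message printed the address in decimal with a stray "lx" appended; "%08lx" is the conversion that actually formats an unsigned long in hex. Standard printf parses the format the same way, so a quick demonstration (the sample value is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long addr = 0xdeadbeefUL;

        printf("broken:  addr 0x%08ulx\n", (unsigned int)addr); /* decimal, then literal "lx" */
        printf("correct: addr 0x%08lx\n", addr);                /* hexadecimal unsigned long */
        return 0;
}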
index 0462b3ec977a221a5d7869d3e7afb02e36cd7e5a..aae450ba4f08f3c02c2b23f88b50ef3e07c8e817 100644 (file)
@@ -742,7 +742,7 @@ static struct snd_soc_component *soc_find_component(
                if (of_node) {
                        if (component->dev->of_node == of_node)
                                return component;
-               } else if (strcmp(component->name, name) == 0) {
+               } else if (name && strcmp(component->name, name) == 0) {
                        return component;
                }
        }
@@ -1034,17 +1034,18 @@ static int snd_soc_init_platform(struct snd_soc_card *card,
         * this function should be removed in the future
         */
        /* convert Legacy platform link */
-       if (!platform) {
+       if (!platform || dai_link->legacy_platform) {
                platform = devm_kzalloc(card->dev,
                                sizeof(struct snd_soc_dai_link_component),
                                GFP_KERNEL);
                if (!platform)
                        return -ENOMEM;
 
-               dai_link->platform      = platform;
-               platform->name          = dai_link->platform_name;
-               platform->of_node       = dai_link->platform_of_node;
-               platform->dai_name      = NULL;
+               dai_link->platform        = platform;
+               dai_link->legacy_platform = 1;
+               platform->name            = dai_link->platform_name;
+               platform->of_node         = dai_link->platform_of_node;
+               platform->dai_name        = NULL;
        }
 
        /* if there's no platform we match on the empty platform */
@@ -1129,6 +1130,15 @@ static int soc_init_dai_link(struct snd_soc_card *card,
                        link->name);
                return -EINVAL;
        }
+
+       /*
+        * Defer card registration if platform dai component is not added to
+        * component list.
+        */
+       if ((link->platform->of_node || link->platform->name) &&
+           !soc_find_component(link->platform->of_node, link->platform->name))
+               return -EPROBE_DEFER;
+
        /*
         * CPU device may be specified by either name or OF node, but
         * can be left unspecified, and will be matched based on DAI
@@ -1140,6 +1150,15 @@ static int soc_init_dai_link(struct snd_soc_card *card,
                        link->name);
                return -EINVAL;
        }
+
+       /*
+        * Defer card registration if cpu dai component is not added to
+        * component list.
+        */
+       if ((link->cpu_of_node || link->cpu_name) &&
+           !soc_find_component(link->cpu_of_node, link->cpu_name))
+               return -EPROBE_DEFER;
+
        /*
         * At least one of CPU DAI name or CPU device name/node must be
         * specified
@@ -2739,15 +2758,18 @@ int snd_soc_register_card(struct snd_soc_card *card)
        if (!card->name || !card->dev)
                return -EINVAL;
 
+       mutex_lock(&client_mutex);
        for_each_card_prelinks(card, i, link) {
 
                ret = soc_init_dai_link(card, link);
                if (ret) {
                        dev_err(card->dev, "ASoC: failed to init link %s\n",
                                link->name);
+                       mutex_unlock(&client_mutex);
                        return ret;
                }
        }
+       mutex_unlock(&client_mutex);
 
        dev_set_drvdata(card->dev, card);
 
index a5178845065b3586bc1bde669c03580574f1bcb7..2c4c134195392936bc03f9440c013360ac853a31 100644 (file)
@@ -2019,19 +2019,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                out = is_connected_output_ep(w, NULL, NULL);
        }
 
-       ret = snprintf(buf, PAGE_SIZE, "%s: %s%s  in %d out %d",
+       ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s  in %d out %d",
                       w->name, w->power ? "On" : "Off",
                       w->force ? " (forced)" : "", in, out);
 
        if (w->reg >= 0)
-               ret += snprintf(buf + ret, PAGE_SIZE - ret,
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                " - R%d(0x%x) mask 0x%x",
                                w->reg, w->reg, w->mask << w->shift);
 
-       ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+       ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
 
        if (w->sname)
-               ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
                                w->sname,
                                w->active ? "active" : "inactive");
 
@@ -2044,7 +2044,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                        if (!p->connect)
                                continue;
 
-                       ret += snprintf(buf + ret, PAGE_SIZE - ret,
+                       ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                        " %s  \"%s\" \"%s\"\n",
                                        (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
                                        p->name ? p->name : "static",
index eeda6d5565bccc5bffa00873a9a3c517dcacf41d..a10fcb5963c67a2b41ac02aaf3e6b7fcb329a990 100644 (file)
@@ -108,7 +108,7 @@ struct davinci_mcasp {
        /* Used for constraint setting on the second stream */
        u32     channels;
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
        struct davinci_mcasp_context context;
 #endif
 
@@ -1486,74 +1486,6 @@ static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
-{
-       struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
-       struct davinci_mcasp_context *context = &mcasp->context;
-       u32 reg;
-       int i;
-
-       context->pm_state = pm_runtime_active(mcasp->dev);
-       if (!context->pm_state)
-               pm_runtime_get_sync(mcasp->dev);
-
-       for (i = 0; i < ARRAY_SIZE(context_regs); i++)
-               context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
-
-       if (mcasp->txnumevt) {
-               reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
-               context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
-       }
-       if (mcasp->rxnumevt) {
-               reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
-               context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
-       }
-
-       for (i = 0; i < mcasp->num_serializer; i++)
-               context->xrsr_regs[i] = mcasp_get_reg(mcasp,
-                                               DAVINCI_MCASP_XRSRCTL_REG(i));
-
-       pm_runtime_put_sync(mcasp->dev);
-
-       return 0;
-}
-
-static int davinci_mcasp_resume(struct snd_soc_dai *dai)
-{
-       struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
-       struct davinci_mcasp_context *context = &mcasp->context;
-       u32 reg;
-       int i;
-
-       pm_runtime_get_sync(mcasp->dev);
-
-       for (i = 0; i < ARRAY_SIZE(context_regs); i++)
-               mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
-
-       if (mcasp->txnumevt) {
-               reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
-               mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
-       }
-       if (mcasp->rxnumevt) {
-               reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
-               mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
-       }
-
-       for (i = 0; i < mcasp->num_serializer; i++)
-               mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
-                             context->xrsr_regs[i]);
-
-       if (!context->pm_state)
-               pm_runtime_put_sync(mcasp->dev);
-
-       return 0;
-}
-#else
-#define davinci_mcasp_suspend NULL
-#define davinci_mcasp_resume NULL
-#endif
-
 #define DAVINCI_MCASP_RATES    SNDRV_PCM_RATE_8000_192000
 
 #define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \
@@ -1571,8 +1503,6 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
        {
                .name           = "davinci-mcasp.0",
                .probe          = davinci_mcasp_dai_probe,
-               .suspend        = davinci_mcasp_suspend,
-               .resume         = davinci_mcasp_resume,
                .playback       = {
                        .channels_min   = 1,
                        .channels_max   = 32 * 16,
@@ -1976,7 +1906,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
        }
 
        mcasp->num_serializer = pdata->num_serializer;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
        mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev,
                                        mcasp->num_serializer, sizeof(u32),
                                        GFP_KERNEL);
@@ -2196,11 +2126,73 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int davinci_mcasp_runtime_suspend(struct device *dev)
+{
+       struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
+       struct davinci_mcasp_context *context = &mcasp->context;
+       u32 reg;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(context_regs); i++)
+               context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
+
+       if (mcasp->txnumevt) {
+               reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
+               context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
+       }
+       if (mcasp->rxnumevt) {
+               reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
+               context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
+       }
+
+       for (i = 0; i < mcasp->num_serializer; i++)
+               context->xrsr_regs[i] = mcasp_get_reg(mcasp,
+                                               DAVINCI_MCASP_XRSRCTL_REG(i));
+
+       return 0;
+}
+
+static int davinci_mcasp_runtime_resume(struct device *dev)
+{
+       struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
+       struct davinci_mcasp_context *context = &mcasp->context;
+       u32 reg;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(context_regs); i++)
+               mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
+
+       if (mcasp->txnumevt) {
+               reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
+               mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
+       }
+       if (mcasp->rxnumevt) {
+               reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
+               mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
+       }
+
+       for (i = 0; i < mcasp->num_serializer; i++)
+               mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
+                             context->xrsr_regs[i]);
+
+       return 0;
+}
+
+#endif
+
+static const struct dev_pm_ops davinci_mcasp_pm_ops = {
+       SET_RUNTIME_PM_OPS(davinci_mcasp_runtime_suspend,
+                          davinci_mcasp_runtime_resume,
+                          NULL)
+};
+
 static struct platform_driver davinci_mcasp_driver = {
        .probe          = davinci_mcasp_probe,
        .remove         = davinci_mcasp_remove,
        .driver         = {
                .name   = "davinci-mcasp",
+               .pm     = &davinci_mcasp_pm_ops,
                .of_match_table = mcasp_dt_ids,
        },
 };
index 25e287feb58c33e845db60054e16ed49856e5679..723a583a8d570f625f92f5815f1ffe431624b340 100644 (file)
@@ -1,5 +1,5 @@
 config SND_SOC_XILINX_I2S
-       tristate "Audio support for the the Xilinx I2S"
+       tristate "Audio support for the Xilinx I2S"
        help
          Select this option to enable Xilinx I2S Audio. This enables
          I2S playback and capture using xilinx soft IP. In transmitter
index d4ae9eff41ce51cb2f2777263b5190edf49c3e49..8b353166ad447699480baae7944fe087a3bb79d8 100644 (file)
@@ -1,12 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
-/*
- * Xilinx ASoC I2S audio support
- *
- * Copyright (C) 2018 Xilinx, Inc.
- *
- * Author: Praveen Vuppala <praveenv@xilinx.com>
- * Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
- */
+//
+// Xilinx ASoC I2S audio support
+//
+// Copyright (C) 2018 Xilinx, Inc.
+//
+// Author: Praveen Vuppala <praveenv@xilinx.com>
+// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
 
 #include <linux/io.h>
 #include <linux/module.h>
index e752384045556cba86811d0b1a5e1dd66aca03fa..2d4baf57822fae8e7111a89e63fa047f4d0638ec 100644 (file)
@@ -18,8 +18,8 @@ ssize_t security_show(struct device *dev,
         * For the test version we need to poll the "hardware" in order
         * to get the updated status for unlock testing.
         */
-       nvdimm->sec.state = nvdimm_security_state(nvdimm, false);
-       nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, true);
+       nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+       nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER);
 
        switch (nvdimm->sec.state) {
        case NVDIMM_SECURITY_DISABLED:
index f8d468f54e986dd00aa82ba1aa29370896f656ee..aaa1e9f083c372153836764e85feb7a358f0a273 100644 (file)
@@ -37,7 +37,7 @@ static int get_debugfs(char **path)
        struct libmnt_table *tb;
        struct libmnt_iter *itr = NULL;
        struct libmnt_fs *fs;
-       int found = 0;
+       int found = 0, ret;
 
        cxt = mnt_new_context();
        if (!cxt)
@@ -58,8 +58,11 @@ static int get_debugfs(char **path)
                        break;
                }
        }
-       if (found)
-               asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+       if (found) {
+               ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+               if (ret < 0)
+                       err(EXIT_FAILURE, "failed to format string");
+       }
 
        mnt_free_iter(itr);
        mnt_free_context(cxt);
index 23022e9d32eb816d9a9955d94f08078a6d5fdfd8..b52cfdefecbfe9f760b569baf757d2228fe0c6af 100644 (file)
@@ -571,7 +571,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
         * already exist.
         */
        region = (struct userspace_mem_region *) userspace_mem_region_find(
-               vm, guest_paddr, guest_paddr + npages * vm->page_size);
+               vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
        if (region != NULL)
                TEST_ASSERT(false, "overlapping userspace_mem_region already "
                        "exists\n"
@@ -587,15 +587,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
                region = region->next) {
                if (region->region.slot == slot)
                        break;
-               if ((guest_paddr <= (region->region.guest_phys_addr
-                               + region->region.memory_size))
-                       && ((guest_paddr + npages * vm->page_size)
-                               >= region->region.guest_phys_addr))
-                       break;
        }
        if (region != NULL)
                TEST_ASSERT(false, "A mem region with the requested slot "
-                       "or overlapping physical memory range already exists.\n"
+                       "already exists.\n"
                        "  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
                        "  existing slot: %u paddr: 0x%lx size: 0x%lx",
                        slot, guest_paddr, npages,
index ea3c73e8f4f6e7a97303e1801a674c12dc01219d..c49c2a28b0eb290ccd6c51498a0b9fd716b58b07 100644 (file)
@@ -103,6 +103,12 @@ int main(int argc, char *argv[])
 
        vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
 
+       /* KVM should return supported EVMCS version range */
+       TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
+                   (evmcs_ver & 0xff) > 0,
+                   "Incorrect EVMCS version range: %x:%x\n",
+                   evmcs_ver & 0xff, evmcs_ver >> 8);
+
        run = vcpu_state(vm, VCPU_ID);
 
        vcpu_regs_get(vm, VCPU_ID, &regs1);
index e20b017e70731c13a8128dbf8396ddfb2f861958..b2065536d40757eeac0487aff3a95c5fcec9467d 100644 (file)
@@ -145,15 +145,12 @@ TEST_F(rtc, alarm_alm_set) {
 
        rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
        ASSERT_NE(-1, rc);
-       EXPECT_NE(0, rc);
+       ASSERT_NE(0, rc);
 
        /* Disable alarm interrupts */
        rc = ioctl(self->fd, RTC_AIE_OFF, 0);
        ASSERT_NE(-1, rc);
 
-       if (rc == 0)
-               return;
-
        rc = read(self->fd, &data, sizeof(unsigned long));
        ASSERT_NE(-1, rc);
        TH_LOG("data: %lx", data);
@@ -202,7 +199,109 @@ TEST_F(rtc, alarm_wkalm_set) {
 
        rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
        ASSERT_NE(-1, rc);
-       EXPECT_NE(0, rc);
+       ASSERT_NE(0, rc);
+
+       rc = read(self->fd, &data, sizeof(unsigned long));
+       ASSERT_NE(-1, rc);
+
+       rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+       ASSERT_NE(-1, rc);
+
+       new = timegm((struct tm *)&tm);
+       ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_alm_set_minute) {
+       struct timeval tv = { .tv_sec = 62 };
+       unsigned long data;
+       struct rtc_time tm;
+       fd_set readfds;
+       time_t secs, new;
+       int rc;
+
+       rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+       ASSERT_NE(-1, rc);
+
+       secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec;
+       gmtime_r(&secs, (struct tm *)&tm);
+
+       rc = ioctl(self->fd, RTC_ALM_SET, &tm);
+       if (rc == -1) {
+               ASSERT_EQ(EINVAL, errno);
+               TH_LOG("skip alarms are not supported.");
+               return;
+       }
+
+       rc = ioctl(self->fd, RTC_ALM_READ, &tm);
+       ASSERT_NE(-1, rc);
+
+       TH_LOG("Alarm time now set to %02d:%02d:%02d.",
+              tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+       /* Enable alarm interrupts */
+       rc = ioctl(self->fd, RTC_AIE_ON, 0);
+       ASSERT_NE(-1, rc);
+
+       FD_ZERO(&readfds);
+       FD_SET(self->fd, &readfds);
+
+       rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+       ASSERT_NE(-1, rc);
+       ASSERT_NE(0, rc);
+
+       /* Disable alarm interrupts */
+       rc = ioctl(self->fd, RTC_AIE_OFF, 0);
+       ASSERT_NE(-1, rc);
+
+       rc = read(self->fd, &data, sizeof(unsigned long));
+       ASSERT_NE(-1, rc);
+       TH_LOG("data: %lx", data);
+
+       rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+       ASSERT_NE(-1, rc);
+
+       new = timegm((struct tm *)&tm);
+       ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_wkalm_set_minute) {
+       struct timeval tv = { .tv_sec = 62 };
+       struct rtc_wkalrm alarm = { 0 };
+       struct rtc_time tm;
+       unsigned long data;
+       fd_set readfds;
+       time_t secs, new;
+       int rc;
+
+       rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
+       ASSERT_NE(-1, rc);
+
+       secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec;
+       gmtime_r(&secs, (struct tm *)&alarm.time);
+
+       alarm.enabled = 1;
+
+       rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
+       if (rc == -1) {
+               ASSERT_EQ(EINVAL, errno);
+               TH_LOG("skip alarms are not supported.");
+               return;
+       }
+
+       rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
+       ASSERT_NE(-1, rc);
+
+       TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
+              alarm.time.tm_mday, alarm.time.tm_mon + 1,
+              alarm.time.tm_year + 1900, alarm.time.tm_hour,
+              alarm.time.tm_min, alarm.time.tm_sec);
+
+       FD_ZERO(&readfds);
+       FD_SET(self->fd, &readfds);
+
+       rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+       ASSERT_NE(-1, rc);
+       ASSERT_NE(0, rc);
 
        rc = read(self->fd, &data, sizeof(unsigned long));
        ASSERT_NE(-1, rc);
index fce7f4ce069251a675c05f30f412c5361c4dbde3..1760b3e397306ce951498688692776a506e5fd10 100644 (file)
@@ -9,7 +9,7 @@ BINARIES := seccomp_bpf seccomp_benchmark
 CFLAGS += -Wl,-no-as-needed -Wall
 
 seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
-       $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@
+       $(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
 
 TEST_PROGS += $(BINARIES)
 EXTRA_CLEAN := $(BINARIES)
index 067cb4607d6cd519793b4ea2d26a5ef96f446e89..496a9a8c773aba088f0e88a752a08614d3750ec7 100644 (file)
@@ -3044,7 +3044,7 @@ TEST(user_notification_basic)
        /* Check that the basic notification machinery works */
        listener = user_trap_syscall(__NR_getpid,
                                     SECCOMP_FILTER_FLAG_NEW_LISTENER);
-       EXPECT_GE(listener, 0);
+       ASSERT_GE(listener, 0);
 
        /* Installing a second listener in the chain should EBUSY */
        EXPECT_EQ(user_trap_syscall(__NR_getpid,
@@ -3103,7 +3103,7 @@ TEST(user_notification_kill_in_middle)
 
        listener = user_trap_syscall(__NR_getpid,
                                     SECCOMP_FILTER_FLAG_NEW_LISTENER);
-       EXPECT_GE(listener, 0);
+       ASSERT_GE(listener, 0);
 
        /*
         * Check that nothing bad happens when we kill the task in the middle
@@ -3152,7 +3152,7 @@ TEST(user_notification_signal)
 
        listener = user_trap_syscall(__NR_gettid,
                                     SECCOMP_FILTER_FLAG_NEW_LISTENER);
-       EXPECT_GE(listener, 0);
+       ASSERT_GE(listener, 0);
 
        pid = fork();
        ASSERT_GE(pid, 0);
@@ -3215,7 +3215,7 @@ TEST(user_notification_closed_listener)
 
        listener = user_trap_syscall(__NR_getpid,
                                     SECCOMP_FILTER_FLAG_NEW_LISTENER);
-       EXPECT_GE(listener, 0);
+       ASSERT_GE(listener, 0);
 
        /*
         * Check that we get an ENOSYS when the listener is closed.
@@ -3376,7 +3376,7 @@ TEST(seccomp_get_notif_sizes)
 {
        struct seccomp_notif_sizes sizes;
 
-       EXPECT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
+       ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
        EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif));
        EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp));
 }
index 880b96fc80d4cabea168a1fb4e9bc5e828c4b0d5..c0534e298b5128d9bc2800abb5b59d7d5379f75e 100644 (file)
@@ -25,6 +25,7 @@ struct gup_benchmark {
        __u64 size;
        __u32 nr_pages_per_call;
        __u32 flags;
+       __u64 expansion[10];    /* For future use */
 };
 
 int main(int argc, char **argv)
index 50f7e92724813a3525154ede4f2b282af7e5a839..bf1bb15b6fbe3e2831a302a01586de6605b242e2 100644 (file)
@@ -1503,7 +1503,7 @@ void check_mpx_insns_and_tables(void)
                exit(20);
        }
        if (successes != total_nr_tests) {
-               eprintf("ERROR: succeded fewer than number of tries (%d != %d)\n",
+               eprintf("ERROR: succeeded fewer than number of tries (%d != %d)\n",
                                successes, total_nr_tests);
                exit(21);
        }
index 460b4bdf4c1edff9d5dfa0d451dbaa393d53b80c..5d546dcdbc805b93516e73ea1dd0757fd1449aff 100644 (file)
@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
        pkey_assert(err);
 }
 
+void become_child(void)
+{
+       pid_t forkret;
+
+       forkret = fork();
+       pkey_assert(forkret >= 0);
+       dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+       if (!forkret) {
+               /* in the child */
+               return;
+       }
+       exit(0);
+}
+
 /* Assumes that all pkeys other than 'pkey' are unallocated */
 void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 {
@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
        int nr_allocated_pkeys = 0;
        int i;
 
-       for (i = 0; i < NR_PKEYS*2; i++) {
+       for (i = 0; i < NR_PKEYS*3; i++) {
                int new_pkey;
                dprintf1("%s() alloc loop: %d\n", __func__, i);
                new_pkey = alloc_pkey();
@@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
                if ((new_pkey == -1) && (errno == ENOSPC)) {
                        dprintf2("%s() failed to allocate pkey after %d tries\n",
                                __func__, nr_allocated_pkeys);
-                       break;
+               } else {
+                       /*
+                        * Ensure the number of successes never
+                        * exceeds the number of keys supported
+                        * in the hardware.
+                        */
+                       pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+                       allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
                }
-               pkey_assert(nr_allocated_pkeys < NR_PKEYS);
-               allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+
+               /*
+                * Make sure that allocation state is properly
+                * preserved across fork().
+                */
+               if (i == NR_PKEYS*2)
+                       become_child();
        }
 
        dprintf3("%s()::%d\n", __func__, __LINE__);
 
-       /*
-        * ensure it did not reach the end of the loop without
-        * failure:
-        */
-       pkey_assert(i < NR_PKEYS*2);
-
        /*
         * There are 16 pkeys supported in hardware.  Three are
         * allocated by the time we get here:
index 00a26a82fa98b1eec4f82046b9a88517ceb8c7e4..97311333700e7154e19c86f5b5de29208332b429 100644 (file)
@@ -44,7 +44,6 @@ int main()
 #include <stdbool.h>
 #include <sys/ptrace.h>
 #include <sys/user.h>
-#include <sys/ucontext.h>
 #include <link.h>
 #include <sys/auxv.h>
 #include <dlfcn.h>