Merge tag '5.2-rc5-smb3-fixes' of git://git.samba.org/sfrench/cifs-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 21 Jun 2019 16:51:44 +0000 (09:51 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 21 Jun 2019 16:51:44 +0000 (09:51 -0700)
Pull cifs fixes from Steve French:
 "Four small SMB3 fixes, all for stable"

* tag '5.2-rc5-smb3-fixes' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: fix GlobalMid_Lock bug in cifs_reconnect
  SMB3: retry on STATUS_INSUFFICIENT_RESOURCES instead of failing write
  cifs: add spinlock for the openFileList to cifsInodeInfo
  cifs: fix panic in smb2_reconnect

495 files changed:
.mailmap
Documentation/ABI/testing/sysfs-class-net-qmi
Documentation/arm64/sve.txt
Documentation/block/switching-sched.txt
Documentation/cgroup-v1/blkio-controller.txt
Documentation/cgroup-v1/hugetlb.txt
Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
Documentation/devicetree/bindings/riscv/cpus.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/riscv/sifive.yaml [new file with mode: 0644]
Documentation/networking/af_xdp.rst
Documentation/networking/ip-sysctl.txt
Documentation/networking/rds.txt
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/am335x-pcm-953.dtsi
arch/arm/boot/dts/am335x-wega.dtsi
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/am437x-l4.dtsi
arch/arm/boot/dts/am57xx-idk-common.dtsi
arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts
arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts
arch/arm/boot/dts/bcm4708-luxul-xap-1510.dts
arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
arch/arm/boot/dts/bcm4708-netgear-r6250.dts
arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
arch/arm/boot/dts/bcm4709-netgear-r7000.dts
arch/arm/boot/dts/bcm4709-netgear-r8000.dts
arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts
arch/arm/boot/dts/bcm47094-phicomm-k3.dts
arch/arm/boot/dts/bcm94708.dts
arch/arm/boot/dts/bcm94709.dts
arch/arm/boot/dts/bcm963138dvt.dts
arch/arm/boot/dts/dra7-l4.dtsi
arch/arm/boot/dts/dra71-evm.dts
arch/arm/boot/dts/dra71x.dtsi [new file with mode: 0644]
arch/arm/boot/dts/dra72x.dtsi
arch/arm/boot/dts/dra76x-mmc-iodelay.dtsi
arch/arm/boot/dts/dra76x.dtsi
arch/arm/configs/mvebu_v7_defconfig
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/devices-da8xx.c
arch/arm/mach-imx/cpuidle-imx6sx.c
arch/arm/mach-ixp4xx/Kconfig
arch/arm/mach-ixp4xx/goramo_mlr.c
arch/arm/mach-ixp4xx/miccpt-pci.c
arch/arm/mach-ixp4xx/omixp-setup.c
arch/arm/mach-ixp4xx/vulcan-pci.c
arch/arm/mach-ixp4xx/vulcan-setup.c
arch/arm64/Kconfig.platforms
arch/arm64/Makefile
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/include/uapi/asm/ptrace.h
arch/arm64/include/uapi/asm/sigcontext.h
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ssbd.c
arch/arm64/kvm/guest.c
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/btext.h
arch/powerpc/include/asm/kexec.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/kernel/machine_kexec_32.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_init_check.sh
arch/powerpc/mm/book3s64/pgtable.c
arch/powerpc/mm/pgtable.c
arch/powerpc/net/bpf_jit.h
arch/powerpc/net/bpf_jit_comp64.c
arch/riscv/boot/dts/Makefile [new file with mode: 0644]
arch/riscv/boot/dts/sifive/Makefile [new file with mode: 0644]
arch/riscv/boot/dts/sifive/fu540-c000.dtsi [new file with mode: 0644]
arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts [new file with mode: 0644]
arch/riscv/configs/defconfig
arch/riscv/include/asm/bitops.h
arch/riscv/kernel/reset.c
arch/riscv/lib/delay.c
arch/riscv/mm/fault.c
arch/riscv/net/bpf_jit_comp.c
arch/s390/Makefile
arch/s390/include/asm/ctl_reg.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/intel-family.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/kgdb.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmcs12.h
arch/x86/mm/kasan_init_64.c
arch/x86/mm/kaslr.c
arch/x86/net/bpf_jit_comp.c
block/Kconfig
block/bfq-cgroup.c
block/bio.c
block/blk-mq-debugfs.c
block/blk-mq-debugfs.h
block/blk-mq-sched.c
drivers/ata/libata-core.c
drivers/base/devres.c
drivers/block/null_blk_zoned.c
drivers/block/ps3vram.c
drivers/bus/ti-sysc.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/timer-ti-dm.c
drivers/dax/device.c
drivers/firmware/Kconfig
drivers/firmware/imx/imx-scu-irq.c
drivers/gpio/gpio-pca953x.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi_vbt.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sdvo_regs.h
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/meson/meson_crtc.c
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/meson/meson_vclk.c
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/panfrost/Kconfig
drivers/gpu/drm/panfrost/panfrost_devfreq.c
drivers/hid/hid-a4tech.c
drivers/hid/hid-core.c
drivers/hid/hid-hyperv.c
drivers/hid/hid-ids.h
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-rmi.c
drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
drivers/hid/wacom_wac.c
drivers/i2c/busses/i2c-acorn.c
drivers/i2c/busses/i2c-pca-platform.c
drivers/input/keyboard/imx_keypad.c
drivers/input/misc/uinput.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/input/touchscreen/iqs5xx.c
drivers/input/touchscreen/silead.c
drivers/iommu/arm-smmu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-pasid.c
drivers/iommu/iommu.c
drivers/irqchip/irq-ixp4xx.c
drivers/md/bcache/bset.c
drivers/md/bcache/bset.h
drivers/md/bcache/sysfs.c
drivers/md/md.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/platform/qcom/venus/hfi_helper.h
drivers/mmc/core/core.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_io.c
drivers/mmc/core/sdio_irq.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-pci-o2micro.c
drivers/net/can/flexcan.c
drivers/net/can/m_can/m_can.c
drivers/net/can/spi/Kconfig
drivers/net/can/spi/mcp251x.c
drivers/net/can/usb/Kconfig
drivers/net/can/xilinx_can.c
drivers/net/dsa/Makefile
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/realtek-smi-core.c [moved from drivers/net/dsa/realtek-smi.c with 99% similarity]
drivers/net/dsa/realtek-smi-core.h [moved from drivers/net/dsa/realtek-smi.h with 100% similarity]
drivers/net/dsa/rtl8366.c
drivers/net/dsa/rtl8366rb.c
drivers/net/ethernet/8390/Kconfig
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/geneve.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/ax88796b.c [moved from drivers/net/phy/asix.c with 100% similarity]
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/mwifiex/ie.c
drivers/net/wireless/marvell/mwifiex/scan.c
drivers/net/wireless/realtek/rtw88/fw.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw88/phy.c
drivers/net/wireless/rsi/rsi_91x_sdio.c
drivers/nvdimm/pmem.c
drivers/pci/p2pdma.c
drivers/pci/pci-driver.c
drivers/platform/mellanox/mlxreg-hotplug.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/asus-wmi.h
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/mlx-platform.c
drivers/ras/cec.c
drivers/regulator/tps6507x-regulator.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa_cmd.h
drivers/soc/bcm/brcmstb/biuctrl.c
drivers/soc/ixp4xx/ixp4xx-npe.c
drivers/spi/spi-bitbang.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd.h
drivers/usb/dwc2/hcd_intr.c
drivers/usb/dwc2/hcd_queue.c
drivers/usb/gadget/udc/fusb300_udc.c
drivers/usb/gadget/udc/lpc32xx_udc.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/storage/unusual_realtek.h
drivers/usb/typec/bus.c
drivers/usb/typec/ucsi/ucsi_ccg.c
drivers/vfio/mdev/mdev_core.c
drivers/vfio/mdev/mdev_private.h
drivers/vfio/mdev/mdev_sysfs.c
drivers/xen/swiotlb-xen.c
fs/btrfs/extent-tree.c
fs/btrfs/ioctl.c
fs/btrfs/reada.c
fs/btrfs/xattr.c
fs/fuse/dev.c
fs/gfs2/bmap.c
fs/io_uring.c
fs/iomap.c
fs/namespace.c
fs/notify/fanotify/fanotify.c
fs/notify/mark.c
fs/ocfs2/dcache.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/super.c
fs/pnode.c
fs/quota/dquot.c
fs/xfs/xfs_aops.c
include/drm/drm_edid.h
include/linux/bio.h
include/linux/bpf-cgroup.h
include/linux/bpf.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/cpuhotplug.h
include/linux/device.h
include/linux/fsnotify_backend.h
include/linux/genalloc.h
include/linux/memcontrol.h
include/linux/memremap.h
include/linux/mmc/host.h
include/linux/mmc/sdio_func.h
include/linux/phylink.h
include/linux/sched/mm.h
include/linux/skmsg.h
include/linux/sysctl.h
include/linux/tcp.h
include/net/addrconf.h
include/net/cfg80211.h
include/net/flow_dissector.h
include/net/netns/ipv4.h
include/net/sock.h
include/net/tcp.h
include/sound/sof/dai.h
include/sound/sof/header.h
include/sound/sof/info.h
include/sound/sof/xtensa.h
include/uapi/linux/bpf.h
include/uapi/linux/snmp.h
include/uapi/sound/sof/abi.h
kernel/bpf/core.c
kernel/bpf/devmap.c
kernel/bpf/lpm_trie.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/cred.c
kernel/exit.c
kernel/livepatch/core.c
kernel/memremap.c
kernel/ptrace.c
kernel/sysctl.c
kernel/time/timekeeping.c
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_output.c
kernel/trace/trace_uprobe.c
lib/genalloc.c
lib/test_stackinit.c
mm/hmm.c
mm/khugepaged.c
mm/list_lru.c
mm/memcontrol.c
mm/mlock.c
mm/mmu_gather.c
mm/vmalloc.c
mm/vmscan.c
net/ax25/ax25_route.c
net/can/af_can.c
net/core/bpf_sk_storage.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/neighbour.c
net/core/skbuff.c
net/core/sock.c
net/core/sysctl_net_core.c
net/ipv4/fib_semantics.c
net/ipv4/ip_output.c
net/ipv4/proc.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv6/icmp.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_output.c
net/ipv6/reassembly.c
net/ipv6/udp.c
net/lapb/lapb_iface.c
net/mac80211/ieee80211_i.h
net/mac80211/key.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/tdls.c
net/mac80211/util.c
net/mac80211/wpa.c
net/mpls/Kconfig
net/mpls/mpls_iptunnel.c
net/nfc/netlink.c
net/openvswitch/vport-internal_dev.c
net/sched/cls_flower.c
net/sctp/sm_make_chunk.c
net/tipc/group.c
net/tls/tls_sw.c
net/vmw_vsock/hyperv_transport.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/Makefile
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/pmsr.c
net/wireless/scan.c
net/wireless/util.c
net/xdp/xdp_umem.c
samples/bpf/bpf_load.c
samples/bpf/task_fd_query_user.c
scripts/decode_stacktrace.sh
security/apparmor/include/policy.h
security/apparmor/policy_unpack.c
security/selinux/avc.c
security/selinux/hooks.c
security/smack/smack_lsm.c
sound/firewire/motu/motu-stream.c
sound/firewire/oxfw/oxfw.c
sound/hda/ext/hdac_ext_bus.c
sound/pci/hda/hda_codec.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/ews.c
sound/soc/codecs/ak4458.c
sound/soc/codecs/cs4265.c
sound/soc/codecs/cs42xx8.c
sound/soc/codecs/max98090.c
sound/soc/codecs/rt274.c
sound/soc/codecs/rt5670.c
sound/soc/codecs/rt5677-spi.c
sound/soc/fsl/fsl_asrc.c
sound/soc/intel/atom/sst/sst_pvt.c
sound/soc/intel/boards/bytcht_es8316.c
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/intel/boards/cht_bsw_nau8824.c
sound/soc/intel/boards/cht_bsw_rt5672.c
sound/soc/intel/boards/sof_rt5682.c
sound/soc/intel/common/soc-acpi-intel-byt-match.c
sound/soc/intel/common/soc-acpi-intel-cnl-match.c
sound/soc/mediatek/Kconfig
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/soc/sof/Kconfig
sound/soc/sof/control.c
sound/soc/sof/core.c
sound/soc/sof/intel/bdw.c
sound/soc/sof/intel/byt.c
sound/soc/sof/intel/cnl.c
sound/soc/sof/intel/hda-ctrl.c
sound/soc/sof/intel/hda-ipc.c
sound/soc/sof/intel/hda.c
sound/soc/sof/ipc.c
sound/soc/sof/loader.c
sound/soc/sof/pcm.c
sound/soc/sof/xtensa/core.c
sound/soc/sunxi/sun4i-codec.c
sound/soc/sunxi/sun4i-i2s.c
tools/arch/x86/include/uapi/asm/kvm.h
tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
tools/bpf/bpftool/Documentation/bpftool-prog.rst
tools/bpf/bpftool/bash-completion/bpftool
tools/bpf/bpftool/cgroup.c
tools/bpf/bpftool/map.c
tools/bpf/bpftool/prog.c
tools/include/uapi/linux/bpf.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf_internal.h
tools/lib/bpf/libbpf_probes.c
tools/testing/nvdimm/test/iomap.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/test_lpm_map.c
tools/testing/selftests/bpf/test_section_names.c
tools/testing/selftests/bpf/test_sock_addr.c
tools/testing/selftests/bpf/verifier/div_overflow.c
tools/testing/selftests/bpf/verifier/subreg.c [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/mmio_warning_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
tools/testing/selftests/net/forwarding/tc_flower.sh
virt/kvm/arm/arch_timer.c
virt/kvm/arm/vgic/vgic-its.c

index 07a777f9d687eb8a9bc74b4aa53c080af8f3fccb..0fef932de3dba5e696e51dce43ffdd5283077639 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -81,6 +81,7 @@ Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
 Henrik Rydberg <rydberg@bitmath.org>
@@ -238,6 +239,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Will Deacon <will@kernel.org> <will.deacon@arm.com>
 Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
index 7122d6264c49d6c02c2c0074e145f19b93fc63dd..c310db4ccbc2eeb2aa7250438fd9f7b308b8fdbd 100644 (file)
@@ -29,7 +29,7 @@ Contact:      Bjørn Mork <bjorn@mork.no>
 Description:
                Unsigned integer.
 
-               Write a number ranging from 1 to 127 to add a qmap mux
+               Write a number ranging from 1 to 254 to add a qmap mux
                based network device, supported by recent Qualcomm based
                modems.
 
@@ -46,5 +46,5 @@ Contact:      Bjørn Mork <bjorn@mork.no>
 Description:
                Unsigned integer.
 
-               Write a number ranging from 1 to 127 to delete a previously
+               Write a number ranging from 1 to 254 to delete a previously
                created qmap mux based network device.
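The hunk above widens the accepted qmap mux id range from 1..127 to 1..254. As a minimal C sketch of driving this sysfs interface (the interface name wwan0 and mux id 3 are made-up values for illustration, and the add_mux/del_mux attribute paths are assumed from this ABI file's usual layout):

/* Hedged sketch: add, then remove, a qmap mux device with id 3 on wwan0.
 * With this change any id from 1 to 254 should be accepted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_attr(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror(path);
                return;
        }
        if (write(fd, val, strlen(val)) < 0)
                perror("write");
        close(fd);
}

int main(void)
{
        write_attr("/sys/class/net/wwan0/qmi/add_mux", "3");
        /* ... use the newly created qmimux network device here ... */
        write_attr("/sys/class/net/wwan0/qmi/del_mux", "3");
        return 0;
}
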
index 9940e924a47ed4cf378d083c76b4820e1cafd68d..5689fc9a976afa6c352d5b033342f223879cbcb2 100644 (file)
@@ -56,6 +56,18 @@ model features for SVE is included in Appendix A.
   is to connect to a target process first and then attempt a
   ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov).
 
+* Whenever SVE scalable register values (Zn, Pn, FFR) are exchanged in memory
+  between userspace and the kernel, the register value is encoded in memory in
+  an endianness-invariant layout, with bits [(8 * i + 7) : (8 * i)] encoded at
+  byte offset i from the start of the memory representation.  This affects for
+  example the signal frame (struct sve_context) and ptrace interface
+  (struct user_sve_header) and associated data.
+
+  Beware that on big-endian systems this results in a different byte order than
+  for the FPSIMD V-registers, which are stored as single host-endian 128-bit
+  values, with bits [(127 - 8 * i) : (120 - 8 * i)] of the register encoded at
+  byte offset i.  (struct fpsimd_context, struct user_fpsimd_state).
+
 
 2.  Vector length terminology
 -----------------------------
@@ -124,6 +136,10 @@ the SVE instruction set architecture.
   size and layout.  Macros SVE_SIG_* are defined [1] to facilitate access to
   the members.
 
+* Each scalable register (Zn, Pn, FFR) is stored in an endianness-invariant
+  layout, with bits [(8 * i + 7) : (8 * i)] stored at byte offset i from the
+  start of the register's representation in memory.
+
 * If the SVE context is too big to fit in sigcontext.__reserved[], then extra
   space is allocated on the stack, an extra_context record is written in
   __reserved[] referencing this space.  sve_context is then written in the
index 3b2612e342f184664d0bb9d5a4cb941f21326825..7977f6fb8b2056bf7707c41fb6909e7e40fb5e91 100644 (file)
@@ -13,11 +13,9 @@ you can do so by typing:
 
 # mount none /sys -t sysfs
 
-As of the Linux 2.6.10 kernel, it is now possible to change the
-IO scheduler for a given block device on the fly (thus making it possible,
-for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the deadline or noop schedulers - which
-can improve that device's throughput).
+It is possible to change the IO scheduler for a given block device on
+the fly to select one of mq-deadline, none, bfq, or kyber schedulers -
+which can improve that device's throughput.
 
 To set a specific scheduler, simply do this:
 
@@ -30,8 +28,8 @@ The list of defined schedulers can be found by simply doing
 a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
 will be displayed, with the currently selected scheduler in brackets:
 
-# cat /sys/block/hda/queue/scheduler
-noop deadline [cfq]
-# echo deadline > /sys/block/hda/queue/scheduler
-# cat /sys/block/hda/queue/scheduler
-noop [deadline] cfq
+# cat /sys/block/sda/queue/scheduler
+[mq-deadline] kyber bfq none
+# echo none >/sys/block/sda/queue/scheduler
+# cat /sys/block/sda/queue/scheduler
+[none] mq-deadline kyber bfq
index 673dc34d3f7812c5030133821e81d9772f8aa546..d1a1b7bdd03add6eb4a0e84b20420ebdaea9dbaf 100644 (file)
@@ -8,61 +8,13 @@ both at leaf nodes as well as at intermediate nodes in a storage hierarchy.
 Plan is to use the same cgroup based management interface for blkio controller
 and based on user options switch IO policies in the background.
 
-Currently two IO control policies are implemented. First one is proportional
-weight time based division of disk policy. It is implemented in CFQ. Hence
-this policy takes effect only on leaf nodes when CFQ is being used. The second
-one is throttling policy which can be used to specify upper IO rate limits
-on devices. This policy is implemented in generic block layer and can be
-used on leaf nodes as well as higher level logical devices like device mapper.
+One IO control policy is throttling policy which can be used to
+specify upper IO rate limits on devices. This policy is implemented in
+generic block layer and can be used on leaf nodes as well as higher
+level logical devices like device mapper.
 
 HOWTO
 =====
-Proportional Weight division of bandwidth
------------------------------------------
-You can do a very simple testing of running two dd threads in two different
-cgroups. Here is what you can do.
-
-- Enable Block IO controller
-       CONFIG_BLK_CGROUP=y
-
-- Enable group scheduling in CFQ
-       CONFIG_CFQ_GROUP_IOSCHED=y
-
-- Compile and boot into kernel and mount IO controller (blkio); see
-  cgroups.txt, Why are cgroups needed?.
-
-       mount -t tmpfs cgroup_root /sys/fs/cgroup
-       mkdir /sys/fs/cgroup/blkio
-       mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
-
-- Create two cgroups
-       mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
-
-- Set weights of group test1 and test2
-       echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
-       echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
-
-- Create two same size files (say 512MB each) on same disk (file1, file2) and
-  launch two dd threads in different cgroup to read those files.
-
-       sync
-       echo 3 > /proc/sys/vm/drop_caches
-
-       dd if=/mnt/sdb/zerofile1 of=/dev/null &
-       echo $! > /sys/fs/cgroup/blkio/test1/tasks
-       cat /sys/fs/cgroup/blkio/test1/tasks
-
-       dd if=/mnt/sdb/zerofile2 of=/dev/null &
-       echo $! > /sys/fs/cgroup/blkio/test2/tasks
-       cat /sys/fs/cgroup/blkio/test2/tasks
-
-- At macro level, first dd should finish first. To get more precise data, keep
-  on looking at (with the help of script), at blkio.disk_time and
-  blkio.disk_sectors files of both test1 and test2 groups. This will tell how
-  much disk time (in milliseconds), each group got and how many sectors each
-  group dispatched to the disk. We provide fairness in terms of disk time, so
-  ideally io.disk_time of cgroups should be in proportion to the weight.
-
 Throttling/Upper Limit policy
 -----------------------------
 - Enable Block IO controller
@@ -94,7 +46,7 @@ Throttling/Upper Limit policy
 Hierarchical Cgroups
 ====================
 
-Both CFQ and throttling implement hierarchy support; however,
+Throttling implements hierarchy support; however,
 throttling's hierarchy support is enabled iff "sane_behavior" is
 enabled from cgroup side, which currently is a development option and
 not publicly available.
@@ -107,9 +59,8 @@ If somebody created a hierarchy like as follows.
                        |
                     test3
 
-CFQ by default and throttling with "sane_behavior" will handle the
-hierarchy correctly.  For details on CFQ hierarchy support, refer to
-Documentation/block/cfq-iosched.txt.  For throttling, all limits apply
+Throttling with "sane_behavior" will handle the
+hierarchy correctly. For throttling, all limits apply
 to the whole subtree while all statistics are local to the IOs
 directly generated by tasks in that cgroup.
 
@@ -130,10 +81,6 @@ CONFIG_DEBUG_BLK_CGROUP
        - Debug help. Right now some additional stats file show up in cgroup
          if this option is enabled.
 
-CONFIG_CFQ_GROUP_IOSCHED
-       - Enables group scheduling in CFQ. Currently only 1 level of group
-         creation is allowed.
-
 CONFIG_BLK_DEV_THROTTLING
        - Enable block device throttling support in block layer.
 
@@ -344,32 +291,3 @@ Common files among various policies
 - blkio.reset_stats
        - Writing an int to this file will result in resetting all the stats
          for that cgroup.
-
-CFQ sysfs tunable
-=================
-/sys/block/<disk>/queue/iosched/slice_idle
-------------------------------------------
-On a faster hardware CFQ can be slow, especially with sequential workload.
-This happens because CFQ idles on a single queue and single queue might not
-drive deeper request queue depths to keep the storage busy. In such scenarios
-one can try setting slice_idle=0 and that would switch CFQ to IOPS
-(IO operations per second) mode on NCQ supporting hardware.
-
-That means CFQ will not idle between cfq queues of a cfq group and hence be
-able to driver higher queue depth and achieve better throughput. That also
-means that cfq provides fairness among groups in terms of IOPS and not in
-terms of disk time.
-
-/sys/block/<disk>/queue/iosched/group_idle
-------------------------------------------
-If one disables idling on individual cfq queues and cfq service trees by
-setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
-on the group in an attempt to provide fairness among groups.
-
-By default group_idle is same as slice_idle and does not do anything if
-slice_idle is enabled.
-
-One can experience an overall throughput drop if you have created multiple
-groups and put applications in that group which are not driving enough
-IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle
-on individual groups and throughput should improve.
index 106245c3aecc3f32b465196c1fe6ae767ca9ac10..1260e5369b9b5309806fb83702871076bdf66989 100644 (file)
@@ -32,14 +32,18 @@ Brief summary of control files
  hugetlb.<hugepagesize>.usage_in_bytes     # show current usage for "hugepagesize" hugetlb
  hugetlb.<hugepagesize>.failcnt                   # show the number of allocation failure due to HugeTLB limit
 
-For a system supporting two hugepage size (16M and 16G) the control
+For a system supporting three hugepage sizes (64k, 32M and 1G), the control
 files include:
 
-hugetlb.16GB.limit_in_bytes
-hugetlb.16GB.max_usage_in_bytes
-hugetlb.16GB.usage_in_bytes
-hugetlb.16GB.failcnt
-hugetlb.16MB.limit_in_bytes
-hugetlb.16MB.max_usage_in_bytes
-hugetlb.16MB.usage_in_bytes
-hugetlb.16MB.failcnt
+hugetlb.1GB.limit_in_bytes
+hugetlb.1GB.max_usage_in_bytes
+hugetlb.1GB.usage_in_bytes
+hugetlb.1GB.failcnt
+hugetlb.64KB.limit_in_bytes
+hugetlb.64KB.max_usage_in_bytes
+hugetlb.64KB.usage_in_bytes
+hugetlb.64KB.failcnt
+hugetlb.32MB.limit_in_bytes
+hugetlb.32MB.max_usage_in_bytes
+hugetlb.32MB.usage_in_bytes
+hugetlb.32MB.failcnt
index 188c8bd4eb67709bdc05bc1014fe86d692256be4..5a0111d4de58c2e546ff257708c3ae5c07cf08e1 100644 (file)
@@ -4,6 +4,7 @@ Required properties:
  - compatible: Should be one of the following:
    - "microchip,mcp2510" for MCP2510.
    - "microchip,mcp2515" for MCP2515.
+   - "microchip,mcp25625" for MCP25625.
  - reg: SPI chip select.
  - clocks: The clock feeding the CAN controller.
  - interrupts: Should contain IRQ line for the CAN controller.
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
new file mode 100644 (file)
index 0000000..27f02ec
--- /dev/null
@@ -0,0 +1,168 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/riscv/cpus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RISC-V bindings for 'cpus' DT nodes
+
+maintainers:
+  - Paul Walmsley <paul.walmsley@sifive.com>
+  - Palmer Dabbelt <palmer@sifive.com>
+
+allOf:
+  - $ref: /schemas/cpus.yaml#
+
+properties:
+  $nodename:
+    const: cpus
+    description: Container of cpu nodes
+
+  '#address-cells':
+    const: 1
+    description: |
+      A single unsigned 32-bit integer uniquely identifies each RISC-V
+      hart in a system.  (See the "reg" node under the "cpu" node,
+      below).
+
+  '#size-cells':
+    const: 0
+
+patternProperties:
+  '^cpu@[0-9a-f]+$':
+    properties:
+      compatible:
+        type: array
+        items:
+          - enum:
+              - sifive,rocket0
+              - sifive,e5
+              - sifive,e51
+              - sifive,u54-mc
+              - sifive,u54
+              - sifive,u5
+          - const: riscv
+        description:
+          Identifies that the hart uses the RISC-V instruction set
+          and identifies the type of the hart.
+
+      mmu-type:
+        allOf:
+          - $ref: "/schemas/types.yaml#/definitions/string"
+          - enum:
+              - riscv,sv32
+              - riscv,sv39
+              - riscv,sv48
+        description:
+          Identifies the MMU address translation mode used on this
+          hart.  These values originate from the RISC-V Privileged
+          Specification document, available from
+          https://riscv.org/specifications/
+
+      riscv,isa:
+        allOf:
+          - $ref: "/schemas/types.yaml#/definitions/string"
+          - enum:
+              - rv64imac
+              - rv64imafdc
+        description:
+          Identifies the specific RISC-V instruction set architecture
+          supported by the hart.  These are documented in the RISC-V
+          User-Level ISA document, available from
+          https://riscv.org/specifications/
+
+      timebase-frequency:
+        type: integer
+        minimum: 1
+        description:
+          Specifies the clock frequency of the system timer in Hz.
+          This value is common to all harts on a single system image.
+
+      interrupt-controller:
+        type: object
+        description: Describes the CPU's local interrupt controller
+
+        properties:
+          '#interrupt-cells':
+            const: 1
+
+          compatible:
+            const: riscv,cpu-intc
+
+          interrupt-controller: true
+
+        required:
+          - '#interrupt-cells'
+          - compatible
+          - interrupt-controller
+
+    required:
+      - riscv,isa
+      - timebase-frequency
+      - interrupt-controller
+
+examples:
+  - |
+    // Example 1: SiFive Freedom U540G Development Kit
+    cpus {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        timebase-frequency = <1000000>;
+        cpu@0 {
+                clock-frequency = <0>;
+                compatible = "sifive,rocket0", "riscv";
+                device_type = "cpu";
+                i-cache-block-size = <64>;
+                i-cache-sets = <128>;
+                i-cache-size = <16384>;
+                reg = <0>;
+                riscv,isa = "rv64imac";
+                cpu_intc0: interrupt-controller {
+                        #interrupt-cells = <1>;
+                        compatible = "riscv,cpu-intc";
+                        interrupt-controller;
+                };
+        };
+        cpu@1 {
+                clock-frequency = <0>;
+                compatible = "sifive,rocket0", "riscv";
+                d-cache-block-size = <64>;
+                d-cache-sets = <64>;
+                d-cache-size = <32768>;
+                d-tlb-sets = <1>;
+                d-tlb-size = <32>;
+                device_type = "cpu";
+                i-cache-block-size = <64>;
+                i-cache-sets = <64>;
+                i-cache-size = <32768>;
+                i-tlb-sets = <1>;
+                i-tlb-size = <32>;
+                mmu-type = "riscv,sv39";
+                reg = <1>;
+                riscv,isa = "rv64imafdc";
+                tlb-split;
+                cpu_intc1: interrupt-controller {
+                        #interrupt-cells = <1>;
+                        compatible = "riscv,cpu-intc";
+                        interrupt-controller;
+                };
+        };
+    };
+
+  - |
+    // Example 2: Spike ISA Simulator with 1 Hart
+    cpus {
+            cpu@0 {
+                    device_type = "cpu";
+                    reg = <0>;
+                    compatible = "riscv";
+                    riscv,isa = "rv64imafdc";
+                    mmu-type = "riscv,sv48";
+                    interrupt-controller {
+                            #interrupt-cells = <1>;
+                            interrupt-controller;
+                            compatible = "riscv,cpu-intc";
+                    };
+            };
+    };
+...
diff --git a/Documentation/devicetree/bindings/riscv/sifive.yaml b/Documentation/devicetree/bindings/riscv/sifive.yaml
new file mode 100644 (file)
index 0000000..9d17dc2
--- /dev/null
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/riscv/sifive.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SiFive SoC-based boards
+
+maintainers:
+  - Paul Walmsley <paul.walmsley@sifive.com>
+  - Palmer Dabbelt <palmer@sifive.com>
+
+description:
+  SiFive SoC-based boards
+
+properties:
+  $nodename:
+    const: '/'
+  compatible:
+    items:
+      - enum:
+          - sifive,freedom-unleashed-a00
+      - const: sifive,fu540-c000
+      - const: sifive,fu540
+...
index e14d7d40fc75d0efca88a5aba77a4965ba93ff86..50bccbf68308594109e22dfeca6d91b48883e674 100644 (file)
@@ -316,16 +316,16 @@ A: When a netdev of a physical NIC is initialized, Linux usually
    all the traffic, you can force the netdev to only have 1 queue, queue
    id 0, and then bind to queue 0. You can use ethtool to do this::
 
-   sudo ethtool -L <interface> combined 1
+     sudo ethtool -L <interface> combined 1
 
    If you want to only see part of the traffic, you can program the
    NIC through ethtool to filter out your traffic to a single queue id
    that you can bind your XDP socket to. Here is one example in which
    UDP traffic to and from port 4242 are sent to queue 2::
 
-   sudo ethtool -N <interface> rx-flow-hash udp4 fn
-   sudo ethtool -N <interface> flow-type udp4 src-port 4242 dst-port \
-   4242 action 2
+     sudo ethtool -N <interface> rx-flow-hash udp4 fn
+     sudo ethtool -N <interface> flow-type udp4 src-port 4242 dst-port \
+     4242 action 2
 
    A number of other ways are possible all up to the capabilitites of
    the NIC you have.
index 14fe93049d28e965d7349b03c5c8782c3d386e7d..22f6b8b1110ad20c36e7ceea6d67fd2cc938eb7b 100644 (file)
@@ -255,6 +255,14 @@ tcp_base_mss - INTEGER
        Path MTU discovery (MTU probing).  If MTU probing is enabled,
        this is the initial MSS used by the connection.
 
+tcp_min_snd_mss - INTEGER
+       TCP SYN and SYNACK messages usually advertise an ADVMSS option,
+       as described in RFC 1122 and RFC 6691.
+       If this ADVMSS option is smaller than tcp_min_snd_mss,
+       it is silently capped to tcp_min_snd_mss.
+
+       Default : 48 (at least 8 bytes of payload per segment)
+
 tcp_congestion_control - STRING
        Set the congestion control algorithm to be used for new
        connections. The algorithm "reno" is always available, but
@@ -772,6 +780,14 @@ tcp_challenge_ack_limit - INTEGER
        in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
        Default: 100
 
+tcp_rx_skb_cache - BOOLEAN
+       Controls a per TCP socket cache of one skb, that might help
+       performance of some workloads. This might be dangerous
+       on systems with a lot of TCP sockets, since it increases
+       memory usage.
+
+       Default: 0 (disabled)
+
 UDP variables:
 
 udp_l3mdev_accept - BOOLEAN
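The ip-sysctl hunk above documents the new tcp_min_snd_mss and tcp_rx_skb_cache knobs. A small C sketch that reports their current values follows; the paths assume the usual /proc/sys mapping of net.ipv4.* sysctls, and 48 / 0 are only the documented defaults:

/* Hedged sketch: print the two knobs added in the hunk above. */
#include <stdio.h>

static void show(const char *path)
{
        FILE *f = fopen(path, "r");
        int v;

        if (f && fscanf(f, "%d", &v) == 1)
                printf("%s = %d\n", path, v);
        if (f)
                fclose(f);
}

int main(void)
{
        show("/proc/sys/net/ipv4/tcp_min_snd_mss");
        show("/proc/sys/net/ipv4/tcp_rx_skb_cache");
        return 0;
}
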
index 0235ae69af2a862938e550e61453b4fe81d7b2d3..f2a0147c933d1b7be6f9b09de38de2e8994e6e2f 100644 (file)
@@ -389,7 +389,7 @@ Multipath RDS (mprds)
   a common (to all paths) part, and a per-path struct rds_conn_path. All
   I/O workqs and reconnect threads are driven from the rds_conn_path.
   Transports such as TCP that are multipath capable may then set up a
-  TPC socket per rds_conn_path, and this is managed by the transport via
+  TCP socket per rds_conn_path, and this is managed by the transport via
   the transport privatee cp_transport_data pointer.
 
   Transports announce themselves as multipath capable by setting the
index ba6c42c576ddd9a8c0a9c6f58ba038db5a8fdb46..2a4531bb06bde708d1851072fa2023ccb11496b5 100644 (file)
@@ -1079,7 +1079,7 @@ yet and must be cleared on entry.
 
 4.35 KVM_SET_USER_MEMORY_REGION
 
-Capability: KVM_CAP_USER_MEM
+Capability: KVM_CAP_USER_MEMORY
 Architectures: all
 Type: vm ioctl
 Parameters: struct kvm_userspace_memory_region (in)
@@ -3857,43 +3857,59 @@ Type: vcpu ioctl
 Parameters: struct kvm_nested_state (in/out)
 Returns: 0 on success, -1 on error
 Errors:
-  E2BIG:     the total state size (including the fixed-size part of struct
-             kvm_nested_state) exceeds the value of 'size' specified by
+  E2BIG:     the total state size exceeds the value of 'size' specified by
              the user; the size required will be written into size.
 
 struct kvm_nested_state {
        __u16 flags;
        __u16 format;
        __u32 size;
+
        union {
-               struct kvm_vmx_nested_state vmx;
-               struct kvm_svm_nested_state svm;
+               struct kvm_vmx_nested_state_hdr vmx;
+               struct kvm_svm_nested_state_hdr svm;
+
+               /* Pad the header to 128 bytes.  */
                __u8 pad[120];
-       };
-       __u8 data[0];
+       } hdr;
+
+       union {
+               struct kvm_vmx_nested_state_data vmx[0];
+               struct kvm_svm_nested_state_data svm[0];
+       } data;
 };
 
 #define KVM_STATE_NESTED_GUEST_MODE    0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING   0x00000002
+#define KVM_STATE_NESTED_EVMCS         0x00000004
 
-#define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
-#define KVM_STATE_NESTED_SMM_VMXON     0x00000002
+#define KVM_STATE_NESTED_FORMAT_VMX            0
+#define KVM_STATE_NESTED_FORMAT_SVM            1
 
-struct kvm_vmx_nested_state {
+#define KVM_STATE_NESTED_VMX_VMCS_SIZE         0x1000
+
+#define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE    0x00000001
+#define KVM_STATE_NESTED_VMX_SMM_VMXON         0x00000002
+
+struct kvm_vmx_nested_state_hdr {
        __u64 vmxon_pa;
-       __u64 vmcs_pa;
+       __u64 vmcs12_pa;
 
        struct {
                __u16 flags;
        } smm;
 };
 
+struct kvm_vmx_nested_state_data {
+       __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+       __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+};
+
 This ioctl copies the vcpu's nested virtualization state from the kernel to
 userspace.
 
-The maximum size of the state, including the fixed-size part of struct
-kvm_nested_state, can be retrieved by passing KVM_CAP_NESTED_STATE to
-the KVM_CHECK_EXTENSION ioctl().
+The maximum size of the state can be retrieved by passing KVM_CAP_NESTED_STATE
+to the KVM_CHECK_EXTENSION ioctl().
 
 4.115 KVM_SET_NESTED_STATE
 
@@ -3903,8 +3919,8 @@ Type: vcpu ioctl
 Parameters: struct kvm_nested_state (in)
 Returns: 0 on success, -1 on error
 
-This copies the vcpu's kvm_nested_state struct from userspace to the kernel.  For
-the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
+This copies the vcpu's kvm_nested_state struct from userspace to the kernel.
+For the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
 
 4.116 KVM_(UN)REGISTER_COALESCED_MMIO
 
index 57f496cff9997358f6626d2207f09cacd2f0e237..46d3497af51febd374cea03ca771ce377b35a410 100644 (file)
@@ -364,7 +364,7 @@ F:  drivers/acpi/fan.c
 
 ACPI FOR ARM64 (ACPI/arm64)
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-M:     Hanjun Guo <hanjun.guo@linaro.org>
+M:     Hanjun Guo <guohanjun@huawei.com>
 M:     Sudeep Holla <sudeep.holla@arm.com>
 L:     linux-acpi@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1235,7 +1235,7 @@ F:        arch/arm/lib/floppydma.S
 F:     arch/arm/include/asm/floppy.h
 
 ARM PMU PROFILING AND DEBUGGING
-M:     Will Deacon <will.deacon@arm.com>
+M:     Will Deacon <will@kernel.org>
 M:     Mark Rutland <mark.rutland@arm.com>
 S:     Maintained
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1307,7 +1307,7 @@ F:        Documentation/devicetree/bindings/interrupt-controller/arm,vic.txt
 F:     drivers/irqchip/irq-vic.c
 
 ARM SMMU DRIVERS
-M:     Will Deacon <will.deacon@arm.com>
+M:     Will Deacon <will@kernel.org>
 R:     Robin Murphy <robin.murphy@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -2085,7 +2085,7 @@ F:        drivers/tty/serial/msm_serial.c
 F:     drivers/usb/dwc3/dwc3-qcom.c
 F:     include/dt-bindings/*/qcom*
 F:     include/linux/*/qcom*
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
 
 ARM/RADISYS ENP2611 MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
@@ -2550,7 +2550,7 @@ F:        drivers/i2c/busses/i2c-xiic.c
 
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:     Catalin Marinas <catalin.marinas@arm.com>
-M:     Will Deacon <will.deacon@arm.com>
+M:     Will Deacon <will@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
 S:     Maintained
@@ -2734,7 +2734,7 @@ S:        Maintained
 F:     drivers/net/wireless/atmel/atmel*
 
 ATOMIC INFRASTRUCTURE
-M:     Will Deacon <will.deacon@arm.com>
+M:     Will Deacon <will@kernel.org>
 M:     Peter Zijlstra <peterz@infradead.org>
 R:     Boqun Feng <boqun.feng@gmail.com>
 L:     linux-kernel@vger.kernel.org
@@ -3121,7 +3121,7 @@ F:        arch/arm/mach-bcm/
 
 BROADCOM BCM2835 ARM ARCHITECTURE
 M:     Eric Anholt <eric@anholt.net>
-M:     Stefan Wahren <stefan.wahren@i2se.com>
+M:     Stefan Wahren <wahrenst@gmx.net>
 L:     linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://github.com/anholt/linux
@@ -8566,7 +8566,7 @@ S:        Odd Fixes
 
 KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
 M:     "J. Bruce Fields" <bfields@fieldses.org>
-M:     Jeff Layton <jlayton@kernel.org>
+M:     Chuck Lever <chuck.lever@oracle.com>
 L:     linux-nfs@vger.kernel.org
 W:     http://nfs.sourceforge.net/
 T:     git git://linux-nfs.org/~bfields/linux.git
@@ -9121,7 +9121,7 @@ F:        drivers/misc/lkdtm/*
 LINUX KERNEL MEMORY CONSISTENCY MODEL (LKMM)
 M:     Alan Stern <stern@rowland.harvard.edu>
 M:     Andrea Parri <andrea.parri@amarulasolutions.com>
-M:     Will Deacon <will.deacon@arm.com>
+M:     Will Deacon <will@kernel.org>
 M:     Peter Zijlstra <peterz@infradead.org>
 M:     Boqun Feng <boqun.feng@gmail.com>
 M:     Nicholas Piggin <npiggin@gmail.com>
@@ -9229,7 +9229,7 @@ F:        Documentation/admin-guide/LSM/LoadPin.rst
 LOCKING PRIMITIVES
 M:     Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
-M:     Will Deacon <will.deacon@arm.com>
+M:     Will Deacon <will@kernel.org>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
 S:     Maintained
@@ -10550,7 +10550,7 @@ F:      arch/arm/boot/dts/mmp*
 F:     arch/arm/mach-mmp/
 
 MMU GATHER AND TLB INVALIDATION
-M:     Will Deacon <will.deacon@arm.com>
+M:     Will Deacon <will@kernel.org>
 M:     "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
 M:     Andrew Morton <akpm@linux-foundation.org>
 M:     Nick Piggin <npiggin@gmail.com>
@@ -12039,7 +12039,7 @@ S:      Maintained
 F:     drivers/pci/controller/dwc/*layerscape*
 
 PCI DRIVER FOR GENERIC OF HOSTS
-M:     Will Deacon <will.deacon@arm.com>
+M:     Will Deacon <will@kernel.org>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -14335,6 +14335,15 @@ S:     Supported
 K:     sifive
 N:     sifive
 
+SIFIVE FU540 SYSTEM-ON-CHIP
+M:     Paul Walmsley <paul.walmsley@sifive.com>
+M:     Palmer Dabbelt <palmer@sifive.com>
+L:     linux-riscv@lists.infradead.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pjw/sifive.git
+S:     Supported
+K:     fu540
+N:     fu540
+
 SILEAD TOUCHSCREEN DRIVER
 M:     Hans de Goede <hdegoede@redhat.com>
 L:     linux-input@vger.kernel.org
index b81e1726125077a2c1bf2d4141e65596f2510528..9514dac2660a914dcaf5d0c7f6c10140c7a23df9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Golden Lions
 
 # *DOCUMENTATION*
index baceaa7bb33b133731bdab2c55aea0fa30741678..20a3d982769234d982234e3ffe5d390bd443d0cd 100644 (file)
@@ -197,7 +197,7 @@ &mmc1 {
        bus-width = <4>;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc1_pins>;
-       cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
index b7d28a20341fb5d0494d55cd5a261f2c20d87a2f..84581fed3d060e07852a1dfc8081cd0fc7052e6e 100644 (file)
@@ -157,7 +157,7 @@ &mmc1 {
        bus-width = <4>;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc1_pins>;
-       cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
index ca6d9f02a800c8a0e042d43280fa762ebce6fef4..ced1a19d5f8982f11c12986dce5ea4d81decec3e 100644 (file)
@@ -1759,11 +1759,10 @@ target-module@b0000 {                   /* 0x481b0000, ap 58 50.0 */
                target-module@cc000 {                   /* 0x481cc000, ap 60 46.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
                        ti,hwmods = "d_can0";
-                       reg = <0xcc000 0x4>;
-                       reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>;
-                       clock-names = "fck";
+                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
+                                <&dcan0_fck>;
+                       clock-names = "fck", "osc";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0xcc000 0x2000>;
@@ -1782,11 +1781,10 @@ dcan0: can@0 {
                target-module@d0000 {                   /* 0x481d0000, ap 62 42.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
                        ti,hwmods = "d_can1";
-                       reg = <0xd0000 0x4>;
-                       reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>;
-                       clock-names = "fck";
+                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
+                                <&dcan1_fck>;
+                       clock-names = "fck", "osc";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0xd0000 0x2000>;
index 85c6f4ff18242f7a26c669bd4036c7cf4cab8633..989cb60b90295868f1b983ff4129c867c33fce6b 100644 (file)
@@ -1575,8 +1575,6 @@ timer8: timer@0 {
                target-module@cc000 {                   /* 0x481cc000, ap 50 46.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
                        ti,hwmods = "d_can0";
-                       reg = <0xcc000 0x4>;
-                       reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
                        clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
                        clock-names = "fck";
@@ -1596,8 +1594,6 @@ dcan0: can@0 {
                target-module@d0000 {                   /* 0x481d0000, ap 52 3a.0 */
                        compatible = "ti,sysc-omap4", "ti,sysc";
                        ti,hwmods = "d_can1";
-                       reg = <0xd0000 0x4>;
-                       reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
                        clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
                        clock-names = "fck";
index f7bd26458915fbb1656c62e9668bda4a8d0ad6a3..42e433da79ec869c9029155585bbffb1913e8d03 100644 (file)
@@ -420,6 +420,7 @@ &mmc1 {
        vqmmc-supply = <&ldo1_reg>;
        bus-width = <4>;
        cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
+       no-1-8-v;
 };
 
 &mmc2 {
index 79d454ff3be4205f9cb816f7c5a819f91c6e140f..1c6f561ac52b2739570e3638f2855d920626748f 100644 (file)
@@ -20,6 +20,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x08000000>;
        };
index 99365bb8c41ed39cce31583a2e7a3ae78113bfa7..e550799a6ae0529276ac520dde3ce95e86da2f7f 100644 (file)
@@ -20,6 +20,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x08000000>;
        };
index bc330b1f6de094f0cd4bd94623f5ee929a337188..7bfa2238f70bc6b0b440dce02637d41cb45c229c 100644 (file)
@@ -20,6 +20,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x18000000>;
        };
index 258d2b251900cd4b6ade3a6198b24a71655f6d23..fd361c9b1374361f23f932396f13a54fb688823a 100644 (file)
@@ -17,6 +17,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000>;
        };
 
index babcfec50dde23e5f60bf60c7530b003d2619476..7c34360d3285d5c2ce460089e62434775c50f688 100644 (file)
@@ -18,6 +18,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000>;
        };
 
index e7fdaed99bd0acd0607b5caf8cbbebb4aab0528f..969b8d78e4929b6db22bebaf8d932756aae7aa0c 100644 (file)
@@ -16,6 +16,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000>;
        };
 
index 42bafc6440130be6f729b1f69f1228b7cc72659e..b62854ee27abb4404d0e77bfda0a4a498adf76f7 100644 (file)
@@ -20,6 +20,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000>;
        };
 
index dce35eb79dbe55c3a14e4e8993b2c58a368c544c..75f7b4ef35da6b0021041202ae529ac5fa7e0ebd 100644 (file)
@@ -21,6 +21,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x08000000>;
        };
index b7a024b7951bcada9c3eae17087048f0b2609020..148d16a9085e89d6176f05bef6512fca329ef9e3 100644 (file)
@@ -20,6 +20,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x08000000>;
        };
index f7f834cd3448d0b4f7ea54869285a50f9455d409..eed3aab6679b783d09cad511235a27074fbfc6bb 100644 (file)
@@ -20,6 +20,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x08000000>;
        };
index 4cb10f88a95eafc1e51c501d4591704a4256a971..8f1e565c3db45b709ba1a4be968d99aa174098ef 100644 (file)
@@ -20,6 +20,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x08000000>;
        };
index 77d1687b4228b60950628de95a4f7d14c0b81325..ce888b1835d1f7e9a177067d8cd72372bd2eacc3 100644 (file)
@@ -20,6 +20,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x18000000>;
        };
index 983149b55269c3be51227a4b645f9a8c9a9e843f..ed8619b54d692e7d52026d44a03aebdd6e7b6fb3 100644 (file)
@@ -17,6 +17,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x08000000>;
        };
index ca41481b44bd1ff296b0de84dec67868a053225e..1f87993eae1d1a8407dd3e07256888806c1b882e 100644 (file)
@@ -20,6 +20,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x08000000>;
        };
index aa69e656d395367fad0fc3beeb1633463ac9bd46..6c6199a53d0918d707c362aae413dd7fde837545 100644 (file)
@@ -31,6 +31,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x08000000>;
        };
index b527d2ff987ed36b4564245974375b84b9617f06..f806be5da723779ebc9c40781191e711013cb926 100644 (file)
@@ -16,6 +16,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000>;
        };
 
index ec09c0426d16a8037828e0169a75a38ff7310a2a..456045f17a00d7239c3580266d20f039ac18a7ba 100644 (file)
@@ -14,6 +14,7 @@ / {
        model = "Phicomm K3";
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x18000000>;
        };
index 934f07adfe3c00b025097cfc4cb911ddfa061645..3d13e46c69494579dbbbc34d65d0fd78af14eaa3 100644 (file)
@@ -39,6 +39,7 @@ / {
        compatible = "brcm,bcm94708", "brcm,bcm4708";
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000>;
        };
 };
index 31e4dd098776a490e0f0578f94e4c12697e0776f..5017b7b259cbe20463753cd8e8303de09e6258aa 100644 (file)
@@ -39,6 +39,7 @@ / {
        compatible = "brcm,bcm94709", "brcm,bcm4709", "brcm,bcm4708";
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000>;
        };
 };
index 8dca97eeaf57e6a5a66b647c6c8f5b29f0f62421..29525686e51a060dd31130e0d2cc10f526d23780 100644 (file)
@@ -17,6 +17,7 @@ chosen {
        };
 
        memory {
+               device_type = "memory";
                reg = <0x0 0x08000000>;
        };
 
index fe9f0bc29fec20df23b0bd0a6567d610f92f90dc..23faedec08abd04f215ba55d21391281d93e374f 100644 (file)
@@ -3543,7 +3543,7 @@ timer16: timer@0 {
                        };
                };
 
-               target-module@38000 {                   /* 0x48838000, ap 29 12.0 */
+               rtctarget: target-module@38000 {                        /* 0x48838000, ap 29 12.0 */
                        compatible = "ti,sysc-omap4-simple", "ti,sysc";
                        ti,hwmods = "rtcss";
                        reg = <0x38074 0x4>,
@@ -4450,8 +4450,6 @@ target-module@0 {                 /* 0x4ae20000, ap 19 08.0 */
                        timer12: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&wkupaon_clkctrl DRA7_WKUPAON_TIMER12_CLKCTRL 24>;
-                               clock-names = "fck";
                                interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
                                ti,timer-alwon;
                                ti,timer-secure;
index 82cc7ec37af0a3204f32241642568f31272e7be8..c496ae83e27ea8a53b2c396074510ebdf6d59823 100644 (file)
@@ -6,7 +6,7 @@
  * published by the Free Software Foundation.
  */
 
-#include "dra72-evm-common.dtsi"
+#include "dra71x.dtsi"
 #include "dra7-mmc-iodelay.dtsi"
 #include "dra72x-mmc-iodelay.dtsi"
 #include <dt-bindings/net/ti-dp83867.h>
diff --git a/arch/arm/boot/dts/dra71x.dtsi b/arch/arm/boot/dts/dra71x.dtsi
new file mode 100644 (file)
index 0000000..695a08e
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "dra72-evm-common.dtsi"
+
+&rtctarget {
+       status = "disabled";
+};
+
+&usb4_tm {
+       status = "disabled";
+};
index 89831552cd86c5121a83e2ab23dbc467ae9d5c8b..9c39c6b9b5d60ad4b6b7aee41276424ea0711a4a 100644 (file)
@@ -62,3 +62,7 @@ &pcie1_ep {
 &pcie2_rc {
        compatible = "ti,dra726-pcie-rc", "ti,dra7-pcie";
 };
+
+&usb4_tm {
+       status = "disabled";
+};
index baba7b00eca7976c92847fe664e7b599364d83a7..fdca48186916097692406f8518e2af96765a5ce6 100644 (file)
@@ -22,7 +22,7 @@
  *
  * Datamanual Revisions:
  *
- * DRA76x Silicon Revision 1.0: SPRS993A, Revised July 2017
+ * DRA76x Silicon Revision 1.0: SPRS993E, Revised December 2018
  *
  */
 
@@ -169,25 +169,25 @@ mmc1_iodelay_sdr104_conf: mmc1_iodelay_sdr104_conf {
        /* Corresponds to MMC2_HS200_MANUAL1 in datamanual */
        mmc2_iodelay_hs200_conf: mmc2_iodelay_hs200_conf {
                pinctrl-pin-array = <
-                       0x190 A_DELAY_PS(384) G_DELAY_PS(0)       /* CFG_GPMC_A19_OEN */
-                       0x194 A_DELAY_PS(0) G_DELAY_PS(174)       /* CFG_GPMC_A19_OUT */
-                       0x1a8 A_DELAY_PS(410) G_DELAY_PS(0)       /* CFG_GPMC_A20_OEN */
-                       0x1ac A_DELAY_PS(85) G_DELAY_PS(0)        /* CFG_GPMC_A20_OUT */
-                       0x1b4 A_DELAY_PS(468) G_DELAY_PS(0)       /* CFG_GPMC_A21_OEN */
-                       0x1b8 A_DELAY_PS(139) G_DELAY_PS(0)       /* CFG_GPMC_A21_OUT */
-                       0x1c0 A_DELAY_PS(676) G_DELAY_PS(0)       /* CFG_GPMC_A22_OEN */
-                       0x1c4 A_DELAY_PS(69) G_DELAY_PS(0)        /* CFG_GPMC_A22_OUT */
-                       0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154)    /* CFG_GPMC_A23_OUT */
-                       0x1d8 A_DELAY_PS(640) G_DELAY_PS(0)       /* CFG_GPMC_A24_OEN */
-                       0x1dc A_DELAY_PS(0) G_DELAY_PS(0)         /* CFG_GPMC_A24_OUT */
-                       0x1e4 A_DELAY_PS(356) G_DELAY_PS(0)       /* CFG_GPMC_A25_OEN */
-                       0x1e8 A_DELAY_PS(0) G_DELAY_PS(0)         /* CFG_GPMC_A25_OUT */
-                       0x1f0 A_DELAY_PS(579) G_DELAY_PS(0)       /* CFG_GPMC_A26_OEN */
-                       0x1f4 A_DELAY_PS(0) G_DELAY_PS(0)         /* CFG_GPMC_A26_OUT */
-                       0x1fc A_DELAY_PS(435) G_DELAY_PS(0)       /* CFG_GPMC_A27_OEN */
-                       0x200 A_DELAY_PS(36) G_DELAY_PS(0)        /* CFG_GPMC_A27_OUT */
-                       0x364 A_DELAY_PS(759) G_DELAY_PS(0)       /* CFG_GPMC_CS1_OEN */
-                       0x368 A_DELAY_PS(72) G_DELAY_PS(0)        /* CFG_GPMC_CS1_OUT */
+                       0x190 A_DELAY_PS(384) G_DELAY_PS(0)     /* CFG_GPMC_A19_OEN */
+                       0x194 A_DELAY_PS(350) G_DELAY_PS(174)   /* CFG_GPMC_A19_OUT */
+                       0x1a8 A_DELAY_PS(410) G_DELAY_PS(0)     /* CFG_GPMC_A20_OEN */
+                       0x1ac A_DELAY_PS(335) G_DELAY_PS(0)     /* CFG_GPMC_A20_OUT */
+                       0x1b4 A_DELAY_PS(468) G_DELAY_PS(0)     /* CFG_GPMC_A21_OEN */
+                       0x1b8 A_DELAY_PS(339) G_DELAY_PS(0)     /* CFG_GPMC_A21_OUT */
+                       0x1c0 A_DELAY_PS(676) G_DELAY_PS(0)     /* CFG_GPMC_A22_OEN */
+                       0x1c4 A_DELAY_PS(219) G_DELAY_PS(0)     /* CFG_GPMC_A22_OUT */
+                       0x1d0 A_DELAY_PS(1062) G_DELAY_PS(154)  /* CFG_GPMC_A23_OUT */
+                       0x1d8 A_DELAY_PS(640) G_DELAY_PS(0)     /* CFG_GPMC_A24_OEN */
+                       0x1dc A_DELAY_PS(150) G_DELAY_PS(0)     /* CFG_GPMC_A24_OUT */
+                       0x1e4 A_DELAY_PS(356) G_DELAY_PS(0)     /* CFG_GPMC_A25_OEN */
+                       0x1e8 A_DELAY_PS(150) G_DELAY_PS(0)     /* CFG_GPMC_A25_OUT */
+                       0x1f0 A_DELAY_PS(579) G_DELAY_PS(0)     /* CFG_GPMC_A26_OEN */
+                       0x1f4 A_DELAY_PS(200) G_DELAY_PS(0)     /* CFG_GPMC_A26_OUT */
+                       0x1fc A_DELAY_PS(435) G_DELAY_PS(0)     /* CFG_GPMC_A27_OEN */
+                       0x200 A_DELAY_PS(236) G_DELAY_PS(0)     /* CFG_GPMC_A27_OUT */
+                       0x364 A_DELAY_PS(759) G_DELAY_PS(0)     /* CFG_GPMC_CS1_OEN */
+                       0x368 A_DELAY_PS(372) G_DELAY_PS(0)     /* CFG_GPMC_CS1_OUT */
              >;
        };
 
index 9ee45aa365d87d37689e3625648d13f5876a30bb..82b3dc90b7d6b6fb4a33b73850d89246fb564b44 100644 (file)
@@ -81,3 +81,11 @@ mcan_clk: mcan_clk@3fc {
                reg = <0x3fc>;
        };
 };
+
+&rtctarget {
+       status = "disabled";
+};
+
+&usb4_tm {
+       status = "disabled";
+};
index 48f7b4277b8ddd83dd2b24f465ac6438cefb6a69..3ac2e84fdeaa465dba7659782904497659d53867 100644 (file)
@@ -131,6 +131,7 @@ CONFIG_MV_XOR=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_MEMORY=y
 CONFIG_PWM=y
+CONFIG_PHY_MVEBU_A38X_COMPHY=y
 CONFIG_EXT4_FS=y
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
index 4ee65a8a3b8035483f98e335cda7c12c54854bbf..31ae3be5741d60cf22407955923b7eaa5ca3d540 100644 (file)
@@ -1480,6 +1480,8 @@ static __init void da850_evm_init(void)
        if (ret)
                pr_warn("%s: dsp/rproc registration failed: %d\n",
                        __func__, ret);
+
+       regulator_has_full_constraints();
 }
 
 #ifdef CONFIG_SERIAL_8250_CONSOLE
index 9ff02de448c6d30b718a1779891de5f9b50b492b..2d69e704f7f6b656bfc55b12da7d5a95f623e386 100644 (file)
@@ -683,6 +683,9 @@ static struct platform_device da8xx_lcdc_device = {
        .id             = 0,
        .num_resources  = ARRAY_SIZE(da8xx_lcdc_resources),
        .resource       = da8xx_lcdc_resources,
+       .dev            = {
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+       }
 };
 
 int __init da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata)
index fd0053e47a151179db824e1aac6fe1526d20327e..3708a71f30e62bce60661e9527d22fd307838c60 100644 (file)
@@ -15,6 +15,7 @@
 
 #include "common.h"
 #include "cpuidle.h"
+#include "hardware.h"
 
 static int imx6sx_idle_finish(unsigned long val)
 {
@@ -110,7 +111,7 @@ int __init imx6sx_cpuidle_init(void)
         * except for power up sw2iso which need to be
         * larger than LDO ramp up time.
         */
-       imx_gpc_set_arm_power_up_timing(0xf, 1);
+       imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1);
        imx_gpc_set_arm_power_down_timing(1, 1);
 
        return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
index 2f052c56cd9efdb11cd034f7cbbb2201f42126ab..fc5378b00f3ddf71ebc9dc3e41145214991310c6 100644 (file)
@@ -13,7 +13,6 @@ config MACH_IXP4XX_OF
        select I2C
        select I2C_IOP3XX
        select PCI
-       select SERIAL_OF_PLATFORM
        select TIMER_OF
        select USE_OF
        help
index 4d805080020e32937ceec454f5ce0dd4a8ac094c..a0e0b6b7dc5c39364a5bce02d6ecc5d99e9838e0 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/mach/pci.h>
 #include <asm/system_info.h>
 
+#include "irqs.h"
+
 #define SLOT_ETHA              0x0B    /* IDSEL = AD21 */
 #define SLOT_ETHB              0x0C    /* IDSEL = AD20 */
 #define SLOT_MPCI              0x0D    /* IDSEL = AD19 */
index d114ccd2017cebcd4d3eac80b59daeaeb8f36ebf..ca889ef068a5f3ca73f28a3a363552072a23c854 100644 (file)
@@ -25,6 +25,8 @@
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
 
+#include "irqs.h"
+
 #define MAX_DEV                4
 #define IRQ_LINES      4
 
index 2d494b45437670d2ab5c8c4a366c75c43d5d5967..c02fa6f4838278047d35613866337e2718e48ca2 100644 (file)
@@ -27,6 +27,8 @@
 
 #include <mach/hardware.h>
 
+#include "irqs.h"
+
 static struct resource omixp_flash_resources[] = {
        {
                .flags  = IORESOURCE_MEM,
index a4220fa5e0c3ef7cafa6c3a93e4c5209a28ac277..6e41e5ece4e12bc4719c4da2ebcf2e5347763f78 100644 (file)
@@ -21,6 +21,8 @@
 #include <asm/mach/pci.h>
 #include <asm/mach-types.h>
 
+#include "irqs.h"
+
 /* PCI controller GPIO to IRQ pin mappings */
 #define INTA   2
 #define INTB   3
index 2c03d2f6b6479b2b1423e347b1b69998e65810a9..d2ebb7c675a8c59c311d1bbc3f04a31bdbd04455 100644 (file)
@@ -22,6 +22,8 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/flash.h>
 
+#include "irqs.h"
+
 static struct flash_platform_data vulcan_flash_data = {
        .map_name       = "cfi_probe",
        .width          = 2,
index c7ad684926c312642b265b508722f7580dda24b7..d07fc063c930b6a4f56545bae435da31d6300282 100644 (file)
@@ -89,6 +89,7 @@ config ARCH_K3
        bool "Texas Instruments Inc. K3 multicore SoC architecture"
        select PM_GENERIC_DOMAINS if PM
        select MAILBOX
+       select SOC_TI
        select TI_MESSAGE_MANAGER
        select TI_SCI_PROTOCOL
        select TI_SCI_INTR_IRQCHIP
@@ -168,6 +169,7 @@ config ARCH_MXC
        select IMX_GPCV2_PM_DOMAINS
        select PM
        select PM_GENERIC_DOMAINS
+       select SOC_BUS
        help
          This enables support for the ARMv8 based SoCs in the
          NXP i.MX family.
index 8fbd583b18e1fd4a25315f2020a31f22ded5b34e..e9d2e578cbe67be2886fb637ba660bfab5598aef 100644 (file)
@@ -51,7 +51,7 @@ endif
 
 KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
 KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS  += -Wno-psabi
+KBUILD_CFLAGS  += $(call cc-disable-warning, psabi)
 KBUILD_AFLAGS  += $(lseinstr) $(brokengasinst)
 
 KBUILD_CFLAGS  += $(call cc-option,-mabi=lp64)
index 2c41b04708fe33465b8193a65410d74efcd56be1..851c68dc6d61b954357e69323f9739f7291ec318 100644 (file)
@@ -812,8 +812,7 @@ extern int kern_addr_valid(unsigned long addr);
 
 #include <asm-generic/pgtable.h>
 
-void pgd_cache_init(void);
-#define pgtable_cache_init     pgd_cache_init
+static inline void pgtable_cache_init(void) { }
 
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
index 3a18702289469450e37532a0c989ea527fd96c5a..dff8f9ea5754f97ac92346094aff563d706cc052 100644 (file)
@@ -195,6 +195,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
        unsigned long asid = ASID(vma->vm_mm);
        unsigned long addr;
 
+       start = round_down(start, stride);
+       end = round_up(end, stride);
+
        if ((end - start) >= (MAX_TLBI_OPS * stride)) {
                flush_tlb_mm(vma->vm_mm);
                return;
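
The two added lines align the invalidation range to the stride before the per-entry/full-mm decision; an unaligned start or end could otherwise leave part of a block-sized mapping uninvalidated. A minimal standalone sketch of the alignment, using simplified power-of-two helpers rather than the kernel's round_down()/round_up() macros:

    #include <stdio.h>

    /* simplified power-of-two variants of the kernel helpers */
    #define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))
    #define round_up(x, y)   (((x) + (y) - 1) & ~((unsigned long)(y) - 1))

    int main(void)
    {
            unsigned long start = 0x12345, end = 0x15001, stride = 0x1000;

            start = round_down(start, stride);
            end = round_up(end, stride);
            printf("invalidate [%#lx, %#lx) in %#lx steps\n", start, end, stride);
            return 0;
    }
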
index 7b7ac0f6cec9e87c93abbaa2ddffea565c3302b0..d819a3e8b552b47bd3019eeef6c39ccc4500a6c9 100644 (file)
@@ -260,6 +260,13 @@ struct kvm_vcpu_events {
         KVM_REG_SIZE_U256 |                                            \
         ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
 
+/*
+ * Register values for KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() and
+ * KVM_REG_ARM64_SVE_FFR() are represented in memory in an endianness-
+ * invariant layout which differs from the layout used for the FPSIMD
+ * V-registers on big-endian systems: see sigcontext.h for more explanation.
+ */
+
 #define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
 #define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
 
index d78623acb6491e6ef4ae64a365c22e29bd5cefe2..e932284993d401ca3b5fda787f616eb626931bfb 100644 (file)
@@ -65,8 +65,6 @@
 
 #ifndef __ASSEMBLY__
 
-#include <linux/prctl.h>
-
 /*
  * User structures for general purpose, floating point and debug registers.
  */
@@ -113,10 +111,10 @@ struct user_sve_header {
 
 /*
  * Common SVE_PT_* flags:
- * These must be kept in sync with prctl interface in <linux/ptrace.h>
+ * These must be kept in sync with prctl interface in <linux/prctl.h>
  */
-#define SVE_PT_VL_INHERIT              (PR_SVE_VL_INHERIT >> 16)
-#define SVE_PT_VL_ONEXEC               (PR_SVE_SET_VL_ONEXEC >> 16)
+#define SVE_PT_VL_INHERIT              ((1 << 17) /* PR_SVE_VL_INHERIT */ >> 16)
+#define SVE_PT_VL_ONEXEC               ((1 << 18) /* PR_SVE_SET_VL_ONEXEC */ >> 16)
 
 
 /*
@@ -176,6 +174,10 @@ struct user_sve_header {
  *     FPCR    uint32_t                        FPCR
  *
  * Additional data might be appended in the future.
+ *
+ * The Z-, P- and FFR registers are represented in memory in an endianness-
+ * invariant layout which differs from the layout used for the FPSIMD
+ * V-registers on big-endian systems: see sigcontext.h for more explanation.
  */
 
 #define SVE_PT_SVE_ZREG_SIZE(vq)       __SVE_ZREG_SIZE(vq)
index 5f3c0cec5af921fb56970d4b1384fe097c8e2d76..3d448a0bb2256344caf95abc0c66f43c06d83034 100644 (file)
@@ -77,6 +77,15 @@ struct fpsimd_context {
        __uint128_t vregs[32];
 };
 
+/*
+ * Note: similarly to all other integer fields, each V-register is stored in an
+ * endianness-dependent format, with the byte at offset i from the start of the
+ * in-memory representation of the register value containing
+ *
+ *    bits [(7 + 8 * i) : (8 * i)] of the register on little-endian hosts; or
+ *    bits [(127 - 8 * i) : (120 - 8 * i)] on big-endian hosts.
+ */
+
 /* ESR_EL1 context */
 #define ESR_MAGIC      0x45535201
 
@@ -204,6 +213,11 @@ struct sve_context {
  *     FFR     uint16_t[vq]                    first-fault status register
  *
  * Additional data might be appended in the future.
+ *
+ * Unlike vregs[] in fpsimd_context, each SVE scalable register (Z-, P- or FFR)
+ * is encoded in memory in an endianness-invariant format, with the byte at
+ * offset i from the start of the in-memory representation containing bits
+ * [(7 + 8 * i) : (8 * i)] of the register value.
  */
 
 #define SVE_SIG_ZREG_SIZE(vq)  __SVE_ZREG_SIZE(vq)
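
The layout rules spelled out above can be checked from userspace: byte i of the FPSIMD V-register image is endianness-dependent, while byte i of an SVE Z/P/FFR image always carries bits [8i+7:8i]. A small sketch, assuming a compiler with __uint128_t support:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            __uint128_t reg = ((__uint128_t)0x0123456789abcdefULL << 64) |
                              0xfedcba9876543210ULL;
            unsigned char bytes[16];
            int i;

            /* FPSIMD vregs[] image: native, endianness-dependent layout */
            memcpy(bytes, &reg, sizeof(bytes));
            printf("byte 0, native image:    0x%02x\n", bytes[0]);

            /* SVE Z-register image: byte i always holds bits [8i+7:8i] */
            for (i = 0; i < 16; i++)
                    bytes[i] = (unsigned char)(reg >> (8 * i));
            printf("byte 0, invariant image: 0x%02x\n", bytes[0]);
            return 0;
    }

On a little-endian host both lines print 0x10; on a big-endian host only the second does.
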
index a38bf74bcca8c5c1732cf161718b497e59160853..bb42cd04baec6c8db21054e168ac448dabbe5cdb 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
+#include <linux/swab.h>
 
 #include <asm/esr.h>
 #include <asm/fpsimd.h>
@@ -352,6 +353,23 @@ static int __init sve_sysctl_init(void) { return 0; }
 #define ZREG(sve_state, vq, n) ((char *)(sve_state) +          \
        (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
 
+#ifdef CONFIG_CPU_BIG_ENDIAN
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+       u64 a = swab64(x);
+       u64 b = swab64(x >> 64);
+
+       return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+       return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+
 /*
  * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
  * task->thread.sve_state.
@@ -369,14 +387,16 @@ static void fpsimd_to_sve(struct task_struct *task)
        void *sst = task->thread.sve_state;
        struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
        unsigned int i;
+       __uint128_t *p;
 
        if (!system_supports_sve())
                return;
 
        vq = sve_vq_from_vl(task->thread.sve_vl);
-       for (i = 0; i < 32; ++i)
-               memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-                      sizeof(fst->vregs[i]));
+       for (i = 0; i < 32; ++i) {
+               p = (__uint128_t *)ZREG(sst, vq, i);
+               *p = arm64_cpu_to_le128(fst->vregs[i]);
+       }
 }
 
 /*
@@ -395,14 +415,16 @@ static void sve_to_fpsimd(struct task_struct *task)
        void const *sst = task->thread.sve_state;
        struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
        unsigned int i;
+       __uint128_t const *p;
 
        if (!system_supports_sve())
                return;
 
        vq = sve_vq_from_vl(task->thread.sve_vl);
-       for (i = 0; i < 32; ++i)
-               memcpy(&fst->vregs[i], ZREG(sst, vq, i),
-                      sizeof(fst->vregs[i]));
+       for (i = 0; i < 32; ++i) {
+               p = (__uint128_t const *)ZREG(sst, vq, i);
+               fst->vregs[i] = arm64_le128_to_cpu(*p);
+       }
 }
 
 #ifdef CONFIG_ARM64_SVE
@@ -491,6 +513,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
        void *sst = task->thread.sve_state;
        struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
        unsigned int i;
+       __uint128_t *p;
 
        if (!test_tsk_thread_flag(task, TIF_SVE))
                return;
@@ -499,9 +522,10 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 
        memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
 
-       for (i = 0; i < 32; ++i)
-               memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-                      sizeof(fst->vregs[i]));
+       for (i = 0; i < 32; ++i) {
+               p = (__uint128_t *)ZREG(sst, vq, i);
+               *p = arm64_cpu_to_le128(fst->vregs[i]);
+       }
 }
 
 int sve_set_vector_length(struct task_struct *task,
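
The helper added above builds the endianness-invariant image by byte-swapping each 64-bit half and exchanging the halves; on little-endian kernels it is an identity. A standalone sketch of the big-endian case, with a local swab64 standing in for the one from <linux/swab.h>:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t swab64(uint64_t x)          /* stand-in for the kernel helper */
    {
            return __builtin_bswap64(x);
    }

    static __uint128_t cpu_to_le128_be(__uint128_t x)   /* big-endian variant */
    {
            uint64_t a = swab64((uint64_t)x);
            uint64_t b = swab64((uint64_t)(x >> 64));

            return ((__uint128_t)a << 64) | b;
    }

    int main(void)
    {
            __uint128_t v = ((__uint128_t)0x0011223344556677ULL << 64) |
                            0x8899aabbccddeeffULL;

            /* the transform is its own inverse, so applying it twice
             * must give the original value back
             */
            printf("round trip ok: %d\n", cpu_to_le128_be(cpu_to_le128_be(v)) == v);
            return 0;
    }
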
index 885f13e587088fbd2448353d511cb83bdd09bcb1..52cfc6148355f8d129fd307010a1aabecf515631 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/compat.h>
 #include <linux/errno.h>
+#include <linux/prctl.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/thread_info.h>
index 3ae2f82fca469eb0922e556e018647beebc46294..c8aa00179363c7e1c36f29efaf74d96ea58d03ce 100644 (file)
@@ -70,10 +70,8 @@ static u64 core_reg_offset_from_id(u64 id)
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
-static int validate_core_offset(const struct kvm_vcpu *vcpu,
-                               const struct kvm_one_reg *reg)
+static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
 {
-       u64 off = core_reg_offset_from_id(reg->id);
        int size;
 
        switch (off) {
@@ -103,8 +101,7 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
                return -EINVAL;
        }
 
-       if (KVM_REG_SIZE(reg->id) != size ||
-           !IS_ALIGNED(off, size / sizeof(__u32)))
+       if (!IS_ALIGNED(off, size / sizeof(__u32)))
                return -EINVAL;
 
        /*
@@ -115,6 +112,21 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
        if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
                return -EINVAL;
 
+       return size;
+}
+
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+                               const struct kvm_one_reg *reg)
+{
+       u64 off = core_reg_offset_from_id(reg->id);
+       int size = core_reg_size_from_offset(vcpu, off);
+
+       if (size < 0)
+               return -EINVAL;
+
+       if (KVM_REG_SIZE(reg->id) != size)
+               return -EINVAL;
+
        return 0;
 }
 
@@ -207,13 +219,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
 #define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
 #define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
-
-static bool vq_present(
-       const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
-       unsigned int vq)
-{
-       return (*vqs)[vq_word(vq)] & vq_mask(vq);
-}
+#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))
 
 static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
@@ -258,7 +264,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
        max_vq = 0;
        for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
-               if (vq_present(&vqs, vq))
+               if (vq_present(vqs, vq))
                        max_vq = vq;
 
        if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
@@ -272,7 +278,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
         * maximum:
         */
        for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
-               if (vq_present(&vqs, vq) != sve_vq_available(vq))
+               if (vq_present(vqs, vq) != sve_vq_available(vq))
                        return -EINVAL;
 
        /* Can't run with no vector lengths at all: */
@@ -453,19 +459,34 @@ static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
 {
        unsigned int i;
        int n = 0;
-       const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 
        for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
-               /*
-                * The KVM_REG_ARM64_SVE regs must be used instead of
-                * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
-                * SVE-enabled vcpus:
-                */
-               if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+               u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
+               int size = core_reg_size_from_offset(vcpu, i);
+
+               if (size < 0)
+                       continue;
+
+               switch (size) {
+               case sizeof(__u32):
+                       reg |= KVM_REG_SIZE_U32;
+                       break;
+
+               case sizeof(__u64):
+                       reg |= KVM_REG_SIZE_U64;
+                       break;
+
+               case sizeof(__uint128_t):
+                       reg |= KVM_REG_SIZE_U128;
+                       break;
+
+               default:
+                       WARN_ON(1);
                        continue;
+               }
 
                if (uindices) {
-                       if (put_user(core_reg | i, uindices))
+                       if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
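
The vq_present() macro replaces the old helper with plain bitmap indexing: vector quantum vq maps to bit ((vq - SVE_VQ_MIN) % 64) of word ((vq - SVE_VQ_MIN) / 64). A minimal sketch of the same indexing outside the kernel (SVE_VQ_MIN is 1 and the vqs array is 8 words in the uapi headers):

    #include <stdio.h>
    #include <stdint.h>

    #define SVE_VQ_MIN              1
    #define KVM_ARM64_SVE_VLS_WORDS 8

    #define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
    #define vq_mask(vq) ((uint64_t)1 << ((vq) - SVE_VQ_MIN) % 64)
    #define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))

    int main(void)
    {
            uint64_t vqs[KVM_ARM64_SVE_VLS_WORDS] = { 0 };

            vqs[vq_word(2)] |= vq_mask(2);      /* advertise VL = 2 * 128 bits */
            printf("vq 2: %d, vq 3: %d\n",
                   !!vq_present(vqs, 2), !!vq_present(vqs, 3));
            return 0;
    }
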
index 7dede2e34b708958d1ce71adaa7fd34a3d7bfc70..ccf00a8b98c6ab422d70f82974b03e5e457dff97 100644 (file)
@@ -876,6 +876,23 @@ static inline int pmd_present(pmd_t pmd)
        return false;
 }
 
+static inline int pmd_is_serializing(pmd_t pmd)
+{
+       /*
+        * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
+        * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
+        *
+        * This condition may also occur transiently while a pmd is being
+        * flushed (see ptep_modify_prot_start), so callers must ensure this
+        * case is fine as well.
+        */
+       if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
+                                               cpu_to_be64(_PAGE_INVALID))
+               return true;
+
+       return false;
+}
+
 static inline int pmd_bad(pmd_t pmd)
 {
        if (radix_enabled())
@@ -1092,6 +1109,19 @@ static inline int pmd_protnone(pmd_t pmd)
 #define pmd_access_permitted pmd_access_permitted
 static inline bool pmd_access_permitted(pmd_t pmd, bool write)
 {
+       /*
+        * pmdp_invalidate sets this combination (which is not caught by
+        * !pte_present() check in pte_access_permitted), to prevent
+        * lock-free lookups, as part of the serialize_against_pte_lookup()
+        * synchronisation.
+        *
+        * This also catches the case where the PTE's hardware PRESENT bit is
+        * cleared while TLB is flushed, which is suboptimal but should not
+        * be frequent.
+        */
+       if (pmd_is_serializing(pmd))
+               return false;
+
        return pte_access_permitted(pmd_pte(pmd), write);
 }
 
index 3ffad030393c13b2c00277953b34f07e648f2e52..461b0f1938647f7730fd08b51d807ef618643760 100644 (file)
@@ -13,7 +13,11 @@ extern void btext_update_display(unsigned long phys, int width, int height,
                                 int depth, int pitch);
 extern void btext_setup_display(int width, int height, int depth, int pitch,
                                unsigned long address);
+#ifdef CONFIG_PPC32
 extern void btext_prepare_BAT(void);
+#else
+static inline void btext_prepare_BAT(void) { }
+#endif
 extern void btext_map(void);
 extern void btext_unmap(void);
 
index 4a585cba17879bc72515c824b5e4af85e13fb6b6..c684768187538610e37e59d90bd2cb3f2c8ad48d 100644 (file)
@@ -94,6 +94,9 @@ static inline bool kdump_in_progress(void)
        return crashing_cpu >= 0;
 }
 
+void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
+                        unsigned long start_address) __noreturn;
+
 #ifdef CONFIG_KEXEC_FILE
 extern const struct kexec_file_ops kexec_elf64_ops;
 
index 493c5c943acd02a2b5f9d894cbe2b3ec1cd562a9..2291daf39cd1500c196a88ae93577900e8ef113c 100644 (file)
 #define PPC_INST_MADDLD                        0x10000033
 #define PPC_INST_DIVWU                 0x7c000396
 #define PPC_INST_DIVD                  0x7c0003d2
+#define PPC_INST_DIVDU                 0x7c000392
 #define PPC_INST_RLWINM                        0x54000000
 #define PPC_INST_RLWINM_DOT            0x54000001
 #define PPC_INST_RLWIMI                        0x50000000
index affe5dcce7f465bcfcae706e3c3a1ebb24b44dbe..2b160d68db494154c6098caab369181d2fbec29b 100644 (file)
@@ -30,7 +30,6 @@ typedef void (*relocate_new_kernel_t)(
  */
 void default_machine_kexec(struct kimage *image)
 {
-       extern const unsigned char relocate_new_kernel[];
        extern const unsigned int relocate_new_kernel_size;
        unsigned long page_list;
        unsigned long reboot_code_buffer, reboot_code_buffer_phys;
@@ -58,6 +57,9 @@ void default_machine_kexec(struct kimage *image)
                                reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
        printk(KERN_INFO "Bye!\n");
 
+       if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
+               relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
+
        /* now call it */
        rnk = (relocate_new_kernel_t) reboot_code_buffer;
        (*rnk)(page_list, reboot_code_buffer_phys, image->start);
index 3555cad7bdde5d5faa446956078ebe5329666fb9..ed446b7ea1643cd1278feab02867ac6464056f3a 100644 (file)
@@ -2336,6 +2336,7 @@ static void __init prom_check_displays(void)
                        prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
                                    width, height, pitch, addr);
                        btext_setup_display(width, height, 8, pitch, addr);
+                       btext_prepare_BAT();
                }
 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
        }
index 518d416971c1d6eddc0397b596c2fcc1cb6d108b..160bef0d553d970e8bca16a76156b421f5e4691c 100644 (file)
@@ -24,7 +24,7 @@ fi
 WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
 _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
 __secondary_hold_acknowledge __secondary_hold_spinloop __start
-logo_linux_clut224
+logo_linux_clut224 btext_prepare_BAT
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
 __prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
 
index a255707e4aee07bf295794bb58ace3faf6e323c2..01bc9663360d6ce24568777c87ef686143b4e7b8 100644 (file)
@@ -112,6 +112,9 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
        /*
         * This ensures that generic code that relies on IRQ disabling
         * to prevent a parallel THP split works as expected.
+        *
+        * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
+        * a special case check in pmd_access_permitted.
         */
        serialize_against_pte_lookup(vma->vm_mm);
        return __pmd(old_pmd);
index 39d2f8012386842863c0b2eac0550cf1a87b263b..fc10c0c24f519bd9cbdf96402ab9592ed0409ca7 100644 (file)
@@ -368,13 +368,25 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
        pdshift = PMD_SHIFT;
        pmdp = pmd_offset(&pud, ea);
        pmd  = READ_ONCE(*pmdp);
+
        /*
-        * A hugepage collapse is captured by pmd_none, because
-        * it mark the pmd none and do a hpte invalidate.
+        * A hugepage collapse is captured by this condition, see
+        * pmdp_collapse_flush.
         */
        if (pmd_none(pmd))
                return NULL;
 
+#ifdef CONFIG_PPC_BOOK3S_64
+       /*
+        * A hugepage split is captured by this condition, see
+        * pmdp_invalidate.
+        *
+        * Huge page modification can be caught here too.
+        */
+       if (pmd_is_serializing(pmd))
+               return NULL;
+#endif
+
        if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
                if (is_thp)
                        *is_thp = true;
index 6026a7af031d1a924a54e489c20a63b197163145..55d4377ccfae3fe98555b7076b2cca12178afdff 100644 (file)
                                     ___PPC_RA(a) | IMM_L(i))
 #define PPC_DIVWU(d, a, b)     EMIT(PPC_INST_DIVWU | ___PPC_RT(d) |          \
                                     ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_DIVD(d, a, b)      EMIT(PPC_INST_DIVD | ___PPC_RT(d) |           \
+#define PPC_DIVDU(d, a, b)     EMIT(PPC_INST_DIVDU | ___PPC_RT(d) |          \
                                     ___PPC_RA(a) | ___PPC_RB(b))
 #define PPC_AND(d, a, b)       EMIT(PPC_INST_AND | ___PPC_RA(d) |            \
                                     ___PPC_RS(a) | ___PPC_RB(b))
index 63d05c499cacdfaf05f308054801d34fcee56d55..c2ee6041f02c46f8cbf0f8e91c202653d3e948b6 100644 (file)
@@ -395,12 +395,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
                case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
                        if (BPF_OP(code) == BPF_MOD) {
-                               PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
+                               PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
                                PPC_MULD(b2p[TMP_REG_1], src_reg,
                                                b2p[TMP_REG_1]);
                                PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
                        } else
-                               PPC_DIVD(dst_reg, dst_reg, src_reg);
+                               PPC_DIVDU(dst_reg, dst_reg, src_reg);
                        break;
                case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
                case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
@@ -428,7 +428,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                                break;
                        case BPF_ALU64:
                                if (BPF_OP(code) == BPF_MOD) {
-                                       PPC_DIVD(b2p[TMP_REG_2], dst_reg,
+                                       PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
                                                        b2p[TMP_REG_1]);
                                        PPC_MULD(b2p[TMP_REG_1],
                                                        b2p[TMP_REG_1],
@@ -436,7 +436,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                                        PPC_SUB(dst_reg, dst_reg,
                                                        b2p[TMP_REG_1]);
                                } else
-                                       PPC_DIVD(dst_reg, dst_reg,
+                                       PPC_DIVDU(dst_reg, dst_reg,
                                                        b2p[TMP_REG_1]);
                                break;
                        }
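
eBPF defines BPF_DIV and BPF_MOD as unsigned operations, so the 64-bit JIT must emit divdu (divide doubleword unsigned) rather than the signed divd it used before; the two disagree whenever an operand has its top bit set. A quick illustration in plain C:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t dst = 0xfffffffffffffff6ULL;       /* (u64)-10 */
            uint64_t src = 3;

            /* unsigned: what BPF_DIV specifies and what divdu computes */
            printf("unsigned: %llu\n", (unsigned long long)(dst / src));
            /* signed: what the old divd-based code computed */
            printf("signed:   %lld\n", (long long)((int64_t)dst / (int64_t)src));
            return 0;
    }
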
diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile
new file mode 100644 (file)
index 0000000..dcc3ada
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+subdir-y += sifive
diff --git a/arch/riscv/boot/dts/sifive/Makefile b/arch/riscv/boot/dts/sifive/Makefile
new file mode 100644 (file)
index 0000000..baaeef9
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-y += hifive-unleashed-a00.dtb
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
new file mode 100644 (file)
index 0000000..3c06ee4
--- /dev/null
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2018-2019 SiFive, Inc */
+
+/dts-v1/;
+
+#include <dt-bindings/clock/sifive-fu540-prci.h>
+
+/ {
+       #address-cells = <2>;
+       #size-cells = <2>;
+       compatible = "sifive,fu540-c000", "sifive,fu540";
+
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart1;
+       };
+
+       chosen {
+       };
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               timebase-frequency = <1000000>;
+               cpu0: cpu@0 {
+                       compatible = "sifive,e51", "sifive,rocket0", "riscv";
+                       device_type = "cpu";
+                       i-cache-block-size = <64>;
+                       i-cache-sets = <128>;
+                       i-cache-size = <16384>;
+                       reg = <0>;
+                       riscv,isa = "rv64imac";
+                       status = "disabled";
+                       cpu0_intc: interrupt-controller {
+                               #interrupt-cells = <1>;
+                               compatible = "riscv,cpu-intc";
+                               interrupt-controller;
+                       };
+               };
+               cpu1: cpu@1 {
+                       compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+                       d-cache-block-size = <64>;
+                       d-cache-sets = <64>;
+                       d-cache-size = <32768>;
+                       d-tlb-sets = <1>;
+                       d-tlb-size = <32>;
+                       device_type = "cpu";
+                       i-cache-block-size = <64>;
+                       i-cache-sets = <64>;
+                       i-cache-size = <32768>;
+                       i-tlb-sets = <1>;
+                       i-tlb-size = <32>;
+                       mmu-type = "riscv,sv39";
+                       reg = <1>;
+                       riscv,isa = "rv64imafdc";
+                       tlb-split;
+                       cpu1_intc: interrupt-controller {
+                               #interrupt-cells = <1>;
+                               compatible = "riscv,cpu-intc";
+                               interrupt-controller;
+                       };
+               };
+               cpu2: cpu@2 {
+                       clock-frequency = <0>;
+                       compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+                       d-cache-block-size = <64>;
+                       d-cache-sets = <64>;
+                       d-cache-size = <32768>;
+                       d-tlb-sets = <1>;
+                       d-tlb-size = <32>;
+                       device_type = "cpu";
+                       i-cache-block-size = <64>;
+                       i-cache-sets = <64>;
+                       i-cache-size = <32768>;
+                       i-tlb-sets = <1>;
+                       i-tlb-size = <32>;
+                       mmu-type = "riscv,sv39";
+                       reg = <2>;
+                       riscv,isa = "rv64imafdc";
+                       tlb-split;
+                       cpu2_intc: interrupt-controller {
+                               #interrupt-cells = <1>;
+                               compatible = "riscv,cpu-intc";
+                               interrupt-controller;
+                       };
+               };
+               cpu3: cpu@3 {
+                       clock-frequency = <0>;
+                       compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+                       d-cache-block-size = <64>;
+                       d-cache-sets = <64>;
+                       d-cache-size = <32768>;
+                       d-tlb-sets = <1>;
+                       d-tlb-size = <32>;
+                       device_type = "cpu";
+                       i-cache-block-size = <64>;
+                       i-cache-sets = <64>;
+                       i-cache-size = <32768>;
+                       i-tlb-sets = <1>;
+                       i-tlb-size = <32>;
+                       mmu-type = "riscv,sv39";
+                       reg = <3>;
+                       riscv,isa = "rv64imafdc";
+                       tlb-split;
+                       cpu3_intc: interrupt-controller {
+                               #interrupt-cells = <1>;
+                               compatible = "riscv,cpu-intc";
+                               interrupt-controller;
+                       };
+               };
+               cpu4: cpu@4 {
+                       clock-frequency = <0>;
+                       compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+                       d-cache-block-size = <64>;
+                       d-cache-sets = <64>;
+                       d-cache-size = <32768>;
+                       d-tlb-sets = <1>;
+                       d-tlb-size = <32>;
+                       device_type = "cpu";
+                       i-cache-block-size = <64>;
+                       i-cache-sets = <64>;
+                       i-cache-size = <32768>;
+                       i-tlb-sets = <1>;
+                       i-tlb-size = <32>;
+                       mmu-type = "riscv,sv39";
+                       reg = <4>;
+                       riscv,isa = "rv64imafdc";
+                       tlb-split;
+                       cpu4_intc: interrupt-controller {
+                               #interrupt-cells = <1>;
+                               compatible = "riscv,cpu-intc";
+                               interrupt-controller;
+                       };
+               };
+       };
+       soc {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               compatible = "sifive,fu540-c000", "sifive,fu540", "simple-bus";
+               ranges;
+               plic0: interrupt-controller@c000000 {
+                       #interrupt-cells = <1>;
+                       compatible = "sifive,plic-1.0.0";
+                       reg = <0x0 0xc000000 0x0 0x4000000>;
+                       riscv,ndev = <53>;
+                       interrupt-controller;
+                       interrupts-extended = <
+                               &cpu0_intc 0xffffffff
+                               &cpu1_intc 0xffffffff &cpu1_intc 9
+                               &cpu2_intc 0xffffffff &cpu2_intc 9
+                               &cpu3_intc 0xffffffff &cpu3_intc 9
+                               &cpu4_intc 0xffffffff &cpu4_intc 9>;
+               };
+               prci: clock-controller@10000000 {
+                       compatible = "sifive,fu540-c000-prci";
+                       reg = <0x0 0x10000000 0x0 0x1000>;
+                       clocks = <&hfclk>, <&rtcclk>;
+                       #clock-cells = <1>;
+               };
+               uart0: serial@10010000 {
+                       compatible = "sifive,fu540-c000-uart", "sifive,uart0";
+                       reg = <0x0 0x10010000 0x0 0x1000>;
+                       interrupt-parent = <&plic0>;
+                       interrupts = <4>;
+                       clocks = <&prci PRCI_CLK_TLCLK>;
+               };
+               uart1: serial@10011000 {
+                       compatible = "sifive,fu540-c000-uart", "sifive,uart0";
+                       reg = <0x0 0x10011000 0x0 0x1000>;
+                       interrupt-parent = <&plic0>;
+                       interrupts = <5>;
+                       clocks = <&prci PRCI_CLK_TLCLK>;
+               };
+               i2c0: i2c@10030000 {
+                       compatible = "sifive,fu540-c000-i2c", "sifive,i2c0";
+                       reg = <0x0 0x10030000 0x0 0x1000>;
+                       interrupt-parent = <&plic0>;
+                       interrupts = <50>;
+                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       reg-shift = <2>;
+                       reg-io-width = <1>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+               qspi0: spi@10040000 {
+                       compatible = "sifive,fu540-c000-spi", "sifive,spi0";
+                       reg = <0x0 0x10040000 0x0 0x1000
+                              0x0 0x20000000 0x0 0x10000000>;
+                       interrupt-parent = <&plic0>;
+                       interrupts = <51>;
+                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+               qspi1: spi@10041000 {
+                       compatible = "sifive,fu540-c000-spi", "sifive,spi0";
+                       reg = <0x0 0x10041000 0x0 0x1000
+                              0x0 0x30000000 0x0 0x10000000>;
+                       interrupt-parent = <&plic0>;
+                       interrupts = <52>;
+                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+               qspi2: spi@10050000 {
+                       compatible = "sifive,fu540-c000-spi", "sifive,spi0";
+                       reg = <0x0 0x10050000 0x0 0x1000>;
+                       interrupt-parent = <&plic0>;
+                       interrupts = <6>;
+                       clocks = <&prci PRCI_CLK_TLCLK>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+       };
+};
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
new file mode 100644 (file)
index 0000000..4da8870
--- /dev/null
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2018-2019 SiFive, Inc */
+
+#include "fu540-c000.dtsi"
+
+/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
+#define RTCCLK_FREQ            1000000
+
+/ {
+       #address-cells = <2>;
+       #size-cells = <2>;
+       model = "SiFive HiFive Unleashed A00";
+       compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000";
+
+       chosen {
+       };
+
+       cpus {
+               timebase-frequency = <RTCCLK_FREQ>;
+       };
+
+       memory@80000000 {
+               device_type = "memory";
+               reg = <0x0 0x80000000 0x2 0x00000000>;
+       };
+
+       soc {
+       };
+
+       hfclk: hfclk {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <33333333>;
+               clock-output-names = "hfclk";
+       };
+
+       rtcclk: rtcclk {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <RTCCLK_FREQ>;
+               clock-output-names = "rtcclk";
+       };
+};
+
+&qspi0 {
+       flash@0 {
+               compatible = "issi,is25wp256", "jedec,spi-nor";
+               reg = <0>;
+               spi-max-frequency = <50000000>;
+               m25p,fast-read;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
+       };
+};
+
+&qspi2 {
+       status = "okay";
+       mmc@0 {
+               compatible = "mmc-spi-slot";
+               reg = <0>;
+               spi-max-frequency = <20000000>;
+               voltage-ranges = <3300 3300>;
+               disable-wp;
+       };
+};
index 2fd3461e50abc9311cfe546de626b3ae58cb7e88..4f02967e55defb55b2cfc40f3de537f5323e7803 100644 (file)
@@ -49,6 +49,8 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_SERIAL_SIFIVE=y
+CONFIG_SERIAL_SIFIVE_CONSOLE=y
 CONFIG_HVC_RISCV_SBI=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_DRM=y
@@ -64,6 +66,8 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_UAS=y
 CONFIG_VIRTIO_MMIO=y
+CONFIG_CLK_SIFIVE=y
+CONFIG_CLK_SIFIVE_FU540_PRCI=y
 CONFIG_SIFIVE_PLIC=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
index 3943be480af096688d47f59021e51b0e85e3a23e..396a3303c537489bc8b1b7a52a9334e07b5ceda3 100644 (file)
 #include <asm/barrier.h>
 #include <asm/bitsperlong.h>
 
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit()  smp_mb()
-#define smp_mb__after_clear_bit()   smp_mb()
-#endif /* smp_mb__before_clear_bit */
-
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/fls.h>
index cfb6eb1d762d231c58145a6123589e92f2d62126..d0fe623bfb8f31c9a56e9f90ce25ec2b192a2a89 100644 (file)
@@ -13,6 +13,7 @@ static void default_power_off(void)
 }
 
 void (*pm_power_off)(void) = default_power_off;
+EXPORT_SYMBOL(pm_power_off);
 
 void machine_restart(char *cmd)
 {
index 7e893ae0f10edca35bae5796cebe7f87cdc6be5c..87ff89e88f2ca95bfc1fb47e10e0cb7e88b0880a 100644 (file)
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(__delay);
 
 void udelay(unsigned long usecs)
 {
-       unsigned long ucycles = usecs * lpj_fine * UDELAY_MULT;
+       u64 ucycles = (u64)usecs * lpj_fine * UDELAY_MULT;
 
        if (unlikely(usecs > MAX_UDELAY_US)) {
                __delay((u64)usecs * riscv_timebase / 1000000ULL);
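
On 32-bit RISC-V, unsigned long is 32 bits wide, so the intermediate product usecs * lpj_fine * UDELAY_MULT wraps for fairly small delays; promoting it to u64 keeps the value exact. A standalone sketch of the overflow (the constants are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t usecs = 100, lpj = 1000000, mult = 1000;   /* illustrative */
            uint32_t narrow = usecs * lpj * mult;               /* wraps mod 2^32 */
            uint64_t wide = (uint64_t)usecs * lpj * mult;

            printf("32-bit: %u, 64-bit: %llu\n", narrow, (unsigned long long)wide);
            return 0;
    }
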
index fd7662afddeacf037a2de04acb094820dedf1022..3e2708c626a854884358ced1e6f60bbd1e5f5fa6 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <asm/pgalloc.h>
 #include <asm/ptrace.h>
+#include <asm/tlbflush.h>
 
 /*
  * This routine handles page faults.  It determines the address and the
@@ -265,6 +266,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
                pte_k = pte_offset_kernel(pmd_k, addr);
                if (!pte_present(*pte_k))
                        goto no_context;
+
+               /*
+                * The kernel assumes that TLBs don't cache invalid
+                * entries, but in RISC-V, SFENCE.VMA specifies an
+                * ordering constraint, not a cache flush; it is
+                * necessary even after writing invalid entries.
+                * Relying on flush_tlb_fix_spurious_fault would
+                * suffice, but the extra traps reduce
+                * performance. So, eagerly SFENCE.VMA.
+                */
+               local_flush_tlb_page(addr);
+
                return;
        }
 }
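
For reference, local_flush_tlb_page() on RISC-V is just an address-scoped SFENCE.VMA; roughly (a sketch based on the arch's tlbflush.h of this era, not a verbatim copy):

    static inline void local_flush_tlb_page(unsigned long addr)
    {
            __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
    }
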
index 80b12aa5e10d06627e223166558e4e814c695077..426d5c33ea9037e10fd7706dddb8ac18d5649c46 100644 (file)
@@ -751,22 +751,32 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        case BPF_ALU | BPF_ADD | BPF_X:
        case BPF_ALU64 | BPF_ADD | BPF_X:
                emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_SUB | BPF_X:
        case BPF_ALU64 | BPF_SUB | BPF_X:
                emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_AND | BPF_X:
        case BPF_ALU64 | BPF_AND | BPF_X:
                emit(rv_and(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_OR | BPF_X:
        case BPF_ALU64 | BPF_OR | BPF_X:
                emit(rv_or(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_XOR | BPF_X:
        case BPF_ALU64 | BPF_XOR | BPF_X:
                emit(rv_xor(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_MUL | BPF_X:
        case BPF_ALU64 | BPF_MUL | BPF_X:
@@ -789,14 +799,20 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        case BPF_ALU | BPF_LSH | BPF_X:
        case BPF_ALU64 | BPF_LSH | BPF_X:
                emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_X:
        case BPF_ALU64 | BPF_RSH | BPF_X:
                emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_X:
        case BPF_ALU64 | BPF_ARSH | BPF_X:
                emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
 
        /* dst = -dst */
@@ -804,6 +820,8 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        case BPF_ALU64 | BPF_NEG:
                emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) :
                     rv_subw(rd, RV_REG_ZERO, rd), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
 
        /* dst = BSWAP##imm(dst) */
@@ -958,14 +976,20 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        case BPF_ALU | BPF_LSH | BPF_K:
        case BPF_ALU64 | BPF_LSH | BPF_K:
                emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_K:
        case BPF_ALU64 | BPF_RSH | BPF_K:
                emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K:
        case BPF_ALU64 | BPF_ARSH | BPF_K:
                emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
+               if (!is64)
+                       emit_zext_32(rd, ctx);
                break;
 
        /* JUMP off */
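
The emit_zext_32() calls are needed because RV64's *W instructions (addw, subw, sllw, ...) sign-extend their 32-bit result into the full 64-bit register, while the eBPF ABI requires 32-bit ALU operations to leave the upper 32 bits of the destination zeroed. The difference in plain C:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t a = 0x80000000u, b = 0;
            /* what addw leaves in a 64-bit register: 32-bit sum, sign-extended */
            int64_t rv64_result = (int32_t)(a + b);
            /* what BPF expects after a 32-bit ALU op: upper 32 bits cleared */
            uint64_t bpf_result = (uint32_t)(a + b);

            printf("addw-style: 0x%016llx\n", (unsigned long long)rv64_result);
            printf("bpf-style:  0x%016llx\n", (unsigned long long)bpf_result);
            return 0;
    }
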
index de8521fc9de58da66c68ac15f795834914bf83ee..e48013cf50a2b1b7cb591a110ec0125587b4eac3 100644 (file)
@@ -30,6 +30,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
 KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
+KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
 UTS_MACHINE    := s390x
index 4600453536c2bc0e6842ac7e46cfc8b6ef10cea6..3bda757317cf1930a5924aa30fccdd4ffa5c2a62 100644 (file)
@@ -55,7 +55,7 @@
                : "i" (low), "i" (high));                               \
 } while (0)
 
-static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+static __always_inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
 {
        unsigned long reg;
 
@@ -64,7 +64,7 @@ static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
        __ctl_load(reg, cr, cr);
 }
 
-static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
 {
        unsigned long reg;
 
index 9e27fa05a7ae6fca6503fd2eb15928db93355c8d..4c95c365058aa44a4242c5bc8ccd2cb30cc5aa7c 100644 (file)
@@ -536,7 +536,7 @@ static inline void __fpregs_load_activate(void)
        struct fpu *fpu = &current->thread.fpu;
        int cpu = smp_processor_id();
 
-       if (WARN_ON_ONCE(current->mm == NULL))
+       if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
                return;
 
        if (!fpregs_state_valid(fpu, cpu)) {
@@ -567,11 +567,11 @@ static inline void __fpregs_load_activate(void)
  * otherwise.
  *
  * The FPU context is only stored/restored for a user task and
- * ->mm is used to distinguish between kernel and user threads.
+ * PF_KTHREAD is used to distinguish between kernel and user threads.
  */
 static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-       if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
+       if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
index 9f15384c504a407818168966610b36d3798a6683..310118805f576e1585e9114e8d0a84c1008ddd4d 100644 (file)
@@ -52,6 +52,9 @@
 
 #define INTEL_FAM6_CANNONLAKE_MOBILE   0x66
 
+#define INTEL_FAM6_ICELAKE_X           0x6A
+#define INTEL_FAM6_ICELAKE_XEON_D      0x6C
+#define INTEL_FAM6_ICELAKE_DESKTOP     0x7D
 #define INTEL_FAM6_ICELAKE_MOBILE      0x7E
 
 /* "Small Core" Processors (Atom) */
index 7a0e64ccd6ff5d02108a4424fc72a36f49018987..d6ab5b4d15e543800a7a7524517b495fa6305074 100644 (file)
@@ -383,6 +383,9 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE  (1 << 2)
 #define KVM_X86_QUIRK_OUT_7E_INC_RIP   (1 << 3)
 
+#define KVM_STATE_NESTED_FORMAT_VMX    0
+#define KVM_STATE_NESTED_FORMAT_SVM    1       /* unused */
+
 #define KVM_STATE_NESTED_GUEST_MODE    0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING   0x00000002
 #define KVM_STATE_NESTED_EVMCS         0x00000004
@@ -390,9 +393,16 @@ struct kvm_sync_regs {
 #define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON     0x00000002
 
-struct kvm_vmx_nested_state {
+#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
+
+struct kvm_vmx_nested_state_data {
+       __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+       __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+};
+
+struct kvm_vmx_nested_state_hdr {
        __u64 vmxon_pa;
-       __u64 vmcs_pa;
+       __u64 vmcs12_pa;
 
        struct {
                __u16 flags;
@@ -401,24 +411,25 @@ struct kvm_vmx_nested_state {
 
 /* for KVM_CAP_NESTED_STATE */
 struct kvm_nested_state {
-       /* KVM_STATE_* flags */
        __u16 flags;
-
-       /* 0 for VMX, 1 for SVM.  */
        __u16 format;
-
-       /* 128 for SVM, 128 + VMCS size for VMX.  */
        __u32 size;
 
        union {
-               /* VMXON, VMCS */
-               struct kvm_vmx_nested_state vmx;
+               struct kvm_vmx_nested_state_hdr vmx;
 
                /* Pad the header to 128 bytes.  */
                __u8 pad[120];
-       };
+       } hdr;
 
-       __u8 data[0];
+       /*
+        * Define data region as 0 bytes to preserve backwards-compatibility
+        * with the old definition of kvm_nested_state and avoid changing the
+        * KVM_{GET,SET}_NESTED_STATE ioctl values.
+        */
+       union {
+               struct kvm_vmx_nested_state_data vmx[0];
+       } data;
 };
 
 #endif /* _ASM_X86_KVM_H */
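
With the header/data split, userspace that migrates VMX nested state allocates the fixed 128-byte header plus one kvm_vmx_nested_state_data and reaches the VMCS images through data.vmx[0]. A hedged sketch of the save side (error handling trimmed; the size the kernel fills in can be smaller when the vCPU is not in guest mode):

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* sketch: fetch nested state for one vCPU fd; caller frees the buffer */
    struct kvm_nested_state *save_nested_state(int vcpu_fd)
    {
            size_t size = sizeof(struct kvm_nested_state) +
                          sizeof(struct kvm_vmx_nested_state_data);
            struct kvm_nested_state *state = calloc(1, size);

            if (!state)
                    return NULL;
            state->size = size;
            if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
                    free(state);
                    return NULL;
            }
            /* header:  state->hdr.vmx.vmxon_pa, state->hdr.vmx.vmcs12_pa
             * payload: state->data.vmx[0].vmcs12 (and shadow_vmcs12)
             */
            return state;
    }
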
index 70a04436380e3e16affcc706366b9c6dd80f201a..a813987b5552ea84e018d72d28ec35ba93c84a1e 100644 (file)
@@ -872,7 +872,7 @@ int __init microcode_init(void)
                goto out_ucode_group;
 
        register_syscore_ops(&mc_syscore_ops);
-       cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+       cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
                                  mc_cpu_online, mc_cpu_down_prep);
 
        pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
index 7ee93125a2114b5f1cf818aa75103cf970193161..397206f23d14f9aeadf860747fa42662d42baadf 100644 (file)
@@ -360,6 +360,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
        struct list_head *head;
        struct rdtgroup *entry;
 
+       if (!is_mbm_local_enabled())
+               return;
+
        r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
        closid = rgrp->closid;
        rmid = rgrp->mon.rmid;
index 2f48f208f7e29eb7152e73f4351eaa449dbc39f5..2131b8bbaad7d5bc38975d792ff2b96abda6312e 100644 (file)
@@ -2534,7 +2534,12 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
                if (closid_allocated(i) && i != closid) {
                        mode = rdtgroup_mode_by_closid(i);
                        if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
-                               break;
+                               /*
+                                * ctrl values for locksetup aren't relevant
+                                * until the schemata is written, and the mode
+                                * becomes RDT_MODE_PSEUDO_LOCKED.
+                                */
+                               continue;
                        /*
                         * If CDP is active include peer domain's
                         * usage to ensure there is no overlap
index 466fca686fb97cc9bb57f0dafba19c292037f948..649fbc3fcf9f52ee2f5bbe3f524d9d4f62b5ac95 100644 (file)
@@ -102,7 +102,7 @@ static void __kernel_fpu_begin(void)
 
        kernel_fpu_disable();
 
-       if (current->mm) {
+       if (!(current->flags & PF_KTHREAD)) {
                if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
                        set_thread_flag(TIF_NEED_FPU_LOAD);
                        /*
index 5a8d118bc423ec720f1439c6785f7f5abc0c33ed..0071b794ed19348447ddbe269fd4102d68ff536e 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/compat.h>
 #include <linux/cpu.h>
+#include <linux/pagemap.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/fpu/signal.h>
@@ -61,6 +62,11 @@ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
                struct user_i387_ia32_struct env;
                struct _fpstate_32 __user *fp = buf;
 
+               fpregs_lock();
+               if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+                       copy_fxregs_to_kernel(&tsk->thread.fpu);
+               fpregs_unlock();
+
                convert_from_fxsr(&env, tsk);
 
                if (__copy_to_user(buf, &env, sizeof(env)) ||
@@ -189,15 +195,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
        fpregs_unlock();
 
        if (ret) {
-               int aligned_size;
-               int nr_pages;
-
-               aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
-               nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
-
-               ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
-                                             NULL, FOLL_WRITE);
-               if (ret == nr_pages)
+               if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
                        goto retry;
                return -EFAULT;
        }
index 9a8c1648fc9a5167637add540d00865e697c2a07..6690c5652aebd290deebccd46a76390e8518f4cd 100644 (file)
@@ -758,7 +758,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
                       BREAK_INSTR_SIZE);
        bpt->type = BP_POKE_BREAKPOINT;
 
-       return err;
+       return 0;
 }
 
 int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
index 1e9ba81accba526b80bb698b193b405c07d48c02..d3c3d5e5ffd4999d3c1f1249167bbd8586fefb6f 100644 (file)
@@ -5602,14 +5602,18 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
        struct page *page;
        int i;
 
-       if (tdp_enabled)
-               return 0;
-
        /*
-        * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
-        * Therefore we need to allocate shadow page tables in the first
-        * 4GB of memory, which happens to fit the DMA32 zone.
+        * When using PAE paging, the four PDPTEs are treated as 'root' pages,
+        * while the PDP table is a per-vCPU construct that's allocated at MMU
+        * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
+        * x86_64.  Therefore we need to allocate the PDP table in the first
+        * 4GB of memory, which happens to fit the DMA32 zone.  Except for
+        * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
+        * skip allocating the PDP table.
         */
+       if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+               return 0;
+
        page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
        if (!page)
                return -ENOMEM;
index 1032f068f0b9cb8b144f92ee7e45124cb39bc924..5f9c1a200201c90c595c240a55389e8fa35e87bb 100644 (file)
@@ -1397,7 +1397,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
        }
 
        if (unlikely(!(evmcs->hv_clean_fields &
-                      HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
+                      HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
                vmcs12->exception_bitmap = evmcs->exception_bitmap;
        }
 
@@ -1437,7 +1437,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
        }
 
        if (unlikely(!(evmcs->hv_clean_fields &
-                      HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
+                      HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
                vmcs12->pin_based_vm_exec_control =
                        evmcs->pin_based_vm_exec_control;
                vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
@@ -5226,14 +5226,16 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
        struct vmcs12 *vmcs12;
        struct kvm_nested_state kvm_state = {
                .flags = 0,
-               .format = 0,
+               .format = KVM_STATE_NESTED_FORMAT_VMX,
                .size = sizeof(kvm_state),
-               .vmx.vmxon_pa = -1ull,
-               .vmx.vmcs_pa = -1ull,
+               .hdr.vmx.vmxon_pa = -1ull,
+               .hdr.vmx.vmcs12_pa = -1ull,
        };
+       struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
+               &user_kvm_nested_state->data.vmx[0];
 
        if (!vcpu)
-               return kvm_state.size + 2 * VMCS12_SIZE;
+               return kvm_state.size + sizeof(*user_vmx_nested_state);
 
        vmx = to_vmx(vcpu);
        vmcs12 = get_vmcs12(vcpu);
@@ -5243,23 +5245,23 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 
        if (nested_vmx_allowed(vcpu) &&
            (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
-               kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
-               kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
+               kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
+               kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
 
                if (vmx_has_valid_vmcs12(vcpu)) {
-                       kvm_state.size += VMCS12_SIZE;
+                       kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
 
                        if (is_guest_mode(vcpu) &&
                            nested_cpu_has_shadow_vmcs(vmcs12) &&
                            vmcs12->vmcs_link_pointer != -1ull)
-                               kvm_state.size += VMCS12_SIZE;
+                               kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
                }
 
                if (vmx->nested.smm.vmxon)
-                       kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
+                       kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
 
                if (vmx->nested.smm.guest_mode)
-                       kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
+                       kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
 
                if (is_guest_mode(vcpu)) {
                        kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
@@ -5294,16 +5296,19 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
                        copy_shadow_to_vmcs12(vmx);
        }
 
+       BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
+       BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
+
        /*
         * Copy over the full allocated size of vmcs12 rather than just the size
         * of the struct.
         */
-       if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
+       if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
                return -EFAULT;
 
        if (nested_cpu_has_shadow_vmcs(vmcs12) &&
            vmcs12->vmcs_link_pointer != -1ull) {
-               if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
+               if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
                                 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
                        return -EFAULT;
        }
@@ -5331,33 +5336,35 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12;
        u32 exit_qual;
+       struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
+               &user_kvm_nested_state->data.vmx[0];
        int ret;
 
-       if (kvm_state->format != 0)
+       if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
                return -EINVAL;
 
-       if (!nested_vmx_allowed(vcpu))
-               return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
-
-       if (kvm_state->vmx.vmxon_pa == -1ull) {
-               if (kvm_state->vmx.smm.flags)
+       if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
+               if (kvm_state->hdr.vmx.smm.flags)
                        return -EINVAL;
 
-               if (kvm_state->vmx.vmcs_pa != -1ull)
+               if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
                        return -EINVAL;
 
-               vmx_leave_nested(vcpu);
-               return 0;
-       }
+               if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
+                       return -EINVAL;
+       } else {
+               if (!nested_vmx_allowed(vcpu))
+                       return -EINVAL;
 
-       if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
-               return -EINVAL;
+               if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
+                       return -EINVAL;
+       }
 
-       if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+       if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
            (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
                return -EINVAL;
 
-       if (kvm_state->vmx.smm.flags &
+       if (kvm_state->hdr.vmx.smm.flags &
            ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
                return -EINVAL;
 
@@ -5366,21 +5373,25 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
         * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
         * must be zero.
         */
-       if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
+       if (is_smm(vcpu) ? kvm_state->flags : kvm_state->hdr.vmx.smm.flags)
                return -EINVAL;
 
-       if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
-           !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
+       if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+           !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
                return -EINVAL;
 
        vmx_leave_nested(vcpu);
-       if (kvm_state->vmx.vmxon_pa == -1ull)
-               return 0;
+       if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
+               if (!nested_vmx_allowed(vcpu))
+                       return -EINVAL;
 
-       if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
                nested_enable_evmcs(vcpu, NULL);
+       }
+
+       if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
+               return 0;
 
-       vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
+       vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
        ret = enter_vmx_operation(vcpu);
        if (ret)
                return ret;
@@ -5389,12 +5400,12 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
        if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
                return 0;
 
-       if (kvm_state->vmx.vmcs_pa != -1ull) {
-               if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
-                   !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+       if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
+               if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
+                   !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
                        return -EINVAL;
 
-               set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+               set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
        } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
                /*
                 * Sync eVMCS upon entry as we may not have
@@ -5405,16 +5416,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                return -EINVAL;
        }
 
-       if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
+       if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
                vmx->nested.smm.vmxon = true;
                vmx->nested.vmxon = false;
 
-               if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
+               if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
                        vmx->nested.smm.guest_mode = true;
        }
 
        vmcs12 = get_vmcs12(vcpu);
-       if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
+       if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
                return -EFAULT;
 
        if (vmcs12->hdr.revision_id != VMCS12_REVISION)
@@ -5431,12 +5442,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
            vmcs12->vmcs_link_pointer != -1ull) {
                struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
 
-               if (kvm_state->size < sizeof(*kvm_state) + VMCS12_SIZE + sizeof(*vmcs12))
+               if (kvm_state->size <
+                   sizeof(*kvm_state) +
+                   sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
                        goto error_guest_mode;
 
                if (copy_from_user(shadow_vmcs12,
-                                  user_kvm_nested_state->data + VMCS12_SIZE,
-                                  sizeof(*vmcs12))) {
+                                  user_vmx_nested_state->shadow_vmcs12,
+                                  sizeof(*shadow_vmcs12))) {
                        ret = -EFAULT;
                        goto error_guest_mode;
                }
index 3a742428ad174ef2ed65e6d2f78c7b58f56916cb..337718fc8a36f9359d52c1633bebc49eab56874d 100644 (file)
@@ -201,9 +201,10 @@ struct __packed vmcs12 {
 /*
  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
  * and any VMCS region. Although only sizeof(struct vmcs12) is used by the
- * current implementation, 4K are reserved to avoid future complications.
+ * current implementation, 4K are reserved to avoid future complications and
+ * to preserve userspace ABI.
  */
-#define VMCS12_SIZE 0x1000
+#define VMCS12_SIZE            KVM_STATE_NESTED_VMX_VMCS_SIZE
 
 /*
  * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
index 8dc0fc0b1382b6cde08e2774449b39b0607a1a64..296da58f30138f5f82a699b10c51e8d44c30839e 100644 (file)
@@ -199,7 +199,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
        if (!pgtable_l5_enabled())
                return (p4d_t *)pgd;
 
-       p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+       p4d = pgd_val(*pgd) & PTE_PFN_MASK;
        p4d += __START_KERNEL_map - phys_base;
        return (p4d_t *)p4d + p4d_index(addr);
 }
index dc3f058bdf9be8bad920148a8a5737387b16b939..dc6182eecefad2b1fcf4f4f0aa2b11958d00cf24 100644 (file)
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
 } kaslr_regions[] = {
        { &page_offset_base, 0 },
        { &vmalloc_base, 0 },
-       { &vmemmap_base, 1 },
+       { &vmemmap_base, 0 },
 };
 
 /* Get size in bytes used by the memory region */
@@ -78,6 +78,7 @@ void __init kernel_randomize_memory(void)
        unsigned long rand, memory_tb;
        struct rnd_state rand_state;
        unsigned long remain_entropy;
+       unsigned long vmemmap_size;
 
        vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
        vaddr = vaddr_start;
@@ -109,6 +110,14 @@ void __init kernel_randomize_memory(void)
        if (memory_tb < kaslr_regions[0].size_tb)
                kaslr_regions[0].size_tb = memory_tb;
 
+       /*
+        * Calculate the vmemmap region size in TBs, aligned to a TB
+        * boundary.
+        */
+       vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
+                       sizeof(struct page);
+       kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
+
        /* Calculate entropy available between regions */
        remain_entropy = vaddr_end - vaddr_start;
        for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
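
A quick sanity check of the arithmetic above, assuming 4 KiB pages and a 64-byte struct page (typical values, but configuration dependent): each TiB of direct map covers 2^28 pages and therefore needs 2^28 * 64 B = 16 GiB of vmemmap, so for a 64 TiB kaslr_regions[0] the calculation gives

	vmemmap_size = (64 << (40 - 12)) pages * 64 B = 2^34 * 64 B = 1 TiB
	kaslr_regions[2].size_tb = DIV_ROUND_UP(1 TiB, 1 TiB) = 1

and anything between 65 TiB and 128 TiB of direct map rounds up to a 2 TiB vmemmap region.
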
index 32bfab4e21ebe88fbaa5eafa2c25d0da31b5a1c9..eaaed5bfc4a4429a92e980252c2f993047eb90bb 100644 (file)
@@ -186,9 +186,7 @@ struct jit_context {
 #define BPF_MAX_INSN_SIZE      128
 #define BPF_INSN_SAFETY                64
 
-#define AUX_STACK_SPACE                40 /* Space for RBX, R13, R14, R15, tailcnt */
-
-#define PROLOGUE_SIZE          37
+#define PROLOGUE_SIZE          20
 
 /*
  * Emit x86-64 prologue code for BPF program and check its size.
@@ -199,44 +197,19 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
        u8 *prog = *pprog;
        int cnt = 0;
 
-       /* push rbp */
-       EMIT1(0x55);
-
-       /* mov rbp,rsp */
-       EMIT3(0x48, 0x89, 0xE5);
-
-       /* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
-       EMIT3_off32(0x48, 0x81, 0xEC,
-                   round_up(stack_depth, 8) + AUX_STACK_SPACE);
-
-       /* sub rbp, AUX_STACK_SPACE */
-       EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);
-
-       /* mov qword ptr [rbp+0],rbx */
-       EMIT4(0x48, 0x89, 0x5D, 0);
-       /* mov qword ptr [rbp+8],r13 */
-       EMIT4(0x4C, 0x89, 0x6D, 8);
-       /* mov qword ptr [rbp+16],r14 */
-       EMIT4(0x4C, 0x89, 0x75, 16);
-       /* mov qword ptr [rbp+24],r15 */
-       EMIT4(0x4C, 0x89, 0x7D, 24);
-
+       EMIT1(0x55);             /* push rbp */
+       EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
+       /* sub rsp, rounded_stack_depth */
+       EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
+       EMIT1(0x53);             /* push rbx */
+       EMIT2(0x41, 0x55);       /* push r13 */
+       EMIT2(0x41, 0x56);       /* push r14 */
+       EMIT2(0x41, 0x57);       /* push r15 */
        if (!ebpf_from_cbpf) {
-               /*
-                * Clear the tail call counter (tail_call_cnt): for eBPF tail
-                * calls we need to reset the counter to 0. It's done in two
-                * instructions, resetting RAX register to 0, and moving it
-                * to the counter location.
-                */
-
-               /* xor eax, eax */
-               EMIT2(0x31, 0xc0);
-               /* mov qword ptr [rbp+32], rax */
-               EMIT4(0x48, 0x89, 0x45, 32);
-
+               /* zero init tail_call_cnt */
+               EMIT2(0x6a, 0x00);
                BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
        }
-
        *pprog = prog;
 }
 
@@ -281,13 +254,13 @@ static void emit_bpf_tail_call(u8 **pprog)
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *      goto out;
         */
-       EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
+       EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
 #define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
        EMIT2(X86_JA, OFFSET2);                   /* ja out */
        label2 = cnt;
        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
-       EMIT2_off32(0x89, 0x85, 36);              /* mov dword ptr [rbp + 36], eax */
+       EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp - 548], eax */
 
        /* prog = array->ptrs[index]; */
        EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
@@ -1036,19 +1009,14 @@ xadd:                   if (is_imm8(insn->off))
                        seen_exit = true;
                        /* Update cleanup_addr */
                        ctx->cleanup_addr = proglen;
-                       /* mov rbx, qword ptr [rbp+0] */
-                       EMIT4(0x48, 0x8B, 0x5D, 0);
-                       /* mov r13, qword ptr [rbp+8] */
-                       EMIT4(0x4C, 0x8B, 0x6D, 8);
-                       /* mov r14, qword ptr [rbp+16] */
-                       EMIT4(0x4C, 0x8B, 0x75, 16);
-                       /* mov r15, qword ptr [rbp+24] */
-                       EMIT4(0x4C, 0x8B, 0x7D, 24);
-
-                       /* add rbp, AUX_STACK_SPACE */
-                       EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
-                       EMIT1(0xC9); /* leave */
-                       EMIT1(0xC3); /* ret */
+                       if (!bpf_prog_was_classic(bpf_prog))
+                               EMIT1(0x5B); /* get rid of tail_call_cnt */
+                       EMIT2(0x41, 0x5F);   /* pop r15 */
+                       EMIT2(0x41, 0x5E);   /* pop r14 */
+                       EMIT2(0x41, 0x5D);   /* pop r13 */
+                       EMIT1(0x5B);         /* pop rbx */
+                       EMIT1(0xC9);         /* leave */
+                       EMIT1(0xC3);         /* ret */
                        break;
 
                default:
index 1b220101a9cb6d2cb64d38b3c93713ea03243981..2466dcc3ef1dc55bc16220b65ce9eb8cd39d83f4 100644 (file)
@@ -73,6 +73,7 @@ config BLK_DEV_INTEGRITY
 
 config BLK_DEV_ZONED
        bool "Zoned block device support"
+       select MQ_IOSCHED_DEADLINE
        ---help---
        Block layer zoned block device support. This option enables
        support for ZAC/ZBC host-managed and host-aware zoned block devices.
index 59f46904cb114debff28c96e6c11dedef87fabd7..b3796a40a61a29e1de205a70dd7d516c95892630 100644 (file)
@@ -1046,8 +1046,7 @@ struct blkcg_policy blkcg_policy_bfq = {
 struct cftype bfq_blkcg_legacy_files[] = {
        {
                .name = "bfq.weight",
-               .link_name = "weight",
-               .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
+               .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write_u64 = bfq_io_set_weight_legacy,
        },
@@ -1167,8 +1166,7 @@ struct cftype bfq_blkcg_legacy_files[] = {
 struct cftype bfq_blkg_files[] = {
        {
                .name = "bfq.weight",
-               .link_name = "weight",
-               .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
+               .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write = bfq_io_set_weight,
        },
index 683cbb40f0516a307f5dea53474f084403498379..ce797d73bb436afafdc0434d708426813836ea95 100644 (file)
@@ -636,7 +636,7 @@ EXPORT_SYMBOL(bio_clone_fast);
 
 static inline bool page_is_mergeable(const struct bio_vec *bv,
                struct page *page, unsigned int len, unsigned int off,
-               bool same_page)
+               bool *same_page)
 {
        phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
                bv->bv_offset + bv->bv_len - 1;
@@ -647,15 +647,9 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
        if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
                return false;
 
-       if ((vec_end_addr & PAGE_MASK) != page_addr) {
-               if (same_page)
-                       return false;
-               if (pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
-                       return false;
-       }
-
-       WARN_ON_ONCE(same_page && (len + off) > PAGE_SIZE);
-
+       *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
+       if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
+               return false;
        return true;
 }
 
@@ -701,6 +695,7 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
                bool put_same_page)
 {
        struct bio_vec *bvec;
+       bool same_page = false;
 
        /*
         * cloned bio must not modify vec list
@@ -729,7 +724,7 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
                if (bvec_gap_to_prev(q, bvec, offset))
                        return 0;
 
-               if (page_is_mergeable(bvec, page, len, offset, false) &&
+               if (page_is_mergeable(bvec, page, len, offset, &same_page) &&
                    can_add_page_to_seg(q, bvec, page, len, offset)) {
                        bvec->bv_len += len;
                        goto done;
@@ -767,8 +762,7 @@ EXPORT_SYMBOL(bio_add_pc_page);
  * @page: start page to add
  * @len: length of the data to add
  * @off: offset of the data relative to @page
- * @same_page: if %true only merge if the new data is in the same physical
- *             page as the last segment of the bio.
+ * @same_page: return whether the segment has been merged inside the same page
  *
  * Try to add the data at @page + @off to the last bvec of @bio.  This is
  * a useful optimisation for file systems with a block size smaller than the
@@ -779,7 +773,7 @@ EXPORT_SYMBOL(bio_add_pc_page);
  * Return %true on success or %false on failure.
  */
 bool __bio_try_merge_page(struct bio *bio, struct page *page,
-               unsigned int len, unsigned int off, bool same_page)
+               unsigned int len, unsigned int off, bool *same_page)
 {
        if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
                return false;
@@ -837,7 +831,9 @@ EXPORT_SYMBOL_GPL(__bio_add_page);
 int bio_add_page(struct bio *bio, struct page *page,
                 unsigned int len, unsigned int offset)
 {
-       if (!__bio_try_merge_page(bio, page, len, offset, false)) {
+       bool same_page = false;
+
+       if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
                if (bio_full(bio))
                        return 0;
                __bio_add_page(bio, page, len, offset);
@@ -900,6 +896,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
        unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
        struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
        struct page **pages = (struct page **)bv;
+       bool same_page = false;
        ssize_t size, left;
        unsigned len, i;
        size_t offset;
@@ -920,8 +917,15 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
                struct page *page = pages[i];
 
                len = min_t(size_t, PAGE_SIZE - offset, left);
-               if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
-                       return -EINVAL;
+
+               if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
+                       if (same_page)
+                               put_page(page);
+               } else {
+                       if (WARN_ON_ONCE(bio_full(bio)))
+                                return -EINVAL;
+                       __bio_add_page(bio, page, len, offset);
+               }
                offset = 0;
        }
 
index 6aea0ebc3a738b4cb504c7aa95c4c6ef8242189e..2489ddbb21dbd060313188da35212d5d56819d41 100644 (file)
@@ -821,38 +821,28 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {},
 };
 
-static bool debugfs_create_files(struct dentry *parent, void *data,
+static void debugfs_create_files(struct dentry *parent, void *data,
                                 const struct blk_mq_debugfs_attr *attr)
 {
        if (IS_ERR_OR_NULL(parent))
-               return false;
+               return;
 
        d_inode(parent)->i_private = data;
 
-       for (; attr->name; attr++) {
-               if (!debugfs_create_file(attr->name, attr->mode, parent,
-                                        (void *)attr, &blk_mq_debugfs_fops))
-                       return false;
-       }
-       return true;
+       for (; attr->name; attr++)
+               debugfs_create_file(attr->name, attr->mode, parent,
+                                   (void *)attr, &blk_mq_debugfs_fops);
 }
 
-int blk_mq_debugfs_register(struct request_queue *q)
+void blk_mq_debugfs_register(struct request_queue *q)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
 
-       if (!blk_debugfs_root)
-               return -ENOENT;
-
        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
-       if (!q->debugfs_dir)
-               return -ENOMEM;
 
-       if (!debugfs_create_files(q->debugfs_dir, q,
-                                 blk_mq_debugfs_queue_attrs))
-               goto err;
+       debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
 
        /*
         * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
@@ -864,11 +854,10 @@ int blk_mq_debugfs_register(struct request_queue *q)
 
        /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
        queue_for_each_hw_ctx(q, hctx, i) {
-               if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
-                       goto err;
-               if (q->elevator && !hctx->sched_debugfs_dir &&
-                   blk_mq_debugfs_register_sched_hctx(q, hctx))
-                       goto err;
+               if (!hctx->debugfs_dir)
+                       blk_mq_debugfs_register_hctx(q, hctx);
+               if (q->elevator && !hctx->sched_debugfs_dir)
+                       blk_mq_debugfs_register_sched_hctx(q, hctx);
        }
 
        if (q->rq_qos) {
@@ -879,12 +868,6 @@ int blk_mq_debugfs_register(struct request_queue *q)
                        rqos = rqos->next;
                }
        }
-
-       return 0;
-
-err:
-       blk_mq_debugfs_unregister(q);
-       return -ENOMEM;
 }
 
 void blk_mq_debugfs_unregister(struct request_queue *q)
@@ -894,52 +877,32 @@ void blk_mq_debugfs_unregister(struct request_queue *q)
        q->debugfs_dir = NULL;
 }
 
-static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
-                                      struct blk_mq_ctx *ctx)
+static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
+                                       struct blk_mq_ctx *ctx)
 {
        struct dentry *ctx_dir;
        char name[20];
 
        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
-       if (!ctx_dir)
-               return -ENOMEM;
 
-       if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
-               return -ENOMEM;
-
-       return 0;
+       debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
 }
 
-int blk_mq_debugfs_register_hctx(struct request_queue *q,
-                                struct blk_mq_hw_ctx *hctx)
+void blk_mq_debugfs_register_hctx(struct request_queue *q,
+                                 struct blk_mq_hw_ctx *hctx)
 {
        struct blk_mq_ctx *ctx;
        char name[20];
        int i;
 
-       if (!q->debugfs_dir)
-               return -ENOENT;
-
        snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
        hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
-       if (!hctx->debugfs_dir)
-               return -ENOMEM;
-
-       if (!debugfs_create_files(hctx->debugfs_dir, hctx,
-                                 blk_mq_debugfs_hctx_attrs))
-               goto err;
-
-       hctx_for_each_ctx(hctx, ctx, i) {
-               if (blk_mq_debugfs_register_ctx(hctx, ctx))
-                       goto err;
-       }
 
-       return 0;
+       debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
 
-err:
-       blk_mq_debugfs_unregister_hctx(hctx);
-       return -ENOMEM;
+       hctx_for_each_ctx(hctx, ctx, i)
+               blk_mq_debugfs_register_ctx(hctx, ctx);
 }
 
 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -949,17 +912,13 @@ void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
        hctx->debugfs_dir = NULL;
 }
 
-int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+void blk_mq_debugfs_register_hctxs(struct request_queue *q)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
-               if (blk_mq_debugfs_register_hctx(q, hctx))
-                       return -ENOMEM;
-       }
-
-       return 0;
+       queue_for_each_hw_ctx(q, hctx, i)
+               blk_mq_debugfs_register_hctx(q, hctx);
 }
 
 void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
@@ -971,29 +930,16 @@ void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
                blk_mq_debugfs_unregister_hctx(hctx);
 }
 
-int blk_mq_debugfs_register_sched(struct request_queue *q)
+void blk_mq_debugfs_register_sched(struct request_queue *q)
 {
        struct elevator_type *e = q->elevator->type;
 
-       if (!q->debugfs_dir)
-               return -ENOENT;
-
        if (!e->queue_debugfs_attrs)
-               return 0;
+               return;
 
        q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
-       if (!q->sched_debugfs_dir)
-               return -ENOMEM;
 
-       if (!debugfs_create_files(q->sched_debugfs_dir, q,
-                                 e->queue_debugfs_attrs))
-               goto err;
-
-       return 0;
-
-err:
-       blk_mq_debugfs_unregister_sched(q);
-       return -ENOMEM;
+       debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
 }
 
 void blk_mq_debugfs_unregister_sched(struct request_queue *q)
@@ -1008,36 +954,22 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
        rqos->debugfs_dir = NULL;
 }
 
-int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 {
        struct request_queue *q = rqos->q;
        const char *dir_name = rq_qos_id_to_name(rqos->id);
 
-       if (!q->debugfs_dir)
-               return -ENOENT;
-
        if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
-               return 0;
+               return;
 
-       if (!q->rqos_debugfs_dir) {
+       if (!q->rqos_debugfs_dir)
                q->rqos_debugfs_dir = debugfs_create_dir("rqos",
                                                         q->debugfs_dir);
-               if (!q->rqos_debugfs_dir)
-                       return -ENOMEM;
-       }
 
        rqos->debugfs_dir = debugfs_create_dir(dir_name,
                                               rqos->q->rqos_debugfs_dir);
-       if (!rqos->debugfs_dir)
-               return -ENOMEM;
 
-       if (!debugfs_create_files(rqos->debugfs_dir, rqos,
-                                 rqos->ops->debugfs_attrs))
-               goto err;
-       return 0;
- err:
-       blk_mq_debugfs_unregister_rqos(rqos);
-       return -ENOMEM;
+       debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
 }
 
 void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
@@ -1046,27 +978,18 @@ void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
        q->rqos_debugfs_dir = NULL;
 }
 
-int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
-                                      struct blk_mq_hw_ctx *hctx)
+void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+                                       struct blk_mq_hw_ctx *hctx)
 {
        struct elevator_type *e = q->elevator->type;
 
-       if (!hctx->debugfs_dir)
-               return -ENOENT;
-
        if (!e->hctx_debugfs_attrs)
-               return 0;
+               return;
 
        hctx->sched_debugfs_dir = debugfs_create_dir("sched",
                                                     hctx->debugfs_dir);
-       if (!hctx->sched_debugfs_dir)
-               return -ENOMEM;
-
-       if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
-                                 e->hctx_debugfs_attrs))
-               return -ENOMEM;
-
-       return 0;
+       debugfs_create_files(hctx->sched_debugfs_dir, hctx,
+                            e->hctx_debugfs_attrs);
 }
 
 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
index 8c9012a578c1228c74f48dda5abfb88a5fb621a5..a68aa6041a10dc616ca2bca43a3bb4588f4baf8c 100644 (file)
@@ -18,74 +18,68 @@ struct blk_mq_debugfs_attr {
 int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
 int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
 
-int blk_mq_debugfs_register(struct request_queue *q);
+void blk_mq_debugfs_register(struct request_queue *q);
 void blk_mq_debugfs_unregister(struct request_queue *q);
-int blk_mq_debugfs_register_hctx(struct request_queue *q,
-                                struct blk_mq_hw_ctx *hctx);
+void blk_mq_debugfs_register_hctx(struct request_queue *q,
+                                 struct blk_mq_hw_ctx *hctx);
 void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
-int blk_mq_debugfs_register_hctxs(struct request_queue *q);
+void blk_mq_debugfs_register_hctxs(struct request_queue *q);
 void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
 
-int blk_mq_debugfs_register_sched(struct request_queue *q);
+void blk_mq_debugfs_register_sched(struct request_queue *q);
 void blk_mq_debugfs_unregister_sched(struct request_queue *q);
-int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                                       struct blk_mq_hw_ctx *hctx);
 void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
 
-int blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
+void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
 void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
 void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
 #else
-static inline int blk_mq_debugfs_register(struct request_queue *q)
+static inline void blk_mq_debugfs_register(struct request_queue *q)
 {
-       return 0;
 }
 
 static inline void blk_mq_debugfs_unregister(struct request_queue *q)
 {
 }
 
-static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
-                                              struct blk_mq_hw_ctx *hctx)
+static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
+                                               struct blk_mq_hw_ctx *hctx)
 {
-       return 0;
 }
 
 static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
 {
 }
 
-static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)
 {
-       return 0;
 }
 
 static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
 {
 }
 
-static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
+static inline void blk_mq_debugfs_register_sched(struct request_queue *q)
 {
-       return 0;
 }
 
 static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
 {
 }
 
-static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
-                                                    struct blk_mq_hw_ctx *hctx)
+static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+                                                     struct blk_mq_hw_ctx *hctx)
 {
-       return 0;
 }
 
 static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
 {
 }
 
-static inline int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 {
-       return 0;
 }
 
 static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
index 500cb04901cc12906a8108c0795c5e3119e23d2c..2766066a15dbf9c35705e6d2278bb16fdc53fa7d 100644 (file)
@@ -555,7 +555,6 @@ void blk_mq_sched_free_requests(struct request_queue *q)
        int i;
 
        lockdep_assert_held(&q->sysfs_lock);
-       WARN_ON(!q->elevator);
 
        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->sched_tags)
index aaa57e0c809dc91894bc89db7ea9065acdcb31f3..4a2dff3038656a4520922595779ac79d40a21960 100644 (file)
@@ -4460,9 +4460,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST3320[68]13AS",     "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
 
-       /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
-       { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
-       { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+       /* drives which fail FPDMA_AA activation (some may freeze afterwards);
+          the ST disks also have LPM issues */
+       { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+                                               ATA_HORKAGE_NOLPM, },
+       { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+                                               ATA_HORKAGE_NOLPM, },
        { "VB0250EAVER",        "HPG7",         ATA_HORKAGE_BROKEN_FPDMA_AA },
 
        /* Blacklist entries taken from Silicon Image 3124/3132
index e038e2b3b7ea4a7d53d2d7c189f9dbac269ea228..0bbb328bd17f8867ffafbc7a7f33de818ea2997f 100644 (file)
@@ -755,10 +755,32 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
 
        WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
                               &devres));
-
 }
 EXPORT_SYMBOL_GPL(devm_remove_action);
 
+/**
+ * devm_release_action() - release previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Releases and removes instance of @action previously added by
+ * devm_add_action().  Both action and data should match one of the
+ * existing entries.
+ */
+void devm_release_action(struct device *dev, void (*action)(void *), void *data)
+{
+       struct action_devres devres = {
+               .data = data,
+               .action = action,
+       };
+
+       WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
+                              &devres));
+
+}
+EXPORT_SYMBOL_GPL(devm_release_action);
+
 /*
  * Managed kmalloc/kfree
  */
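
Since the hunk above only adds the helper and its kerneldoc, here is a minimal, hypothetical usage sketch pairing it with the existing devm_add_action_or_reset(); my_setup(), my_teardown() and my_data are placeholder names, not taken from the patch:

#include <linux/device.h>

static void my_teardown(void *data)
{
	/* undo whatever my_setup() prepared for @data */
}

static int my_setup(struct device *dev, void *my_data)
{
	int ret = devm_add_action_or_reset(dev, my_teardown, my_data);

	if (ret)
		return ret;

	/*
	 * Later, when the resource has to go away before the device is
	 * unbound: run the action now and drop it from the devres list.
	 */
	devm_release_action(dev, my_teardown, my_data);
	return 0;
}
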
index 5d1c261a2cfd588b04284c3e18548e3b812d918e..fca0c97ff1aa09925cbf50ae8e199c5d91963386 100644 (file)
@@ -74,10 +74,6 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
        struct nullb_device *dev = nullb->dev;
        unsigned int zno, nrz = 0;
 
-       if (!dev->zoned)
-               /* Not a zoned null device */
-               return -EOPNOTSUPP;
-
        zno = null_zone_no(dev, sector);
        if (zno < dev->nr_zones) {
                nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
index 4c7f51b1eda94727beb08b9663de0328db714a9b..4628e1a27a2b7133254f8e6629d5360cd1628319 100644 (file)
@@ -767,7 +767,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
        strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
        set_capacity(gendisk, priv->size >> 9);
 
-       dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
+       dev_info(&dev->core, "%s: Using %llu MiB of GPU memory\n",
                 gendisk->disk_name, get_capacity(gendisk) >> 11);
 
        device_add_disk(&dev->core, gendisk, NULL);
index 308475ed4b32ad97033cb7e9df26817c2e3a3986..b72741668c927680493369ae2678f12805dca2d4 100644 (file)
@@ -660,12 +660,6 @@ static int sysc_check_registers(struct sysc *ddata)
                nr_regs++;
        }
 
-       if (nr_regs < 1) {
-               dev_err(ddata->dev, "missing registers\n");
-
-               return -EINVAL;
-       }
-
        if (nr_matches > nr_regs) {
                dev_err(ddata->dev, "overlapping registers: (%i/%i)",
                        nr_regs, nr_matches);
@@ -691,12 +685,18 @@ static int sysc_ioremap(struct sysc *ddata)
 {
        int size;
 
-       size = max3(ddata->offsets[SYSC_REVISION],
-                   ddata->offsets[SYSC_SYSCONFIG],
-                   ddata->offsets[SYSC_SYSSTATUS]);
+       if (ddata->offsets[SYSC_REVISION] < 0 &&
+           ddata->offsets[SYSC_SYSCONFIG] < 0 &&
+           ddata->offsets[SYSC_SYSSTATUS] < 0) {
+               size = ddata->module_size;
+       } else {
+               size = max3(ddata->offsets[SYSC_REVISION],
+                           ddata->offsets[SYSC_SYSCONFIG],
+                           ddata->offsets[SYSC_SYSSTATUS]);
 
-       if (size < 0 || (size + sizeof(u32)) > ddata->module_size)
-               return -EINVAL;
+               if ((size + sizeof(u32)) > ddata->module_size)
+                       return -EINVAL;
+       }
 
        ddata->module_va = devm_ioremap(ddata->dev,
                                        ddata->module_pa,
@@ -1128,7 +1128,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
                   0xffff00f0, 0),
        SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
-       SYSC_QUIRK("dcan", 0, 0, -1, -1, 0x00001401, 0xffffffff, 0),
        SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
        SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
        SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
index b2a951a798e2e9b9cc60b3c1f81ba6e966735bbf..5c69c9a9a6a4f58fdafdc669201a85960f9a0b04 100644 (file)
@@ -149,22 +149,22 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
        return val;
 }
 
-static u64 arch_counter_get_cntpct_stable(void)
+static notrace u64 arch_counter_get_cntpct_stable(void)
 {
        return __arch_counter_get_cntpct_stable();
 }
 
-static u64 arch_counter_get_cntpct(void)
+static notrace u64 arch_counter_get_cntpct(void)
 {
        return __arch_counter_get_cntpct();
 }
 
-static u64 arch_counter_get_cntvct_stable(void)
+static notrace u64 arch_counter_get_cntvct_stable(void)
 {
        return __arch_counter_get_cntvct_stable();
 }
 
-static u64 arch_counter_get_cntvct(void)
+static notrace u64 arch_counter_get_cntvct(void)
 {
        return __arch_counter_get_cntvct();
 }
index e40b55a7086f717b3ad268e36a5d2b2f91e58536..5394d9dbdfbc633b4a9662dc1fbc09a2bfd40aed 100644 (file)
@@ -896,7 +896,7 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
        return ret;
 }
 
-const static struct omap_dm_timer_ops dmtimer_ops = {
+static const struct omap_dm_timer_ops dmtimer_ops = {
        .request_by_node = omap_dm_timer_request_by_node,
        .request_specific = omap_dm_timer_request_specific,
        .request = omap_dm_timer_request,
index 996d68ff992a88bf63d9f6ca1a6a617a83b5be6d..8465d12fecba9d3b06c3116ebd7614697becf409 100644 (file)
@@ -27,9 +27,8 @@ static void dev_dax_percpu_release(struct percpu_ref *ref)
        complete(&dev_dax->cmp);
 }
 
-static void dev_dax_percpu_exit(void *data)
+static void dev_dax_percpu_exit(struct percpu_ref *ref)
 {
-       struct percpu_ref *ref = data;
        struct dev_dax *dev_dax = ref_to_dev_dax(ref);
 
        dev_dbg(&dev_dax->dev, "%s\n", __func__);
@@ -466,18 +465,12 @@ int dev_dax_probe(struct device *dev)
        if (rc)
                return rc;
 
-       rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
-       if (rc)
-               return rc;
-
        dev_dax->pgmap.ref = &dev_dax->ref;
        dev_dax->pgmap.kill = dev_dax_percpu_kill;
+       dev_dax->pgmap.cleanup = dev_dax_percpu_exit;
        addr = devm_memremap_pages(dev, &dev_dax->pgmap);
-       if (IS_ERR(addr)) {
-               devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
-               percpu_ref_exit(&dev_dax->ref);
+       if (IS_ERR(addr))
                return PTR_ERR(addr);
-       }
 
        inode = dax_inode(dax_dev);
        cdev = inode->i_cdev;
index 9026df9235428801b5284750a91a42ff958f7700..d40ccc3af9e26a25dd99a99739f9553f234f7529 100644 (file)
@@ -256,7 +256,7 @@ config TI_SCI_PROTOCOL
 
 config TRUSTED_FOUNDATIONS
        bool "Trusted Foundations secure monitor support"
-       depends on ARM
+       depends on ARM && CPU_V7
        help
          Some devices (including most early Tegra-based consumer devices on
          the market) are booted with the Trusted Foundations secure monitor
index 043833ad3c1a1e1a1d97ff8c5be3458bef1feb1a..687121f8c4d5eb9ce9728a8dd783176316879737 100644 (file)
@@ -100,6 +100,9 @@ int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
        struct imx_sc_rpc_msg *hdr = &msg.hdr;
        int ret;
 
+       if (!imx_sc_irq_ipc_handle)
+               return -EPROBE_DEFER;
+
        hdr->ver = IMX_SC_RPC_VERSION;
        hdr->svc = IMX_SC_RPC_SVC_IRQ;
        hdr->func = IMX_SC_IRQ_FUNC_ENABLE;
index 2328d04201a98b79f7dc3e8f771f00aa3846d053..cfe827cefad81ef32d732617feb0dd9ed71a2ec4 100644 (file)
@@ -305,7 +305,8 @@ static const struct regmap_config pca953x_i2c_regmap = {
        .volatile_reg = pca953x_volatile_register,
 
        .cache_type = REGCACHE_RBTREE,
-       .max_register = 0x7f,
+       /* REVISIT: should be 0x7f but some 24-bit chips use REG_ADDR_AI */
+       .max_register = 0xff,
 };
 
 static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off,
index 039cfa2ec89d9d9ada32d9a97731ac6204e8de95..abeaab4bf1bc2f33c8be01e73688b2fd5f59ef3f 100644 (file)
@@ -2492,7 +2492,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 
 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
 {
-       int r = -EINVAL;
+       int r;
 
        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
                r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
@@ -2502,7 +2502,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
                }
                *smu_version = adev->pm.fw_version;
        }
-       return r;
+       return 0;
 }
 
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
index eaef5edefc347e4a04b492548b72b0966236db99..24c6e5fcda8660316f5a289c38c95e32c75de6df 100644 (file)
@@ -172,6 +172,8 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
 {
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
+       if (block >= AMDGPU_RAS_BLOCK_COUNT)
+               return 0;
        return ras && (ras->supported & (1 << block));
 }
 
index ecf6f96df2ad067054389ed3145f6b388f7997b0..e6b07ece391062343c2a74275ccf96794ce34915 100644 (file)
@@ -594,7 +594,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       uint32_t rptr = amdgpu_ring_get_rptr(ring);
+       uint32_t rptr;
        unsigned i;
        int r;
 
@@ -602,6 +602,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
        if (r)
                return r;
 
+       rptr = amdgpu_ring_get_rptr(ring);
+
        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);
 
index c9edddf9f88a8c67ce9742632120287504838b68..be70e6e5f9df724a1129b9f541e119f2a7ad7b21 100644 (file)
@@ -170,13 +170,16 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       uint32_t rptr = amdgpu_ring_get_rptr(ring);
+       uint32_t rptr;
        unsigned i;
        int r;
 
        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;
+
+       rptr = amdgpu_ring_get_rptr(ring);
+
        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
        amdgpu_ring_commit(ring);
 
index 2191d3d0a2190a4cbe5ac93835b2ab753f393c61..fc4f0bb9a2e7c0982f2ecf130a1171504b1b4b8f 100644 (file)
@@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       uint32_t rptr = amdgpu_ring_get_rptr(ring);
+       uint32_t rptr;
        unsigned i;
        int r;
 
@@ -185,6 +185,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;
+
+       rptr = amdgpu_ring_get_rptr(ring);
+
        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
        amdgpu_ring_commit(ring);
 
index 649cfd8b42007cff821c2a39cca6dba391ca481c..e804ac5dec02b8b6d1b0c53574c759aba006bc46 100644 (file)
@@ -1570,6 +1570,50 @@ static void connector_bad_edid(struct drm_connector *connector,
        }
 }
 
+/* Get override or firmware EDID */
+static struct edid *drm_get_override_edid(struct drm_connector *connector)
+{
+       struct edid *override = NULL;
+
+       if (connector->override_edid)
+               override = drm_edid_duplicate(connector->edid_blob_ptr->data);
+
+       if (!override)
+               override = drm_load_edid_firmware(connector);
+
+       return IS_ERR(override) ? NULL : override;
+}
+
+/**
+ * drm_add_override_edid_modes - add modes from override/firmware EDID
+ * @connector: connector we're probing
+ *
+ * Add modes from the override/firmware EDID, if available. Only to be used from
+ * drm_helper_probe_single_connector_modes() as a fallback for when DDC probe
+ * failed during drm_get_edid() and caused the override/firmware EDID to be
+ * skipped.
+ *
+ * Return: The number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_override_edid_modes(struct drm_connector *connector)
+{
+       struct edid *override;
+       int num_modes = 0;
+
+       override = drm_get_override_edid(connector);
+       if (override) {
+               drm_connector_update_edid_property(connector, override);
+               num_modes = drm_add_edid_modes(connector, override);
+               kfree(override);
+
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
+                             connector->base.id, connector->name, num_modes);
+       }
+
+       return num_modes;
+}
+EXPORT_SYMBOL(drm_add_override_edid_modes);
+
 /**
  * drm_do_get_edid - get EDID data using a custom EDID block read function
  * @connector: connector we're probing
@@ -1597,15 +1641,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
 {
        int i, j = 0, valid_extensions = 0;
        u8 *edid, *new;
-       struct edid *override = NULL;
-
-       if (connector->override_edid)
-               override = drm_edid_duplicate(connector->edid_blob_ptr->data);
-
-       if (!override)
-               override = drm_load_edid_firmware(connector);
+       struct edid *override;
 
-       if (!IS_ERR_OR_NULL(override))
+       override = drm_get_override_edid(connector);
+       if (override)
                return override;
 
        if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
index 1ee208c2c85eb2fcfe5e163e841a978363826471..472ea5d81f825b25de7c5bf3702a8333921b4771 100644 (file)
@@ -255,7 +255,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
        if (obj->import_attach)
                shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
        else
-               shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
+               shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+                                   VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 
        if (!shmem->vaddr) {
                DRM_DEBUG_KMS("Failed to vmap pages\n");
index 521aff99b08a6aff22f463635c0d4395d23bacf6..d8a0bcd02f34c4314c669455e56634845a60ae56 100644 (file)
@@ -42,6 +42,14 @@ static const struct drm_dmi_panel_orientation_data asus_t100ha = {
        .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
 };
 
+static const struct drm_dmi_panel_orientation_data gpd_micropc = {
+       .width = 720,
+       .height = 1280,
+       .bios_dates = (const char * const []){ "04/26/2019",
+               NULL },
+       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
 static const struct drm_dmi_panel_orientation_data gpd_pocket = {
        .width = 1200,
        .height = 1920,
@@ -50,6 +58,14 @@ static const struct drm_dmi_panel_orientation_data gpd_pocket = {
        .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
+static const struct drm_dmi_panel_orientation_data gpd_pocket2 = {
+       .width = 1200,
+       .height = 1920,
+       .bios_dates = (const char * const []){ "06/28/2018", "08/28/2018",
+               "12/07/2018", NULL },
+       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
 static const struct drm_dmi_panel_orientation_data gpd_win = {
        .width = 720,
        .height = 1280,
@@ -99,6 +115,14 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
                },
                .driver_data = (void *)&asus_t100ha,
+       }, {    /* GPD MicroPC (generic strings, also match on bios date) */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+                 DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+                 DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+               },
+               .driver_data = (void *)&gpd_micropc,
        }, {    /*
                 * GPD Pocket, note that the DMI data is less generic than
                 * it seems, devices with a board-vendor of "AMI Corporation"
@@ -112,6 +136,14 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
                },
                .driver_data = (void *)&gpd_pocket,
+       }, {    /* GPD Pocket 2 (generic strings, also match on bios date) */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+                 DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+                 DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+               },
+               .driver_data = (void *)&gpd_pocket2,
        }, {    /* GPD Win (same note on DMI match as GPD Pocket) */
                .matches = {
                  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
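
The new GPD MicroPC and GPD Pocket 2 entries above rely on data-driven DMI matching: these devices ship with generic "Default string" DMI fields, so the driver_data additionally carries a list of BIOS dates to disambiguate them. A minimal sketch of the underlying dmi_first_match() lookup pattern, with a hypothetical quirk table and payload type:

#include <linux/dmi.h>

struct my_quirk {
	int rotation_degrees;
};

static const struct my_quirk example_quirk = { .rotation_degrees = 90 };

static const struct dmi_system_id my_quirk_table[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
		.driver_data = (void *)&example_quirk,
	},
	{}	/* terminate list */
};

static const struct my_quirk *my_lookup_quirk(void)
{
	const struct dmi_system_id *match = dmi_first_match(my_quirk_table);

	return match ? match->driver_data : NULL;
}
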
index 6fd08e04b3231ea13169fe7c97d891f821d97e77..dd427c7ff96771f8170fe0572038b21dfa4f2b69 100644 (file)
@@ -479,6 +479,13 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 
        count = (*connector_funcs->get_modes)(connector);
 
+       /*
+        * Fallback for when the DDC probe in drm_get_edid() failed and thus
+        * skipped the override/firmware EDID.
+        */
+       if (count == 0 && connector->status == connector_status_connected)
+               count = drm_add_override_edid_modes(connector);
+
        if (count == 0 && connector->status == connector_status_connected)
                count = drm_add_modes_noedid(connector, 1024, 768);
        count += drm_helper_probe_add_cmdline_mode(connector);
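
The added check gives connectors a second chance to pick up a user-supplied override or firmware EDID when the DDC probe inside get_modes() failed. A minimal sketch of the resulting fallback order, expressed as a hypothetical wrapper over the helpers used above:

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

/* Apply the probe-helper fallbacks after ->get_modes() returned 'count'. */
static int my_probe_fallbacks(struct drm_connector *connector, int count)
{
	/* DDC may have failed in get_modes(); retry override/firmware EDID */
	if (count == 0 && connector->status == connector_status_connected)
		count = drm_add_override_edid_modes(connector);

	/* Still nothing: add the standard no-EDID modes up to 1024x768 */
	if (count == 0 && connector->status == connector_status_connected)
		count = drm_add_modes_noedid(connector, 1024, 768);

	return count;
}
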
index 39a4804091d70d61a5fd63e7dfcb60d3edde4060..dc4ce694c06a8cd2b75e004f10034f969df569bb 100644 (file)
@@ -3005,6 +3005,7 @@ static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
 static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
 {
        return gen8_is_valid_mux_addr(dev_priv, addr) ||
+               addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
                (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
                 addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
 }
index 2aa69d347ec4070de9fd27aaf1e138d8a305dcd5..13d6bd4e17b208deda30c39114c9daba9223104d 100644 (file)
@@ -1062,6 +1062,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define NOA_DATA           _MMIO(0x986C)
 #define NOA_WRITE          _MMIO(0x9888)
+#define GEN10_NOA_WRITE_HIGH _MMIO(0x9884)
 
 #define _GEN7_PIPEA_DE_LOAD_SL 0x70068
 #define _GEN7_PIPEB_DE_LOAD_SL 0x71068
index f43c2a2563a5a54b0abf7a8510798049526c0a06..96618af470887d18ba93be9cfd848a212331e1b4 100644 (file)
@@ -303,10 +303,17 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
        u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
        u32 i;
        u32 *dmc_payload;
+       size_t fsize;
 
        if (!fw)
                return NULL;
 
+       fsize = sizeof(struct intel_css_header) +
+               sizeof(struct intel_package_header) +
+               sizeof(struct intel_dmc_header);
+       if (fsize > fw->size)
+               goto error_truncated;
+
        /* Extract CSS Header information*/
        css_header = (struct intel_css_header *)fw->data;
        if (sizeof(struct intel_css_header) !=
@@ -366,6 +373,9 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
        /* Convert dmc_offset into number of bytes. By default it is in dwords*/
        dmc_offset *= 4;
        readcount += dmc_offset;
+       fsize += dmc_offset;
+       if (fsize > fw->size)
+               goto error_truncated;
 
        /* Extract dmc_header information. */
        dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
@@ -397,6 +407,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
 
        /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
        nbytes = dmc_header->fw_size * 4;
+       fsize += nbytes;
+       if (fsize > fw->size)
+               goto error_truncated;
+
        if (nbytes > csr->max_fw_size) {
                DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
                return NULL;
@@ -410,6 +424,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
        }
 
        return memcpy(dmc_payload, &fw->data[readcount], nbytes);
+
+error_truncated:
+       DRM_ERROR("Truncated DMC firmware, rejecting.\n");
+       return NULL;
 }
 
 static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
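
The fsize checks added above follow a common firmware-parsing pattern: keep a running count of the bytes the parser is about to dereference and compare it against fw->size before each variable-length section, so a truncated blob is rejected instead of being read past its end. A minimal sketch under the assumption of a made-up two-part header, not the real CSS/package/DMC layout:

#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/types.h>

struct my_fw_header {
	u32 magic;
	u32 payload_dwords;	/* payload length, in dwords */
};

static int my_parse_fw(const struct firmware *fw)
{
	const struct my_fw_header *hdr;
	size_t fsize = sizeof(*hdr);

	if (fsize > fw->size)
		return -EINVAL;	/* truncated before the fixed header */

	hdr = (const struct my_fw_header *)fw->data;

	/* The payload length comes from the blob itself, so bound it too */
	fsize += (size_t)hdr->payload_dwords * 4;
	if (fsize > fw->size)
		return -EINVAL;	/* truncated payload */

	return 0;
}
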
index 5098228f1302d5bcd4818a80727b417a5423b747..b69440cf41ea3babeae2c4214e1c4ae742cb51ff 100644 (file)
@@ -2432,10 +2432,14 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
  * main surface.
  */
 static const struct drm_format_info ccs_formats[] = {
-       { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-       { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-       { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-       { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+       { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+       { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+       { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+       { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+         .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
 };
 
 static const struct drm_format_info *
@@ -11942,7 +11946,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
        return 0;
 }
 
-static bool intel_fuzzy_clock_check(int clock1, int clock2)
+bool intel_fuzzy_clock_check(int clock1, int clock2)
 {
        int diff;
 
index a38b9cff5cd0e9960f44e23ffdf88875386aaa48..e85cd377a6527aaa51f43e7f44cfaad24c06911a 100644 (file)
@@ -1742,6 +1742,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
                     const struct dpll *dpll);
 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
 int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+bool intel_fuzzy_clock_check(int clock1, int clock2);
 
 /* modesetting asserts */
 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
index 3074448446bc889f63b3fba62613cc995bc7df48..4b8e48db18430188ae9da802ee2894148e389f6e 100644 (file)
@@ -853,6 +853,17 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
                if (mipi_config->target_burst_mode_freq) {
                        u32 bitrate = intel_dsi_bitrate(intel_dsi);
 
+                       /*
+                        * Sometimes the VBT contains a slightly lower clock
+                        * than the bitrate we have calculated; in this case
+                        * just replace it with the calculated bitrate.
+                        */
+                       if (mipi_config->target_burst_mode_freq < bitrate &&
+                           intel_fuzzy_clock_check(
+                                       mipi_config->target_burst_mode_freq,
+                                       bitrate))
+                               mipi_config->target_burst_mode_freq = bitrate;
+
                        if (mipi_config->target_burst_mode_freq < bitrate) {
                                DRM_ERROR("Burst mode freq is less than computed\n");
                                return false;
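
The new check above reuses intel_fuzzy_clock_check() so a VBT burst-mode frequency that is only marginally below the computed bitrate is treated as equal and silently corrected, while a genuinely lower value still fails. The hunk does not show the tolerance i915 actually uses, so the sketch below assumes a 5% relative tolerance purely for illustration:

#include <linux/kernel.h>
#include <linux/types.h>

/* Treat two clocks as equal if they are within ~5% of each other. */
static bool my_fuzzy_clock_check(int clock1, int clock2)
{
	int diff = abs(clock1 - clock2);

	return diff * 100 <= max(clock1, clock2) * 5;
}
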
index 0e3d91d9ef1367bc7755c2a6c65941a206d50a44..9ecfba0a54a1c3bc0600e2c968e125f05ff468a2 100644 (file)
@@ -916,6 +916,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
        return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
 }
 
+static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
+                                      u8 audio_state)
+{
+       return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT,
+                                   &audio_state, 1);
+}
+
 #if 0
 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
 {
@@ -1487,11 +1494,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
        else
                sdvox |= SDVO_PIPE_SEL(crtc->pipe);
 
-       if (crtc_state->has_audio) {
-               WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4);
-               sdvox |= SDVO_AUDIO_ENABLE;
-       }
-
        if (INTEL_GEN(dev_priv) >= 4) {
                /* done in crtc_mode_set as the dpll_md reg must be written early */
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
@@ -1635,8 +1637,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
        if (sdvox & HDMI_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;
 
-       if (sdvox & SDVO_AUDIO_ENABLE)
-               pipe_config->has_audio = true;
+       if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
+                                &val, 1)) {
+               u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
+
+               if ((val & mask) == mask)
+                       pipe_config->has_audio = true;
+       }
 
        if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
                                 &val, 1)) {
@@ -1647,6 +1654,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
        intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
 }
 
+static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
+{
+       intel_sdvo_set_audio_state(intel_sdvo, 0);
+}
+
+static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
+                                   const struct intel_crtc_state *crtc_state,
+                                   const struct drm_connector_state *conn_state)
+{
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->base.adjusted_mode;
+       struct drm_connector *connector = conn_state->connector;
+       u8 *eld = connector->eld;
+
+       eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+       intel_sdvo_set_audio_state(intel_sdvo, 0);
+
+       intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
+                                  SDVO_HBUF_TX_DISABLED,
+                                  eld, drm_eld_size(eld));
+
+       intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID |
+                                  SDVO_AUDIO_PRESENCE_DETECT);
+}
+
 static void intel_disable_sdvo(struct intel_encoder *encoder,
                               const struct intel_crtc_state *old_crtc_state,
                               const struct drm_connector_state *conn_state)
@@ -1656,6 +1689,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        u32 temp;
 
+       if (old_crtc_state->has_audio)
+               intel_sdvo_disable_audio(intel_sdvo);
+
        intel_sdvo_set_active_outputs(intel_sdvo, 0);
        if (0)
                intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1741,6 +1777,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
                intel_sdvo_set_encoder_power_state(intel_sdvo,
                                                   DRM_MODE_DPMS_ON);
        intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+
+       if (pipe_config->has_audio)
+               intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
 }
 
 static enum drm_mode_status
@@ -2603,7 +2642,6 @@ static bool
 intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
        struct drm_encoder *encoder = &intel_sdvo->base.base;
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct drm_connector *connector;
        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
        struct intel_connector *intel_connector;
@@ -2640,9 +2678,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
        encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
        connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
-       /* gen3 doesn't do the hdmi bits in the SDVO register */
-       if (INTEL_GEN(dev_priv) >= 4 &&
-           intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+       if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
                connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
                intel_sdvo_connector->is_hdmi = true;
        }
index db0ed499268ae216cb0ee8185467078907d954a6..e9ba3b047f932123f1a41342b9abf528713c4758 100644 (file)
@@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg {
 #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
 #define SDVO_CMD_SET_AUDIO_STAT                0x91
 #define SDVO_CMD_GET_AUDIO_STAT                0x92
+  #define SDVO_AUDIO_ELD_VALID         (1 << 0)
+  #define SDVO_AUDIO_PRESENCE_DETECT   (1 << 1)
+  #define SDVO_AUDIO_CP_READY          (1 << 2)
 #define SDVO_CMD_SET_HBUF_INDEX                0x93
   #define SDVO_HBUF_INDEX_ELD          0
   #define SDVO_HBUF_INDEX_AVI_IF       1
index f426dfdfb41893ec6e2db4b9e2c67606e8ae66a9..a9007210dda1e7d02ff87bd5d26398c2efb9ff5a 100644 (file)
@@ -90,10 +90,6 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
 static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       int i;
-
-       for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
-               clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
 
        mtk_disp_mutex_put(mtk_crtc->mutex);
 
@@ -186,7 +182,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
 
        DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
-               ret = clk_enable(mtk_crtc->ddp_comp[i]->clk);
+               ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
                if (ret) {
                        DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
                        goto err;
@@ -196,7 +192,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
        return 0;
 err:
        while (--i >= 0)
-               clk_disable(mtk_crtc->ddp_comp[i]->clk);
+               clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
        return ret;
 }
 
@@ -206,7 +202,7 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
 
        DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
-               clk_disable(mtk_crtc->ddp_comp[i]->clk);
+               clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
 }
 
 static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
@@ -577,15 +573,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
                if (!comp) {
                        dev_err(dev, "Component %pOF not initialized\n", node);
                        ret = -ENODEV;
-                       goto unprepare;
-               }
-
-               ret = clk_prepare(comp->clk);
-               if (ret) {
-                       dev_err(dev,
-                               "Failed to prepare clock for component %pOF: %d\n",
-                               node, ret);
-                       goto unprepare;
+                       return ret;
                }
 
                mtk_crtc->ddp_comp[i] = comp;
@@ -603,23 +591,17 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
                ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
                                     BIT(pipe), type);
                if (ret)
-                       goto unprepare;
+                       return ret;
        }
 
        ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
                                mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
                                NULL, pipe);
        if (ret < 0)
-               goto unprepare;
+               return ret;
        drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
        drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
        priv->num_pipes++;
 
        return 0;
-
-unprepare:
-       while (--i >= 0)
-               clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
-
-       return ret;
 }
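
The mediatek change above drops the prepare-at-create / enable-at-runtime split and simply does both steps at enable time, which keeps the error unwind symmetric. A minimal sketch of the balanced pattern, assuming a hypothetical array of component clocks:

#include <linux/clk.h>

static int my_enable_clks(struct clk **clks, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret)
			goto err;
	}
	return 0;

err:
	/* Unwind the clocks that were already enabled, in reverse order */
	while (--i >= 0)
		clk_disable_unprepare(clks[i]);
	return ret;
}

static void my_disable_clks(struct clk **clks, int n)
{
	while (n-- > 0)
		clk_disable_unprepare(clks[n]);
}
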
index 7fcb7407096fb89ac7ccc4947bccc0808a1a327b..95fdbd0fbcace01f26e3c1c393210ba09e60a952 100644 (file)
@@ -303,6 +303,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 static void mtk_drm_kms_deinit(struct drm_device *drm)
 {
        drm_kms_helper_poll_fini(drm);
+       drm_atomic_helper_shutdown(drm);
 
        component_unbind_all(drm->dev, drm);
        drm_mode_config_cleanup(drm);
@@ -389,7 +390,9 @@ static void mtk_drm_unbind(struct device *dev)
        struct mtk_drm_private *private = dev_get_drvdata(dev);
 
        drm_dev_unregister(private->drm);
+       mtk_drm_kms_deinit(private->drm);
        drm_dev_put(private->drm);
+       private->num_pipes = 0;
        private->drm = NULL;
 }
 
@@ -560,13 +563,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
 static int mtk_drm_remove(struct platform_device *pdev)
 {
        struct mtk_drm_private *private = platform_get_drvdata(pdev);
-       struct drm_device *drm = private->drm;
        int i;
 
-       drm_dev_unregister(drm);
-       mtk_drm_kms_deinit(drm);
-       drm_dev_put(drm);
-
        component_master_del(&pdev->dev, &mtk_drm_ops);
        pm_runtime_disable(&pdev->dev);
        of_node_put(private->mutex_node);
index 3eefb22206c7e88a41c7981968009597b7f6900e..0d69698f817322ccf5b2c68e53bced28db98af6e 100644 (file)
@@ -136,7 +136,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
         * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
         */
        vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_pgoff = 0;
 
        ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
                             mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
@@ -168,6 +167,12 @@ int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
        obj = vma->vm_private_data;
 
+       /*
+        * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+        * whole buffer from the start.
+        */
+       vma->vm_pgoff = 0;
+
        return mtk_drm_gem_object_mmap(obj, vma);
 }
 
index 4a0b9150a7bb7e0de29dcb0c2955e42caa00cef0..b91c4616644a9a793ed6ff566960bb1b777d132c 100644 (file)
@@ -622,6 +622,15 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
        if (--dsi->refcount != 0)
                return;
 
+       /*
+        * mtk_dsi_stop() and mtk_dsi_start() are asymmetric: mtk_dsi_stop()
+        * should be called after mtk_drm_crtc_atomic_disable(), which needs
+        * the irq for vblank, and mtk_dsi_stop() will disable that irq.
+        * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+        * after the DSI is fully set up.
+        */
+       mtk_dsi_stop(dsi);
+
        if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
                if (dsi->panel) {
                        if (drm_panel_unprepare(dsi->panel)) {
@@ -688,7 +697,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
                }
        }
 
-       mtk_dsi_stop(dsi);
        mtk_dsi_poweroff(dsi);
 
        dsi->enabled = false;
@@ -836,6 +844,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
        /* Skip connector cleanup if creation was delegated to the bridge */
        if (dsi->conn.dev)
                drm_connector_cleanup(&dsi->conn);
+       if (dsi->panel)
+               drm_panel_detach(dsi->panel);
 }
 
 static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
index 6857151441568b4935618b6e0f6a7bd23eade9e3..aa8ea107524e65d9346777017280afd22d28e0f7 100644 (file)
@@ -107,8 +107,6 @@ static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
                        priv->io_base + _REG(VPP_OUT_H_V_SIZE));
 
        drm_crtc_vblank_on(crtc);
-
-       priv->viu.osd1_enabled = true;
 }
 
 static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -137,8 +135,6 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
                            priv->io_base + _REG(VPP_MISC));
 
        drm_crtc_vblank_on(crtc);
-
-       priv->viu.osd1_enabled = true;
 }
 
 static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -256,6 +252,8 @@ static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
        writel_relaxed(priv->viu.osb_blend1_size,
                       priv->io_base +
                       _REG(VIU_OSD_BLEND_BLEND1_SIZE));
+       writel_bits_relaxed(3 << 8, 3 << 8,
+                           priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
 }
 
 static void meson_crtc_enable_vd1(struct meson_drm *priv)
index 22490047932ec11a7f199cf653100a11b21d56b1..d90427b93a510e1958b0f06cff6bd8d9e4c92024 100644 (file)
@@ -305,6 +305,8 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
                meson_plane->enabled = true;
        }
 
+       priv->viu.osd1_enabled = true;
+
        spin_unlock_irqrestore(&priv->drm->event_lock, flags);
 }
 
@@ -316,14 +318,14 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
 
        /* Disable OSD1 */
        if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
-               writel_bits_relaxed(BIT(0) | BIT(21), 0,
-                       priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+               writel_bits_relaxed(3 << 8, 0,
+                                   priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
        else
                writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
                                    priv->io_base + _REG(VPP_MISC));
 
        meson_plane->enabled = false;
-
+       priv->viu.osd1_enabled = false;
 }
 
 static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
index 58b4af5fbb6d16280b74bd11723b3e5a6bd52d05..26732f038d19f7838ee24dabd21b00d9dec7068e 100644 (file)
@@ -503,8 +503,17 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
 
                /* G12A HDMI PLL Needs specific parameters for 5.4GHz */
                if (m >= 0xf7) {
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0xea68dc00);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
+                       if (frac < 0x10000) {
+                               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
+                                                       0x6a685c00);
+                               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
+                                                       0x11551293);
+                       } else {
+                               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
+                                                       0xea68dc00);
+                               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
+                                                       0x65771290);
+                       }
                        regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
                        regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x55540000);
                } else {
index 462c7cb3e1bdbf7513e40319da5a43b1d0e30c38..4b2b3024d37146e2e39f88051a57624a556ea098 100644 (file)
@@ -405,8 +405,7 @@ void meson_viu_init(struct meson_drm *priv)
                                0 << 16 |
                                1,
                                priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
-               writel_relaxed(3 << 8 |
-                               1 << 20,
+               writel_relaxed(1 << 20,
                                priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
                writel_relaxed(1 << 20,
                                priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
index 81963e964b0fae14a9b1e7eec4b5bb902913e862..86cdc0ce79e6598010cb63c84440d288dbadf178 100644 (file)
@@ -10,6 +10,7 @@ config DRM_PANFROST
        select IOMMU_IO_PGTABLE_LPAE
        select DRM_GEM_SHMEM_HELPER
        select PM_DEVFREQ
+       select DEVFREQ_GOV_SIMPLE_ONDEMAND
        help
          DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
          Bifrost (G3x, G5x, G7x) GPUs.
index 29fcffdf2d571dc93d39a3384ec67181394e75fd..db798532b0b6b0bc4d20011a9ab403bb0a333176 100644 (file)
@@ -140,7 +140,9 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
                return 0;
 
        ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
-       if (ret)
+       if (ret == -ENODEV) /* Optional, continue without devfreq */
+               return 0;
+       else if (ret)
                return ret;
 
        panfrost_devfreq_reset(pfdev);
@@ -170,6 +172,9 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
 {
        int i;
 
+       if (!pfdev->devfreq.devfreq)
+               return;
+
        panfrost_devfreq_reset(pfdev);
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                pfdev->devfreq.slot[i].busy = false;
@@ -179,6 +184,9 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
 
 void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
 {
+       if (!pfdev->devfreq.devfreq)
+               return;
+
        devfreq_suspend_device(pfdev->devfreq.devfreq);
 }
 
@@ -188,6 +196,9 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
        ktime_t now;
        ktime_t last;
 
+       if (!pfdev->devfreq.devfreq)
+               return;
+
        now = ktime_get();
        last = pfdev->devfreq.slot[slot].time_last_update;
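
The panfrost hunks above make devfreq optional: a missing OPP table in the device tree makes dev_pm_opp_of_add_table() return -ENODEV, which is treated as "run without devfreq", and every later devfreq call is guarded on the possibly-NULL handle. A minimal sketch of that shape, with a hypothetical device struct standing in for panfrost_device:

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_opp.h>

struct my_gpu {
	struct device *dev;
	struct devfreq *devfreq;	/* NULL when devfreq is not in use */
};

static int my_devfreq_init(struct my_gpu *gpu)
{
	int ret = dev_pm_opp_of_add_table(gpu->dev);

	if (ret == -ENODEV)	/* no OPP table in DT: feature is optional */
		return 0;
	if (ret)
		return ret;

	/* ... register the devfreq device and store it in gpu->devfreq ... */
	return 0;
}

static void my_devfreq_suspend(struct my_gpu *gpu)
{
	if (!gpu->devfreq)
		return;

	devfreq_suspend_device(gpu->devfreq);
}
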
 
index fd806f590bf8af4db2d62cbd689da398e3a21040..98bf694626f71a7e50318ca5b2f3aaf8f2eb6d4f 100644 (file)
@@ -35,8 +35,10 @@ static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
 {
        struct a4tech_sc *a4 = hid_get_drvdata(hdev);
 
-       if (usage->type == EV_REL && usage->code == REL_WHEEL)
+       if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) {
                set_bit(REL_HWHEEL, *bit);
+               set_bit(REL_HWHEEL_HI_RES, *bit);
+       }
 
        if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007)
                return -1;
@@ -57,7 +59,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
        input = field->hidinput->input;
 
        if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8) {
-               if (usage->type == EV_REL && usage->code == REL_WHEEL) {
+               if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) {
                        a4->delayed_value = value;
                        return 1;
                }
@@ -65,6 +67,8 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
                if (usage->hid == 0x000100b8) {
                        input_event(input, EV_REL, value ? REL_HWHEEL :
                                        REL_WHEEL, a4->delayed_value);
+                       input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES :
+                                       REL_WHEEL_HI_RES, a4->delayed_value * 120);
                        return 1;
                }
        }
@@ -74,8 +78,9 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
                return 1;
        }
 
-       if (usage->code == REL_WHEEL && a4->hw_wheel) {
+       if (usage->code == REL_WHEEL_HI_RES && a4->hw_wheel) {
                input_event(input, usage->type, REL_HWHEEL, value);
+               input_event(input, usage->type, REL_HWHEEL_HI_RES, value * 120);
                return 1;
        }
 
index 08d310723e96c363f9f01911488864d5db66e521..210b81a56e1a111507737ac96cf5b510e416b3b6 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
-#include <linux/async.h>
 
 #include <linux/hid.h>
 #include <linux/hiddev.h>
@@ -1311,10 +1310,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
                        unsigned offset, unsigned n)
 {
-       if (n > 256) {
-               hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
+       if (n > 32) {
+               hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
                         n, current->comm);
-               n = 256;
+               n = 32;
        }
 
        return __extract(report, offset, n);
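
The clamp above reflects the fact that hid_field_extract() returns a u32, so a single call can never hand back more than 32 bits; larger requests are truncated with a warning rather than silently misread. A simplified stand-in for the extraction (assuming, unlike the real helper, that the report buffer is padded so an unaligned 8-byte read is safe):

#include <asm/unaligned.h>
#include <linux/types.h>

static u32 my_extract_bits(const u8 *report, unsigned int bit_offset,
			   unsigned int n)
{
	u64 window;

	if (n > 32)	/* a u32 return value cannot hold more than 32 bits */
		n = 32;

	window = get_unaligned_le64(report + bit_offset / 8);
	window >>= bit_offset % 8;

	return n == 32 ? (u32)window : (u32)(window & ((1ULL << n) - 1));
}
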
@@ -2362,15 +2361,6 @@ int hid_add_device(struct hid_device *hdev)
        dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
                     hdev->vendor, hdev->product, atomic_inc_return(&id));
 
-       /*
-        * Try loading the module for the device before the add, so that we do
-        * not first have hid-generic binding only to have it replaced
-        * immediately afterwards with a specialized driver.
-        */
-       if (!current_is_async())
-               request_module("hid:b%04Xg%04Xv%08Xp%08X", hdev->bus,
-                              hdev->group, hdev->vendor, hdev->product);
-
        hid_debug_register(hdev, dev_name(&hdev->dev));
        ret = device_add(&hdev->dev);
        if (!ret)
index 8f806f46c278f5c2dab3685587cb90fee50929ca..7795831d37c21f72b107bd7d135c5f04f8c2d107 100644 (file)
@@ -606,5 +606,7 @@ static void __exit mousevsc_exit(void)
 }
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic HID Driver");
+
 module_init(mousevsc_init);
 module_exit(mousevsc_exit);
index 84e0c78d73cd3cc02702d19f665b3667832173d9..eac0c54c59701889922adf858fa37295ab0060ad 100644 (file)
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
 
 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS        0x2047
 #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA    0x0855
index 904a4b0d90f509970045d416c5cb136355e996a9..e564bff865159af9c7bf4631f52cef9a209d536d 100644 (file)
@@ -113,6 +113,7 @@ enum recvr_type {
        recvr_type_dj,
        recvr_type_hidpp,
        recvr_type_gaming_hidpp,
+       recvr_type_mouse_only,
        recvr_type_27mhz,
        recvr_type_bluetooth,
 };
@@ -864,9 +865,12 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
        schedule_work(&djrcv_dev->work);
 }
 
-static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
+static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
+                                           struct hidpp_event *hidpp_report,
                                            struct dj_workitem *workitem)
 {
+       struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+
        workitem->type = WORKITEM_TYPE_PAIRED;
        workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
                                HIDPP_DEVICE_TYPE_MASK;
@@ -880,6 +884,8 @@ static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
                break;
        case REPORT_TYPE_MOUSE:
                workitem->reports_supported |= STD_MOUSE | HIDPP;
+               if (djrcv_dev->type == recvr_type_mouse_only)
+                       workitem->reports_supported |= MULTIMEDIA;
                break;
        }
 }
@@ -923,7 +929,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
        case 0x01:
                device_type = "Bluetooth";
                /* Bluetooth connect packet contents is the same as (e)QUAD */
-               logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+               logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
                if (!(hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
                                                HIDPP_MANUFACTURER_MASK)) {
                        hid_info(hdev, "Non Logitech device connected on slot %d\n",
@@ -937,18 +943,18 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
                break;
        case 0x03:
                device_type = "QUAD or eQUAD";
-               logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+               logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
                break;
        case 0x04:
                device_type = "eQUAD step 4 DJ";
-               logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+               logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
                break;
        case 0x05:
                device_type = "DFU Lite";
                break;
        case 0x06:
                device_type = "eQUAD step 4 Lite";
-               logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+               logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
                break;
        case 0x07:
                device_type = "eQUAD step 4 Gaming";
@@ -958,11 +964,11 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
                break;
        case 0x0a:
                device_type = "eQUAD nano Lite";
-               logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+               logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
                break;
        case 0x0c:
                device_type = "eQUAD Lightspeed";
-               logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+               logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
                workitem.reports_supported |= STD_KEYBOARD;
                break;
        }
@@ -1313,7 +1319,8 @@ static int logi_dj_ll_parse(struct hid_device *hid)
        if (djdev->reports_supported & STD_MOUSE) {
                dbg_hid("%s: sending a mouse descriptor, reports_supported: %llx\n",
                        __func__, djdev->reports_supported);
-               if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp)
+               if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp ||
+                   djdev->dj_receiver_dev->type == recvr_type_mouse_only)
                        rdcat(rdesc, &rsize, mse_high_res_descriptor,
                              sizeof(mse_high_res_descriptor));
                else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
@@ -1556,15 +1563,19 @@ static int logi_dj_raw_event(struct hid_device *hdev,
                        data[0] = data[1];
                        data[1] = 0;
                }
-               /* The 27 MHz mouse-only receiver sends unnumbered mouse data */
+               /*
+                * Mouse-only receivers send unnumbered mouse data. The 27 MHz
+                * receiver uses 6-byte packets, the nano receiver 8-byte packets.
+                */
                if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
-                   size == 6) {
-                       u8 mouse_report[7];
+                   size <= 8) {
+                       u8 mouse_report[9];
 
                        /* Prepend report id */
                        mouse_report[0] = REPORT_TYPE_MOUSE;
-                       memcpy(mouse_report + 1, data, 6);
-                       logi_dj_recv_forward_input_report(hdev, mouse_report, 7);
+                       memcpy(mouse_report + 1, data, size);
+                       logi_dj_recv_forward_input_report(hdev, mouse_report,
+                                                         size + 1);
                }
 
                return false;
@@ -1635,6 +1646,7 @@ static int logi_dj_probe(struct hid_device *hdev,
        case recvr_type_dj:             no_dj_interfaces = 3; break;
        case recvr_type_hidpp:          no_dj_interfaces = 2; break;
        case recvr_type_gaming_hidpp:   no_dj_interfaces = 3; break;
+       case recvr_type_mouse_only:     no_dj_interfaces = 2; break;
        case recvr_type_27mhz:          no_dj_interfaces = 2; break;
        case recvr_type_bluetooth:      no_dj_interfaces = 2; break;
        }
@@ -1808,10 +1820,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
        {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
         .driver_data = recvr_type_dj},
-       { /* Logitech Nano (non DJ) receiver */
+       { /* Logitech Nano mouse only receiver */
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                         USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
-        .driver_data = recvr_type_hidpp},
+        .driver_data = recvr_type_mouse_only},
        { /* Logitech Nano (non DJ) receiver */
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                         USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
@@ -1836,6 +1848,14 @@ static const struct hid_device_id logi_dj_receivers[] = {
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                0xc70a),
         .driver_data = recvr_type_bluetooth},
+       { /* Logitech MX5500 HID++ / bluetooth receiver keyboard intf. */
+         HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               0xc71b),
+        .driver_data = recvr_type_bluetooth},
+       { /* Logitech MX5500 HID++ / bluetooth receiver mouse intf. */
+         HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               0xc71c),
+        .driver_data = recvr_type_bluetooth},
        {}
 };
 
index 3d2e6d254e7d7a806bc215233b7af29b61d67d2d..cf05816a601f5252e7c1308f13334e3d2a554572 100644 (file)
@@ -3728,6 +3728,9 @@ static const struct hid_device_id hidpp_devices[] = {
        { /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
          LDJ_DEVICE(0xb305),
          .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+       { /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
+         LDJ_DEVICE(0xb30b),
+         .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
 
        { LDJ_DEVICE(HID_ANY_ID) },
 
@@ -3740,6 +3743,9 @@ static const struct hid_device_id hidpp_devices[] = {
        { /* Keyboard MX3200 (Y-RAV80) */
          L27MHZ_DEVICE(0x005c),
          .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
+       { /* S510 Media Remote */
+         L27MHZ_DEVICE(0x00fe),
+         .driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL },
 
        { L27MHZ_DEVICE(HID_ANY_ID) },
 
@@ -3756,6 +3762,9 @@ static const struct hid_device_id hidpp_devices[] = {
        { /* MX5000 keyboard over Bluetooth */
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
          .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+       { /* MX5500 keyboard over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
+         .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
        {}
 };
 
index c34e972f629631bf68b5c375e4d1fb7142efb3dc..5df5dd56ecc8b67f4e43f09f7d70ead8a0dc49fb 100644 (file)
@@ -637,6 +637,13 @@ static void mt_store_field(struct hid_device *hdev,
        if (*target != DEFAULT_TRUE &&
            *target != DEFAULT_FALSE &&
            *target != DEFAULT_ZERO) {
+               if (usage->contactid == DEFAULT_ZERO ||
+                   usage->x == DEFAULT_ZERO ||
+                   usage->y == DEFAULT_ZERO) {
+                       hid_dbg(hdev,
+                               "ignoring duplicate usage on incomplete");
+                       return;
+               }
                usage = mt_allocate_usage(hdev, application);
                if (!usage)
                        return;
index 853842448f70a679d7cb9c5c0d011f8ba3a98229..7c6abd7e09797c8974dfb4f0db3551169a4d478c 100644 (file)
@@ -35,6 +35,7 @@
 /* device flags */
 #define RMI_DEVICE                     BIT(0)
 #define RMI_DEVICE_HAS_PHYS_BUTTONS    BIT(1)
+#define RMI_DEVICE_OUTPUT_SET_REPORT   BIT(2)
 
 /*
  * retrieve the ctrl registers
@@ -163,9 +164,19 @@ static int rmi_set_mode(struct hid_device *hdev, u8 mode)
 
 static int rmi_write_report(struct hid_device *hdev, u8 *report, int len)
 {
+       struct rmi_data *data = hid_get_drvdata(hdev);
        int ret;
 
-       ret = hid_hw_output_report(hdev, (void *)report, len);
+       if (data->device_flags & RMI_DEVICE_OUTPUT_SET_REPORT) {
+               /*
+                * Talk to the device using SET_REPORT requests instead.
+                */
+               ret = hid_hw_raw_request(hdev, report[0], report,
+                               len, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+       } else {
+               ret = hid_hw_output_report(hdev, (void *)report, len);
+       }
+
        if (ret < 0) {
                dev_err(&hdev->dev, "failed to write hid report (%d)\n", ret);
                return ret;
@@ -747,6 +758,8 @@ static const struct hid_device_id rmi_id[] = {
                .driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5),
+               .driver_data = RMI_DEVICE_OUTPUT_SET_REPORT },
        { HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
        { }
 };
index fd1b6eea6d2fdcf85aeb25595e371fbfab5f8ed5..75078c83be1af579c9e7ad804271d620d3849dc2 100644 (file)
@@ -354,6 +354,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
                },
                .driver_data = (void *)&sipodev_desc
        },
+       {
+               .ident = "iBall Aer3",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "iBall"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Aer3"),
+               },
+               .driver_data = (void *)&sipodev_desc
+       },
        { }     /* Terminate list */
 };
 
index 1f1ed276e388262e3d1a5e4367c4abab610f0cdc..43f6da35716599be1b823fcdd1670f755433f331 100644 (file)
@@ -1232,13 +1232,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
                /* Add back in missing bits of ID for non-USI pens */
                wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
        }
-       wacom->tool[0]   = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
 
        for (i = 0; i < pen_frames; i++) {
                unsigned char *frame = &data[i*pen_frame_len + 1];
                bool valid = frame[0] & 0x80;
                bool prox = frame[0] & 0x40;
                bool range = frame[0] & 0x20;
+               bool invert = frame[0] & 0x10;
 
                if (!valid)
                        continue;
@@ -1247,9 +1247,24 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
                        wacom->shared->stylus_in_proximity = false;
                        wacom_exit_report(wacom);
                        input_sync(pen_input);
+
+                       wacom->tool[0] = 0;
+                       wacom->id[0] = 0;
+                       wacom->serial[0] = 0;
                        return;
                }
+
                if (range) {
+                       if (!wacom->tool[0]) { /* first in range */
+                               /* Going into range select tool */
+                               if (invert)
+                                       wacom->tool[0] = BTN_TOOL_RUBBER;
+                               else if (wacom->id[0])
+                                       wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]);
+                               else
+                                       wacom->tool[0] = BTN_TOOL_PEN;
+                       }
+
                        input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
                        input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
 
@@ -1271,23 +1286,26 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
                                                 get_unaligned_le16(&frame[11]));
                        }
                }
-               input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
-               if (wacom->features.type == INTUOSP2_BT) {
-                       input_report_abs(pen_input, ABS_DISTANCE,
-                                        range ? frame[13] : wacom->features.distance_max);
-               } else {
-                       input_report_abs(pen_input, ABS_DISTANCE,
-                                        range ? frame[7] : wacom->features.distance_max);
-               }
 
-               input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
-               input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
-               input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+               if (wacom->tool[0]) {
+                       input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+                       if (wacom->features.type == INTUOSP2_BT) {
+                               input_report_abs(pen_input, ABS_DISTANCE,
+                                                range ? frame[13] : wacom->features.distance_max);
+                       } else {
+                               input_report_abs(pen_input, ABS_DISTANCE,
+                                                range ? frame[7] : wacom->features.distance_max);
+                       }
 
-               input_report_key(pen_input, wacom->tool[0], prox);
-               input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
-               input_report_abs(pen_input, ABS_MISC,
-                                wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+                       input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09);
+                       input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
+                       input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+
+                       input_report_key(pen_input, wacom->tool[0], prox);
+                       input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
+                       input_report_abs(pen_input, ABS_MISC,
+                                        wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+               }
 
                wacom->shared->stylus_in_proximity = prox;
 
@@ -1349,11 +1367,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
                if (wacom->num_contacts_left <= 0) {
                        wacom->num_contacts_left = 0;
                        wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
+                       input_sync(touch_input);
                }
        }
 
-       input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
-       input_sync(touch_input);
+       if (wacom->num_contacts_left == 0) {
+               // Be careful that we don't accidentally call input_sync with
+               // only a partial set of fingers processed
+               input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
+               input_sync(touch_input);
+       }
+
 }
 
 static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
@@ -1361,7 +1385,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
        struct input_dev *pad_input = wacom->pad_input;
        unsigned char *data = wacom->data;
 
-       int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
+       int buttons = data[282] | ((data[281] & 0x40) << 2);
        int ring = data[285] & 0x7F;
        bool ringstatus = data[285] & 0x80;
        bool prox = buttons || ringstatus;
@@ -3810,7 +3834,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
 static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
                                 int mask, int group)
 {
-       int button_per_group;
+       int group_button;
 
        /*
         * 21UX2 has LED group 1 to the left and LED group 0
@@ -3820,9 +3844,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
        if (wacom->wacom_wac.features.type == WACOM_21UX2)
                group = 1 - group;
 
-       button_per_group = button_count/wacom->led.count;
+       group_button = group * (button_count/wacom->led.count);
+
+       if (wacom->wacom_wac.features.type == INTUOSP2_BT)
+               group_button = 8;
 
-       return mask & (1 << (group * button_per_group));
+       return mask & (1 << group_button);
 }
 
 static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
index f4a5ae69bf6a48775f9bb58c488eeb158174a797..fa3763e4b3ee26ca470175937e56ac0a75313292 100644 (file)
@@ -81,6 +81,7 @@ static struct i2c_algo_bit_data ioc_data = {
 
 static struct i2c_adapter ioc_ops = {
        .nr                     = 0,
+       .name                   = "ioc",
        .algo_data              = &ioc_data,
 };
 
index de3fe6e828cbdcf585aae98989ea418e6c8725fd..f50afa8e3cbad7879ab0d6f8f454946cd941eaee 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/platform_device.h>
 #include <linux/i2c-algo-pca.h>
 #include <linux/platform_data/i2c-pca-platform.h>
-#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -173,7 +172,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
        i2c->adap.dev.parent = &pdev->dev;
        i2c->adap.dev.of_node = np;
 
-       i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset-gpios", GPIOD_OUT_LOW);
+       i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(i2c->gpio))
                return PTR_ERR(i2c->gpio);
 
index 539cb670de41d6955e6ea5353749dac27d96b798..ae9c51cc85f997b92a6581a74a0ba65f6a673cb1 100644 (file)
@@ -526,11 +526,12 @@ static int imx_keypad_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __maybe_unused imx_kbd_suspend(struct device *dev)
+static int __maybe_unused imx_kbd_noirq_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct imx_keypad *kbd = platform_get_drvdata(pdev);
        struct input_dev *input_dev = kbd->input_dev;
+       unsigned short reg_val = readw(kbd->mmio_base + KPSR);
 
        /* imx kbd can wake up system even clock is disabled */
        mutex_lock(&input_dev->mutex);
@@ -540,13 +541,20 @@ static int __maybe_unused imx_kbd_suspend(struct device *dev)
 
        mutex_unlock(&input_dev->mutex);
 
-       if (device_may_wakeup(&pdev->dev))
+       if (device_may_wakeup(&pdev->dev)) {
+               if (reg_val & KBD_STAT_KPKD)
+                       reg_val |= KBD_STAT_KRIE;
+               if (reg_val & KBD_STAT_KPKR)
+                       reg_val |= KBD_STAT_KDIE;
+               writew(reg_val, kbd->mmio_base + KPSR);
+
                enable_irq_wake(kbd->irq);
+       }
 
        return 0;
 }
 
-static int __maybe_unused imx_kbd_resume(struct device *dev)
+static int __maybe_unused imx_kbd_noirq_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct imx_keypad *kbd = platform_get_drvdata(pdev);
@@ -570,7 +578,9 @@ static int __maybe_unused imx_kbd_resume(struct device *dev)
        return ret;
 }
 
-static SIMPLE_DEV_PM_OPS(imx_kbd_pm_ops, imx_kbd_suspend, imx_kbd_resume);
+static const struct dev_pm_ops imx_kbd_pm_ops = {
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_kbd_noirq_suspend, imx_kbd_noirq_resume)
+};
 
 static struct platform_driver imx_keypad_driver = {
        .driver         = {
index 2ed559dd5e2522155b036fff8d4659ec67151bd7..84051f20b18a7790fea756c76f00d6b522010f71 100644 (file)
@@ -1038,13 +1038,31 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 #ifdef CONFIG_COMPAT
 
-#define UI_SET_PHYS_COMPAT     _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
+/*
+ * These IOCTLs change their size and thus their numbers between
+ * 32 and 64 bits.
+ */
+#define UI_SET_PHYS_COMPAT             \
+       _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
+#define UI_BEGIN_FF_UPLOAD_COMPAT      \
+       _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload_compat)
+#define UI_END_FF_UPLOAD_COMPAT                \
+       _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload_compat)
 
 static long uinput_compat_ioctl(struct file *file,
                                unsigned int cmd, unsigned long arg)
 {
-       if (cmd == UI_SET_PHYS_COMPAT)
+       switch (cmd) {
+       case UI_SET_PHYS_COMPAT:
                cmd = UI_SET_PHYS;
+               break;
+       case UI_BEGIN_FF_UPLOAD_COMPAT:
+               cmd = UI_BEGIN_FF_UPLOAD;
+               break;
+       case UI_END_FF_UPLOAD_COMPAT:
+               cmd = UI_END_FF_UPLOAD;
+               break;
+       }
 
        return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
 }
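
The extra compat cases above are needed because _IOW()/_IOWR() fold the size of the argument type into the ioctl number: a struct that embeds a user pointer has a different size (and hence a different number) when built for 32-bit userspace, so the compat handler must remap it onto the native command. A minimal sketch with hypothetical types in place of the uinput structures:

#include <linux/compat.h>
#include <linux/ioctl.h>
#include <linux/types.h>

struct my_req {			/* native layout: 8-byte pointer on 64-bit */
	void __user *data;
	__u32 len;
};

struct my_req_compat {		/* layout seen by 32-bit userspace */
	compat_uptr_t data;	/* 4-byte pointer representation */
	__u32 len;
};

#define MY_IOC		_IOWR('X', 200, struct my_req)
#define MY_IOC_COMPAT	_IOWR('X', 200, struct my_req_compat)

/* In a compat_ioctl handler, remap the 32-bit number onto the native one. */
static unsigned int my_translate_cmd(unsigned int cmd)
{
	return cmd == MY_IOC_COMPAT ? MY_IOC : cmd;
}
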
index f9525d6f0bfe810c9ab1c2bd0a2a971f2e9695b4..2c0561e20b7fa28767e4228c0ae0d2535ef4107d 100644 (file)
@@ -981,6 +981,8 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
        if (error)
                goto out;
 
+       pm_wakeup_event(dev, 0);
+
        switch (report[ETP_REPORT_ID_OFFSET]) {
        case ETP_REPORT_ID:
                elan_report_absolute(data, report);
index a7f8b16145595bd004b5fec8fdf27e2d6cf7f441..530142b5a115457588a7330131f462ca4b7830c6 100644 (file)
@@ -1189,6 +1189,8 @@ static const char * const middle_button_pnp_ids[] = {
        "LEN2132", /* ThinkPad P52 */
        "LEN2133", /* ThinkPad P72 w/ NFC */
        "LEN2134", /* ThinkPad P72 */
+       "LEN0407",
+       "LEN0408",
        NULL
 };
 
index b6da0c1267e36e96cdc3b808e1fae122bec7add4..8e6077d8e434a30cf76598164aa95cffcf2439fb 100644 (file)
@@ -179,6 +179,8 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0096", /* X280 */
        "LEN0097", /* X280 -> ALPS trackpoint */
        "LEN200f", /* T450s */
+       "LEN2054", /* E480 */
+       "LEN2055", /* E580 */
        "SYN3052", /* HP EliteBook 840 G4 */
        "SYN3221", /* HP 15-ay000 */
        NULL
index b832fe0626453ce8c8dd6104a131211db0b5bfc8..4f6fe8cc8efaefeb8cce55f52251c35f1963f4d5 100644 (file)
@@ -502,14 +502,6 @@ static int iqs5xx_axis_init(struct i2c_client *client)
                input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
                input_set_capability(input, EV_ABS, ABS_MT_PRESSURE);
 
-               error = input_mt_init_slots(input,
-                               IQS5XX_NUM_CONTACTS, INPUT_MT_DIRECT);
-               if (error) {
-                       dev_err(&client->dev,
-                               "Failed to initialize slots: %d\n", error);
-                       return error;
-               }
-
                input_set_drvdata(input, iqs5xx);
                iqs5xx->input = input;
        }
@@ -591,9 +583,19 @@ static int iqs5xx_axis_init(struct i2c_client *client)
        if (error)
                return error;
 
-       return iqs5xx_write_word(client,
-                                prop.swap_x_y ? IQS5XX_X_RES : IQS5XX_Y_RES,
-                                max_y);
+       error = iqs5xx_write_word(client,
+                                 prop.swap_x_y ? IQS5XX_X_RES : IQS5XX_Y_RES,
+                                 max_y);
+       if (error)
+               return error;
+
+       error = input_mt_init_slots(iqs5xx->input, IQS5XX_NUM_CONTACTS,
+                                   INPUT_MT_DIRECT);
+       if (error)
+               dev_err(&client->dev, "Failed to initialize slots: %d\n",
+                       error);
+
+       return error;
 }
 
 static int iqs5xx_dev_init(struct i2c_client *client)
index a67d5e93fb773b31db0d667cfe0733a65efb2adb..ad8b6a2bfd3640bcc55f07fdfbdb25649a9f0dbd 100644 (file)
@@ -609,6 +609,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
        { "MSSL1680", 0 },
        { "MSSL0001", 0 },
        { "MSSL0002", 0 },
+       { "MSSL0017", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
index 5aeb1dbfaa08910a7b2869366fbc441b8561ad29..586dd5a46d9f593b33a9a46d7e57fbe4dbebc5ff 100644 (file)
 
 #include "arm-smmu-regs.h"
 
+/*
+ * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
+ * global register space are still, in fact, using a hypervisor to mediate it
+ * by trapping and emulating register accesses. Sadly, some deployed versions
+ * of said trapping code have bugs wherein they go horribly wrong for stores
+ * using r31 (i.e. XZR/WZR) as the source register.
+ */
+#define QCOM_DUMMY_VAL -1
+
 #define ARM_MMU500_ACTLR_CPRE          (1 << 1)
 
 #define ARM_MMU500_ACR_CACHE_LOCK      (1 << 26)
@@ -411,7 +420,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
 {
        unsigned int spin_cnt, delay;
 
-       writel_relaxed(0, sync);
+       writel_relaxed(QCOM_DUMMY_VAL, sync);
        for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                        if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
@@ -1751,8 +1760,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        }
 
        /* Invalidate the TLB, just in case */
-       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
-       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+       writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+       writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
        reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 
index 162b3236e72c3c8702fd44aac9b0e43f7fb813d5..56297298d6eeef1de4c96d3db0ee3efba7629312 100644 (file)
@@ -2504,6 +2504,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                }
        }
 
+       spin_lock(&iommu->lock);
        spin_lock_irqsave(&device_domain_lock, flags);
        if (dev)
                found = find_domain(dev);
@@ -2519,17 +2520,16 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
        if (found) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
+               spin_unlock(&iommu->lock);
                free_devinfo_mem(info);
                /* Caller must free the original domain */
                return found;
        }
 
-       spin_lock(&iommu->lock);
        ret = domain_attach_iommu(domain, iommu);
-       spin_unlock(&iommu->lock);
-
        if (ret) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
+               spin_unlock(&iommu->lock);
                free_devinfo_mem(info);
                return NULL;
        }
@@ -2539,6 +2539,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        if (dev)
                dev->archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
+       spin_unlock(&iommu->lock);
 
        /* PASID table is mandatory for a PCI device in scalable mode. */
        if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
index 2fefeafda437b91541f09a3c5618754516fc96b6..fe51d8af457fc136a41520a506a79a69b8cc358d 100644 (file)
@@ -389,7 +389,7 @@ static inline void pasid_set_present(struct pasid_entry *pe)
  */
 static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
 {
-       pasid_set_bits(&pe->val[1], 1 << 23, value);
+       pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
 }
 
 /*
index f9cacce909d3ae9a92a78fd0bcb597b98e10cbc4..9f0a2844371cf349e59e227a10bc17ead7e6f424 100644 (file)
@@ -329,7 +329,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
                        type = "unmanaged\n";
                        break;
                case IOMMU_DOMAIN_DMA:
-                       type = "DMA";
+                       type = "DMA\n";
                        break;
                }
        }
index d576809429ac800a13930ac6d7501b900ddc3fdd..6751c35b7e1d7c001acc7cb88234dc34d1eab3df 100644 (file)
@@ -252,10 +252,10 @@ static const struct ixp4xx_irq_chunk ixp4xx_irq_chunks[] = {
  * @fwnode: Corresponding fwnode abstraction for this controller
  * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
  */
-static int ixp4xx_irq_setup(struct ixp4xx_irq *ixi,
-                           void __iomem *irqbase,
-                           struct fwnode_handle *fwnode,
-                           bool is_356)
+static int __init ixp4xx_irq_setup(struct ixp4xx_irq *ixi,
+                                  void __iomem *irqbase,
+                                  struct fwnode_handle *fwnode,
+                                  bool is_356)
 {
        int nr_irqs;
 
index 8f07fa6e17394b5c0c54f7250f960f7361fd2111..268f1b6850840ad70711bca42276da11e6d5c18a 100644 (file)
@@ -887,12 +887,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
        struct bset *i = bset_tree_last(b)->data;
        struct bkey *m, *prev = NULL;
        struct btree_iter iter;
+       struct bkey preceding_key_on_stack = ZERO_KEY;
+       struct bkey *preceding_key_p = &preceding_key_on_stack;
 
        BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
 
-       m = bch_btree_iter_init(b, &iter, b->ops->is_extents
-                               ? PRECEDING_KEY(&START_KEY(k))
-                               : PRECEDING_KEY(k));
+       /*
+        * If k has a preceding key, preceding_key_p will be set to the
+        * address of k's preceding key; otherwise preceding_key_p will be
+        * set to NULL inside preceding_key().
+        */
+       if (b->ops->is_extents)
+               preceding_key(&START_KEY(k), &preceding_key_p);
+       else
+               preceding_key(k, &preceding_key_p);
+
+       m = bch_btree_iter_init(b, &iter, preceding_key_p);
 
        if (b->ops->insert_fixup(b, k, &iter, replace_key))
                return status;
index bac76aabca6d7977abfecfaee4d884719f3699f4..c71365e7c1fac7fc87b051b10e8697edbe29f8c4 100644 (file)
@@ -434,20 +434,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
        return __bch_cut_back(where, k);
 }
 
-#define PRECEDING_KEY(_k)                                      \
-({                                                             \
-       struct bkey *_ret = NULL;                               \
-                                                               \
-       if (KEY_INODE(_k) || KEY_OFFSET(_k)) {                  \
-               _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);  \
-                                                               \
-               if (!_ret->low)                                 \
-                       _ret->high--;                           \
-               _ret->low--;                                    \
-       }                                                       \
-                                                               \
-       _ret;                                                   \
-})
+/*
+ * Pointer '*preceding_key_p' points to a memory object used to store the
+ * preceding key of k. If the preceding key does not exist, '*preceding_key_p'
+ * is set to NULL, so the caller of preceding_key() must manage the memory
+ * that '*preceding_key_p' points to before calling preceding_key().
+ * Currently the only caller of preceding_key() is bch_btree_insert_key(),
+ * which points it at an on-stack variable, so the memory is released by
+ * the stack frame itself.
+ */
+static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
+{
+       if (KEY_INODE(k) || KEY_OFFSET(k)) {
+               (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
+               if (!(*preceding_key_p)->low)
+                       (*preceding_key_p)->high--;
+               (*preceding_key_p)->low--;
+       } else {
+               (*preceding_key_p) = NULL;
+       }
+}
 
 static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
 {
index 6cd44d3cf906a68b80e77ace3ef1ba2c92f7b77d..bfb437ffb13c939be0f6ccb75d82cabcc6b4cfcf 100644 (file)
@@ -431,8 +431,13 @@ STORE(bch_cached_dev)
                        bch_writeback_queue(dc);
        }
 
+       /*
+        * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached
+        * to a cache set; otherwise it doesn't make sense.
+        */
        if (attr == &sysfs_writeback_percent)
-               if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
+               if ((dc->disk.c != NULL) &&
+                   (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
                        schedule_delayed_work(&dc->writeback_rate_update,
                                      dc->writeback_rate_update_seconds * HZ);
 
index 04f4f131f9d6d4410b4c63dec1a4ae1f009995c0..9801d540fea1cc286af135fdcef385a92b7a5ed2 100644 (file)
@@ -7607,9 +7607,9 @@ static void status_unused(struct seq_file *seq)
 static int status_resync(struct seq_file *seq, struct mddev *mddev)
 {
        sector_t max_sectors, resync, res;
-       unsigned long dt, db;
-       sector_t rt;
-       int scale;
+       unsigned long dt, db = 0;
+       sector_t rt, curr_mark_cnt, resync_mark_cnt;
+       int scale, recovery_active;
        unsigned int per_milli;
 
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -7698,22 +7698,30 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
         * db: blocks written from mark until now
         * rt: remaining time
         *
-        * rt is a sector_t, so could be 32bit or 64bit.
-        * So we divide before multiply in case it is 32bit and close
-        * to the limit.
-        * We scale the divisor (db) by 32 to avoid losing precision
-        * near the end of resync when the number of remaining sectors
-        * is close to 'db'.
-        * We then divide rt by 32 after multiplying by db to compensate.
-        * The '+1' avoids division by zero if db is very small.
+        * rt is a sector_t, which is always 64bit now. We are keeping
+        * the original algorithm, but it is not really necessary.
+        *
+        * Original algorithm:
+        *   So we divide before multiply in case it is 32bit and close
+        *   to the limit.
+        *   We scale the divisor (db) by 32 to avoid losing precision
+        *   near the end of resync when the number of remaining sectors
+        *   is close to 'db'.
+        *   We then divide rt by 32 after multiplying by db to compensate.
+        *   The '+1' avoids division by zero if db is very small.
         */
        dt = ((jiffies - mddev->resync_mark) / HZ);
        if (!dt) dt++;
-       db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
-               - mddev->resync_mark_cnt;
+
+       curr_mark_cnt = mddev->curr_mark_cnt;
+       recovery_active = atomic_read(&mddev->recovery_active);
+       resync_mark_cnt = mddev->resync_mark_cnt;
+
+       if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
+               db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
 
        rt = max_sectors - resync;    /* number of remaining sectors */
-       sector_div(rt, db/32+1);
+       rt = div64_u64(rt, db/32+1);
        rt *= dt;
        rt >>= 5;
 
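The comment above describes the remaining-time estimate in words; the following is a minimal standalone sketch of that arithmetic (the helper name and the sample numbers are illustrative only, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the resync ETA arithmetic: dt = seconds since the resync mark,
 * db = sectors completed since the mark, remaining = sectors still to sync.
 * The divisor is scaled by 32 (compensated with >> 5) to keep precision
 * near the end of resync, and the +1 avoids division by zero.
 */
static uint64_t resync_eta_seconds(uint64_t remaining, uint64_t dt, uint64_t db)
{
	uint64_t rt = remaining / (db / 32 + 1);

	rt *= dt;
	return rt >> 5;		/* roughly remaining * dt / db */
}

int main(void)
{
	/* e.g. 1,000,000 sectors left, 10 s elapsed, 200,000 sectors done */
	printf("ETA: ~%llu s\n",
	       (unsigned long long)resync_eta_seconds(1000000, 10, 200000));
	return 0;
}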
index ecd3277f2e895b39a0c2e46edb81a70c53a25113..6351a97f3d1877c8ccf378c4fefb4aeabc520e5a 100644 (file)
@@ -905,7 +905,7 @@ static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
                         "DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
                         fe->dvb->num, fe->id);
 
-       dprintk("frequency interval: tuner: %u...%u, frontend: %u...%u",
+       dev_dbg(fe->dvb->device, "frequency interval: tuner: %u...%u, frontend: %u...%u",
                tuner_min, tuner_max, frontend_min, frontend_max);
 
        /* If the standard is for satellite, convert frequencies to kHz */
index 04cc80b106d60b646e8e46f03436c9f124185e52..b70551e296b7ae6b11a68e7a98a7a3b780606251 100644 (file)
@@ -560,7 +560,7 @@ struct hfi_capability {
 
 struct hfi_capabilities {
        u32 num_capabilities;
-       struct hfi_capability *data;
+       struct hfi_capability data[];
 };
 
 #define HFI_DEBUG_MSG_LOW      0x01
@@ -717,7 +717,7 @@ struct hfi_profile_level {
 
 struct hfi_profile_level_supported {
        u32 profile_count;
-       struct hfi_profile_level *profile_level;
+       struct hfi_profile_level profile_level[];
 };
 
 struct hfi_quality_vs_speed {
index 6db36dc870b585d6ca19ed2e1710c34b73a59986..9020cb2490f71bd0cb3552d97514f7045a18497d 100644 (file)
@@ -144,8 +144,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
        int err = cmd->error;
 
        /* Flag re-tuning needed on CRC errors */
-       if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
-           cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+       if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+           cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
+           !host->retune_crc_disable &&
            (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
            (mrq->data && mrq->data->error == -EILSEQ) ||
            (mrq->stop && mrq->stop->error == -EILSEQ)))
index 3e786ba204c3fd63a4f6b10b9f7acefb41276a88..671bfcceea6a6c1191014aa77e4ee9eccb0c9197 100644 (file)
@@ -1212,13 +1212,13 @@ static int mmc_select_hs400(struct mmc_card *card)
        mmc_set_timing(host, MMC_TIMING_MMC_HS400);
        mmc_set_bus_speed(card);
 
+       if (host->ops->hs400_complete)
+               host->ops->hs400_complete(host);
+
        err = mmc_switch_status(card);
        if (err)
                goto out_err;
 
-       if (host->ops->hs400_complete)
-               host->ops->hs400_complete(host);
-
        return 0;
 
 out_err:
index d1aa1c7577bb1adf98b8e807924fb0142b0b93c4..712a7742765e992023e804544dbc987f89e99521 100644 (file)
@@ -937,6 +937,10 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
  */
 static int mmc_sdio_suspend(struct mmc_host *host)
 {
+       /* Prevent processing of SDIO IRQs in suspended state. */
+       mmc_card_set_suspended(host->card);
+       cancel_delayed_work_sync(&host->sdio_irq_work);
+
        mmc_claim_host(host);
 
        if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
@@ -985,13 +989,20 @@ static int mmc_sdio_resume(struct mmc_host *host)
                err = sdio_enable_4bit_bus(host->card);
        }
 
-       if (!err && host->sdio_irqs) {
+       if (err)
+               goto out;
+
+       /* Allow SDIO IRQs to be processed again. */
+       mmc_card_clr_suspended(host->card);
+
+       if (host->sdio_irqs) {
                if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
                        wake_up_process(host->sdio_irq_thread);
                else if (host->caps & MMC_CAP_SDIO_IRQ)
                        host->ops->enable_sdio_irq(host, 1);
        }
 
+out:
        mmc_release_host(host);
 
        host->pm_flags &= ~MMC_PM_KEEP_POWER;
index f79f0b0caab8b2516afb112880069035fe267fe1..2ba00acf64e6421152b54ac226b13b72dae14d2e 100644 (file)
@@ -15,6 +15,7 @@
 #include "sdio_ops.h"
 #include "core.h"
 #include "card.h"
+#include "host.h"
 
 /**
  *     sdio_claim_host - exclusively claim a bus for a certain SDIO function
@@ -734,3 +735,79 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
        return 0;
 }
 EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags);
+
+/**
+ *     sdio_retune_crc_disable - temporarily disable retuning on CRC errors
+ *     @func: SDIO function attached to host
+ *
+ *     If the SDIO card is known to be in a state where it might produce
+ *     CRC errors on the bus in response to commands (like if we know it is
+ *     transitioning between power states), an SDIO function driver can
+ *     call this function to temporarily disable the SD/MMC core behavior of
+ *     triggering an automatic retuning.
+ *
+ *     This function should be called while the host is claimed and the host
+ *     should remain claimed until sdio_retune_crc_enable() is called.
+ *     Specifically, the expected sequence of calls is:
+ *     - sdio_claim_host()
+ *     - sdio_retune_crc_disable()
+ *     - some number of calls like sdio_writeb() and sdio_readb()
+ *     - sdio_retune_crc_enable()
+ *     - sdio_release_host()
+ */
+void sdio_retune_crc_disable(struct sdio_func *func)
+{
+       func->card->host->retune_crc_disable = true;
+}
+EXPORT_SYMBOL_GPL(sdio_retune_crc_disable);
+
+/**
+ *     sdio_retune_crc_enable - re-enable retuning on CRC errors
+ *     @func: SDIO function attached to host
+ *
+ *     This is the complement to sdio_retune_crc_disable().
+ */
+void sdio_retune_crc_enable(struct sdio_func *func)
+{
+       func->card->host->retune_crc_disable = false;
+}
+EXPORT_SYMBOL_GPL(sdio_retune_crc_enable);
+
+/**
+ *     sdio_retune_hold_now - start deferring retuning requests till release
+ *     @func: SDIO function attached to host
+ *
+ *     This function can be called if it's currently a bad time to do
+ *     a retune of the SDIO card.  Retune requests made during this time
+ *     will be held and we'll actually do the retune sometime after the
+ *     release.
+ *
+ *     This function could be useful if an SDIO card is in a power state
+ *     where it can respond to a small subset of commands that doesn't
+ *     include the retuning command.  Care should be taken when using
+ *     this function since (presumably) the retuning request we might be
+ *     deferring was made for a good reason.
+ *
+ *     This function should be called while the host is claimed.
+ */
+void sdio_retune_hold_now(struct sdio_func *func)
+{
+       mmc_retune_hold_now(func->card->host);
+}
+EXPORT_SYMBOL_GPL(sdio_retune_hold_now);
+
+/**
+ *     sdio_retune_release - signal that it's OK to retune now
+ *     @func: SDIO function attached to host
+ *
+ *     This is the complement to sdio_retune_hold_now().  Calling this
+ *     function won't make a retune happen right away but will allow
+ *     retunes to be scheduled normally.
+ *
+ *     This function should be called while the host is claimed.
+ */
+void sdio_retune_release(struct sdio_func *func)
+{
+       mmc_retune_release(func->card->host);
+}
+EXPORT_SYMBOL_GPL(sdio_retune_release);
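The kernel-doc above spells out the expected bracketing; a minimal caller sketch follows, assuming a hypothetical SDIO function driver and that the new helpers are declared in linux/mmc/sdio_func.h (not shown in this hunk). The register offset and value written are made up for illustration:

#include <linux/mmc/sdio_func.h>

/*
 * Hedged sketch, not a real driver: bracket traffic that is expected to
 * produce CRC errors (e.g. around a card power-state transition) so the
 * core does not schedule an automatic retune. The 0xF0 offset and 0x01
 * value are purely illustrative.
 */
static void example_power_transition(struct sdio_func *func)
{
	int err;

	sdio_claim_host(func);
	sdio_retune_crc_disable(func);

	/* commands here may legitimately fail with -EILSEQ */
	sdio_writeb(func, 0x01, 0xF0, &err);

	sdio_retune_crc_enable(func);
	sdio_release_host(func);
}

sdio_retune_hold_now()/sdio_retune_release() would bracket a window in the same claim/release fashion when the goal is to defer retune requests rather than to suppress CRC-triggered ones.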
index 931e6226c0b347be81982b6547039c30c28af794..9f54a259a1b36ed4053d56c1b6c5e054386bfd3d 100644 (file)
@@ -34,6 +34,10 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
        unsigned char pending;
        struct sdio_func *func;
 
+       /* Don't process SDIO IRQs if the card is suspended. */
+       if (mmc_card_suspended(card))
+               return 0;
+
        /*
         * Optimization, if there is only 1 function interrupt registered
         * and we know an IRQ was signaled then call irq handler directly.
index 5a6d885994e69165bd43fadff7e95147484c999a..33f4b6387ef714b0683fb0c9ef83394fcc94ca56 100644 (file)
@@ -1023,6 +1023,8 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
        msdc_track_cmd_data(host, mrq->cmd, mrq->data);
        if (mrq->data)
                msdc_unprepare_data(host, mrq);
+       if (host->error)
+               msdc_reset_hw(host);
        mmc_request_done(host->mmc, mrq);
 }
 
@@ -1375,24 +1377,25 @@ static void msdc_request_timeout(struct work_struct *work)
        }
 }
 
-static void __msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
+static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
 {
-       unsigned long flags;
-       struct msdc_host *host = mmc_priv(mmc);
-
-       spin_lock_irqsave(&host->lock, flags);
-       if (enb)
+       if (enb) {
                sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
-       else
+               sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+       } else {
                sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
-       spin_unlock_irqrestore(&host->lock, flags);
+               sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+       }
 }
 
 static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
 {
+       unsigned long flags;
        struct msdc_host *host = mmc_priv(mmc);
 
-       __msdc_enable_sdio_irq(mmc, enb);
+       spin_lock_irqsave(&host->lock, flags);
+       __msdc_enable_sdio_irq(host, enb);
+       spin_unlock_irqrestore(&host->lock, flags);
 
        if (enb)
                pm_runtime_get_noresume(host->dev);
@@ -1414,6 +1417,8 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
                spin_lock_irqsave(&host->lock, flags);
                events = readl(host->base + MSDC_INT);
                event_mask = readl(host->base + MSDC_INTEN);
+               if ((events & event_mask) & MSDC_INT_SDIOIRQ)
+                       __msdc_enable_sdio_irq(host, 0);
                /* clear interrupts */
                writel(events & event_mask, host->base + MSDC_INT);
 
@@ -1422,10 +1427,8 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
                data = host->data;
                spin_unlock_irqrestore(&host->lock, flags);
 
-               if ((events & event_mask) & MSDC_INT_SDIOIRQ) {
-                       __msdc_enable_sdio_irq(host->mmc, 0);
+               if ((events & event_mask) & MSDC_INT_SDIOIRQ)
                        sdio_signal_irq(host->mmc);
-               }
 
                if ((events & event_mask) & MSDC_INT_CDSC) {
                        if (host->internal_cd)
@@ -1564,10 +1567,7 @@ static void msdc_init_hw(struct msdc_host *host)
        sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
 
        /* Config SDIO device detect interrupt function */
-       if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
-               sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
-       else
-               sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+       sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
 
        /* Configure to default data timeout */
        sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
@@ -2095,7 +2095,12 @@ static void msdc_hw_reset(struct mmc_host *mmc)
 
 static void msdc_ack_sdio_irq(struct mmc_host *mmc)
 {
-       __msdc_enable_sdio_irq(mmc, 1);
+       unsigned long flags;
+       struct msdc_host *host = mmc_priv(mmc);
+
+       spin_lock_irqsave(&host->lock, flags);
+       __msdc_enable_sdio_irq(host, 1);
+       spin_unlock_irqrestore(&host->lock, flags);
 }
 
 static int msdc_get_cd(struct mmc_host *mmc)
index 5e9e36ed2107a01ca5da7aafe5d6a68c20ef2eb5..5f8d57ac084ff1217f11e4473e3144e74d306521 100644 (file)
@@ -620,11 +620,16 @@ static const struct renesas_sdhi_quirks sdhi_quirks_h3_es2 = {
        .hs400_4taps = true,
 };
 
+static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
+       .hs400_disabled = true,
+};
+
 static const struct soc_device_attribute sdhi_quirks_match[]  = {
        { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_h3_m3w_es1 },
        { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_h3_es2 },
-       { .soc_id = "r8a7796", .revision = "ES1.0", .data = &sdhi_quirks_h3_m3w_es1 },
-       { .soc_id = "r8a7796", .revision = "ES1.1", .data = &sdhi_quirks_h3_m3w_es1 },
+       { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_h3_m3w_es1 },
+       { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_h3_m3w_es1 },
+       { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
        { /* Sentinel. */ },
 };
 
index b29bf4e7dcb485e8e035723a490815457503d096..dd21315922c87da795852e75ac24a7de4a9a19cf 100644 (file)
@@ -115,6 +115,7 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
         */
        if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
                current_bus_width = mmc->ios.bus_width;
+               mmc->ios.bus_width = MMC_BUS_WIDTH_4;
                sdhci_set_bus_width(host, MMC_BUS_WIDTH_4);
        }
 
@@ -126,8 +127,10 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
 
        sdhci_end_tuning(host);
 
-       if (current_bus_width == MMC_BUS_WIDTH_8)
+       if (current_bus_width == MMC_BUS_WIDTH_8) {
+               mmc->ios.bus_width = MMC_BUS_WIDTH_8;
                sdhci_set_bus_width(host, current_bus_width);
+       }
 
        host->flags &= ~SDHCI_HS400_TUNING;
        return 0;
index 1c66fb2ad76bebbfbcdce793d0685fef5a627773..f2fe344593d58ce66697515326efae700dab22ce 100644 (file)
 #define FLEXCAN_MB_CNT_LENGTH(x)       (((x) & 0xf) << 16)
 #define FLEXCAN_MB_CNT_TIMESTAMP(x)    ((x) & 0xffff)
 
-#define FLEXCAN_TIMEOUT_US             (50)
+#define FLEXCAN_TIMEOUT_US             (250)
 
 /* FLEXCAN hardware feature flags
  *
@@ -1583,9 +1583,6 @@ static int flexcan_probe(struct platform_device *pdev)
                        dev_dbg(&pdev->dev, "failed to setup stop-mode\n");
        }
 
-       dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
-                priv->regs, dev->irq);
-
        return 0;
 
  failed_register:
index 9b449400376bc536cd0d53ea4abfe13d14515ba6..deb274a19ba003c9061f127e28ec57521c4f877b 100644 (file)
@@ -822,6 +822,27 @@ static int m_can_poll(struct napi_struct *napi, int quota)
        if (!irqstatus)
                goto end;
 
+       /* Errata workaround for issue "Needless activation of MRAF irq"
+        * During frame reception while the MCAN is in Error Passive state
+        * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
+        * it may happen that MCAN_IR.MRAF is set although there was no
+        * Message RAM access failure.
+        * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated.
+        * The Message RAM Access Failure interrupt routine needs to check
+        * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127.
+        * In this case, reset MCAN_IR.MRAF. No further action is required.
+        */
+       if ((priv->version <= 31) && (irqstatus & IR_MRAF) &&
+           (m_can_read(priv, M_CAN_ECR) & ECR_RP)) {
+               struct can_berr_counter bec;
+
+               __m_can_get_berr_counter(dev, &bec);
+               if (bec.rxerr == 127) {
+                       m_can_write(priv, M_CAN_IR, IR_MRAF);
+                       irqstatus &= ~IR_MRAF;
+               }
+       }
+
        psr = m_can_read(priv, M_CAN_PSR);
        if (irqstatus & IR_ERR_STATE)
                work_done += m_can_handle_state_errors(dev, psr);
index 2e7e535e9237850aaa02dfb058d87308a988dbf2..1c50788055cb6fcc11225c882be4e4c41e8553eb 100644 (file)
@@ -9,9 +9,10 @@ config CAN_HI311X
          Driver for the Holt HI311x SPI CAN controllers.
 
 config CAN_MCP251X
-       tristate "Microchip MCP251x SPI CAN controllers"
+       tristate "Microchip MCP251x and MCP25625 SPI CAN controllers"
        depends on HAS_DMA
        ---help---
-         Driver for the Microchip MCP251x SPI CAN controllers.
+         Driver for the Microchip MCP251x and MCP25625 SPI CAN
+         controllers.
 
 endmenu
index 78e29fa06fe615dbdd8c5710f8e5c445299638d0..44e99e3d713487ff8b9cbc5c15046276274d6c04 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
+ * CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface
  *
  * MCP2510 support and bug fixes by Christian Pellegrin
  * <chripell@evolware.org>
@@ -28,7 +28,7 @@
  * static struct spi_board_info spi_board_info[] = {
  *         {
  *                 .modalias = "mcp2510",
- *                     // or "mcp2515" depending on your controller
+ *                     // "mcp2515" or "mcp25625" depending on your controller
  *                 .platform_data = &mcp251x_info,
  *                 .irq = IRQ_EINT13,
  *                 .max_speed_hz = 2*1000*1000,
@@ -224,6 +224,7 @@ static const struct can_bittiming_const mcp251x_bittiming_const = {
 enum mcp251x_model {
        CAN_MCP251X_MCP2510     = 0x2510,
        CAN_MCP251X_MCP2515     = 0x2515,
+       CAN_MCP251X_MCP25625    = 0x25625,
 };
 
 struct mcp251x_priv {
@@ -266,7 +267,6 @@ static inline int mcp251x_is_##_model(struct spi_device *spi) \
 }
 
 MCP251X_IS(2510);
-MCP251X_IS(2515);
 
 static void mcp251x_clean(struct net_device *net)
 {
@@ -625,7 +625,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
 
        /* Wait for oscillator startup timer after reset */
        mdelay(MCP251X_OST_DELAY_MS);
-       
+
        reg = mcp251x_read_reg(spi, CANSTAT);
        if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
                return -ENODEV;
@@ -806,9 +806,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
                /* receive buffer 0 */
                if (intf & CANINTF_RX0IF) {
                        mcp251x_hw_rx(spi, 0);
-                       /*
-                        * Free one buffer ASAP
-                        * (The MCP2515 does this automatically.)
+                       /* Free one buffer ASAP
+                        * (The MCP2515/25625 does this automatically.)
                         */
                        if (mcp251x_is_2510(spi))
                                mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
@@ -817,7 +816,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
                /* receive buffer 1 */
                if (intf & CANINTF_RX1IF) {
                        mcp251x_hw_rx(spi, 1);
-                       /* the MCP2515 does this automatically */
+                       /* The MCP2515/25625 does this automatically. */
                        if (mcp251x_is_2510(spi))
                                clear_intf |= CANINTF_RX1IF;
                }
@@ -992,6 +991,10 @@ static const struct of_device_id mcp251x_of_match[] = {
                .compatible     = "microchip,mcp2515",
                .data           = (void *)CAN_MCP251X_MCP2515,
        },
+       {
+               .compatible     = "microchip,mcp25625",
+               .data           = (void *)CAN_MCP251X_MCP25625,
+       },
        { }
 };
 MODULE_DEVICE_TABLE(of, mcp251x_of_match);
@@ -1005,6 +1008,10 @@ static const struct spi_device_id mcp251x_id_table[] = {
                .name           = "mcp2515",
                .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2515,
        },
+       {
+               .name           = "mcp25625",
+               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP25625,
+       },
        { }
 };
 MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
@@ -1245,5 +1252,5 @@ module_spi_driver(mcp251x_can_driver);
 
 MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
              "Christian Pellegrin <chripell@evolware.org>");
-MODULE_DESCRIPTION("Microchip 251x CAN driver");
+MODULE_DESCRIPTION("Microchip 251x/25625 CAN driver");
 MODULE_LICENSE("GPL v2");
index ac3522b773034d7072611af20fe9621b13deb79d..4b3d0ddcda7940815494e7d126a952eb7b44d6f9 100644 (file)
@@ -102,12 +102,6 @@ config CAN_PEAK_USB
 
          (see also http://www.peak-system.com).
 
-config CAN_MCBA_USB
-       tristate "Microchip CAN BUS Analyzer interface"
-       ---help---
-         This driver supports the CAN BUS Analyzer interface
-         from Microchip (http://www.microchip.com/development-tools/).
-
 config CAN_UCAN
        tristate "Theobroma Systems UCAN interface"
        ---help---
index f2024404b8d65c1200fbf08a2db224c0833c25f7..63203ff452b58450964d0dc2a9d5b221075001c5 100644 (file)
@@ -1435,7 +1435,7 @@ static const struct xcan_devtype_data xcan_canfd_data = {
                 XCAN_FLAG_RXMNF |
                 XCAN_FLAG_TX_MAILBOXES |
                 XCAN_FLAG_RX_FIFO_MULTI,
-       .bittiming_const = &xcan_bittiming_const,
+       .bittiming_const = &xcan_bittiming_const_canfd,
        .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
        .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
        .bus_clk_name = "s_axi_aclk",
index fefb6aaa82ba1d72bad2203c03f6bb1e659dca4b..d99dc6de0006b0e6ec6815d79c2a6ba5dd32fa50 100644 (file)
@@ -9,8 +9,8 @@ obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
 obj-$(CONFIG_NET_DSA_MT7530)   += mt7530.o
 obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
 obj-$(CONFIG_NET_DSA_QCA8K)    += qca8k.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek.o
-realtek-objs                   := realtek-smi.o rtl8366.o rtl8366rb.o
+obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+realtek-smi-objs               := realtek-smi-core.o rtl8366.o rtl8366rb.o
 obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
 obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
 obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
index 39dace8e3512a4a49d5f5789e1b08477f92c030b..f46086fa906462e18913821153a7f1f936aa0799 100644 (file)
@@ -83,6 +83,9 @@ static void ksz_mib_read_work(struct work_struct *work)
        int i;
 
        for (i = 0; i < dev->mib_port_cnt; i++) {
+               if (dsa_is_unused_port(dev->ds, i))
+                       continue;
+
                p = &dev->ports[i];
                mib = &p->mib;
                mutex_lock(&mib->cnt_mutex);
similarity index 99%
rename from drivers/net/dsa/realtek-smi.c
rename to drivers/net/dsa/realtek-smi-core.c
index ad41ec63cc9f03aae553e43660afbe08dbc85522..dc0509c02d29405f54a5618d75605ec898b2fe91 100644 (file)
@@ -40,7 +40,7 @@
 #include <linux/bitops.h>
 #include <linux/if_bridge.h>
 
-#include "realtek-smi.h"
+#include "realtek-smi-core.h"
 
 #define REALTEK_SMI_ACK_RETRY_COUNT            5
 #define REALTEK_SMI_HW_STOP_DELAY              25      /* msecs */
index 6dedd43442cc5775980725bbda8c3c7d5ee68443..ca3d17e43ed8be057f3628b3548a58df11656c88 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/if_bridge.h>
 #include <net/dsa.h>
 
-#include "realtek-smi.h"
+#include "realtek-smi-core.h"
 
 int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
 {
@@ -307,7 +307,8 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
        struct rtl8366_vlan_4k vlan4k;
        int ret;
 
-       if (!smi->ops->is_vlan_valid(smi, port))
+       /* Use VLAN nr port + 1 since VLAN0 is not valid */
+       if (!smi->ops->is_vlan_valid(smi, port + 1))
                return -EINVAL;
 
        dev_info(smi->dev, "%s filtering on port %d\n",
@@ -318,12 +319,12 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
         * The hardware support filter ID (FID) 0..7, I have no clue how to
         * support this in the driver when the callback only says on/off.
         */
-       ret = smi->ops->get_vlan_4k(smi, port, &vlan4k);
+       ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k);
        if (ret)
                return ret;
 
        /* Just set the filter to FID 1 for now then */
-       ret = rtl8366_set_vlan(smi, port,
+       ret = rtl8366_set_vlan(smi, port + 1,
                               vlan4k.member,
                               vlan4k.untag,
                               1);
index 40b3974970c686388f9505e08ccd6fa949b9350c..a268085ffad28cfdc8c6668c4e42595d03b0e363 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/of_irq.h>
 #include <linux/regmap.h>
 
-#include "realtek-smi.h"
+#include "realtek-smi-core.h"
 
 #define RTL8366RB_PORT_NUM_CPU         5
 #define RTL8366RB_NUM_PORTS            6
index bb09319feedf7a876750f9061a4f7e3ec23f2b7d..2a3e2450968eeb066a8636001442d12413f78f3c 100644 (file)
@@ -50,7 +50,7 @@ config XSURF100
        tristate "Amiga XSurf 100 AX88796/NE2000 clone support"
        depends on ZORRO
        select AX88796
-       select ASIX_PHY
+       select AX88796B_PHY
        help
          This driver is for the Individual Computers X-Surf 100 Ethernet
          card (based on the Asix AX88796 chip). If you have such a card,
index fb308b330dc1f6512f8a44cd99677bbd83d98d68..8a6785173228f3b5892522d6b338abee7a66bb15 100644 (file)
@@ -1101,7 +1101,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
                cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
                break;
        case ETHTOOL_GRXRINGS:
-               cmd->data = adapter->num_rx_qs - 1;
+               cmd->data = adapter->num_rx_qs;
                break;
        default:
                return -EINVAL;
index 3da392bfd6594fe9c2fdb2105131e5324352eee0..3da6800732656477bd5d932373dde23d1b4dde9c 100644 (file)
@@ -428,9 +428,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
                if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
                        free_long_term_buff(adapter, &rx_pool->long_term_buff);
                        rx_pool->buff_size = be64_to_cpu(size_array[i]);
-                       alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
-                                            rx_pool->size *
-                                            rx_pool->buff_size);
+                       rc = alloc_long_term_buff(adapter,
+                                                 &rx_pool->long_term_buff,
+                                                 rx_pool->size *
+                                                 rx_pool->buff_size);
                } else {
                        rc = reset_long_term_buff(adapter,
                                                  &rx_pool->long_term_buff);
@@ -696,9 +697,9 @@ static int init_tx_pools(struct net_device *netdev)
                        return rc;
                }
 
-               init_one_tx_pool(netdev, &adapter->tso_pool[i],
-                                IBMVNIC_TSO_BUFS,
-                                IBMVNIC_TSO_BUF_SZ);
+               rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
+                                     IBMVNIC_TSO_BUFS,
+                                     IBMVNIC_TSO_BUF_SZ);
                if (rc) {
                        release_tx_pools(adapter);
                        return rc;
@@ -1745,7 +1746,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
        ibmvnic_cleanup(netdev);
 
-       if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
+       if (reset_state == VNIC_OPEN &&
+           adapter->reset_reason != VNIC_RESET_MOBILITY &&
            adapter->reset_reason != VNIC_RESET_FAILOVER) {
                rc = __ibmvnic_close(netdev);
                if (rc)
@@ -1844,6 +1846,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                return 0;
        }
 
+       /* refresh device's multicast list */
+       ibmvnic_set_multi(netdev);
+
        /* kick napi */
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_schedule(&adapter->napi[i]);
index 392fd895f27826e81153f230603fa37b8e921fcc..ae2240074d8ef8c3ab763af481acb8936db7ff25 100644 (file)
@@ -1905,8 +1905,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
 }
 
 /* Find tcam entry with matched pair <vid,port> */
-static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
-                                   u16 mask)
+static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
 {
        unsigned char byte[2], enable[2];
        struct mvpp2_prs_entry pe;
@@ -1914,13 +1913,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
        int tid;
 
        /* Go through the all entries with MVPP2_PRS_LU_VID */
-       for (tid = MVPP2_PE_VID_FILT_RANGE_START;
-            tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
-               if (!priv->prs_shadow[tid].valid ||
-                   priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+       for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+            tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+               if (!port->priv->prs_shadow[tid].valid ||
+                   port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
                        continue;
 
-               mvpp2_prs_init_from_hw(priv, &pe, tid);
+               mvpp2_prs_init_from_hw(port->priv, &pe, tid);
 
                mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
                mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
@@ -1950,7 +1949,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
        memset(&pe, 0, sizeof(pe));
 
        /* Scan TCAM and see if entry with this <vid,port> already exist */
-       tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
+       tid = mvpp2_prs_vid_range_find(port, vid, mask);
 
        reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
        if (reg_val & MVPP2_DSA_EXTENDED)
@@ -2008,7 +2007,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
        int tid;
 
        /* Scan TCAM and see if entry with this <vid,port> already exist */
-       tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
+       tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
 
        /* No such entry */
        if (tid < 0)
@@ -2026,8 +2025,10 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
 
        for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
             tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
-               if (priv->prs_shadow[tid].valid)
-                       mvpp2_prs_vid_entry_remove(port, tid);
+               if (priv->prs_shadow[tid].valid) {
+                       mvpp2_prs_hw_inv(priv, tid);
+                       priv->prs_shadow[tid].valid = false;
+               }
        }
 }
 
index d2ab8cd8ad9f0869753ff2e601b213be6ada25f8..e94686c420004c2bd44417fd21b2f8d67a5e69b5 100644 (file)
@@ -441,6 +441,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+       case MLX5_CMD_OP_CREATE_UCTX:
+       case MLX5_CMD_OP_DESTROY_UCTX:
+       case MLX5_CMD_OP_CREATE_UMEM:
+       case MLX5_CMD_OP_DESTROY_UMEM:
        case MLX5_CMD_OP_ALLOC_MEMIC:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
@@ -629,6 +633,10 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
        MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
        MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS);
+       MLX5_COMMAND_STR_CASE(CREATE_UCTX);
+       MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
+       MLX5_COMMAND_STR_CASE(CREATE_UMEM);
+       MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
        default: return "unknown command opcode";
        }
 }
index ebc046fa97d353bce5f9802eb1889faa755bede3..f6b1da99e6c26a1a3065c5b8fd64a184606b646f 100644 (file)
@@ -248,11 +248,32 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
 }
 EXPORT_SYMBOL(mlx5_unregister_interface);
 
+/* Must be called with intf_mutex held */
+static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol)
+{
+       struct mlx5_device_context *dev_ctx;
+       struct mlx5_interface *intf;
+       bool found = false;
+
+       list_for_each_entry(intf, &intf_list, list) {
+               if (intf->protocol == protocol) {
+                       dev_ctx = mlx5_get_device(intf, &mdev->priv);
+                       if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+                               found = true;
+                       break;
+               }
+       }
+
+       return found;
+}
+
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
 {
        mutex_lock(&mlx5_intf_mutex);
-       mlx5_remove_dev_by_protocol(mdev, protocol);
-       mlx5_add_dev_by_protocol(mdev, protocol);
+       if (mlx5_has_added_dev_by_protocol(mdev, protocol)) {
+               mlx5_remove_dev_by_protocol(mdev, protocol);
+               mlx5_add_dev_by_protocol(mdev, protocol);
+       }
        mutex_unlock(&mlx5_intf_mutex);
 }
 
index 3a183d690e235e6457c6148eaf030627b5f6d2ed..cc6797e24571d76d680e871f955cdf233aabba02 100644 (file)
@@ -385,6 +385,7 @@ struct mlx5e_txqsq {
        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;
+       int                        ch_ix;
        int                        txq_ix;
        u32                        rate_limit;
        struct work_struct         recover_work;
@@ -1112,6 +1113,7 @@ void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
                                       struct net_device *netdev,
                                       netdev_features_t features);
+int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
 #ifdef CONFIG_MLX5_ESWITCH
 int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
 int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
index fe5d4d7f15edc80426bed373e2bf646dfd39dde7..231e7cdfc6f7fb53e06c3aa92bdfc9ce4420622c 100644 (file)
@@ -11,24 +11,25 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
                                  struct net_device **route_dev,
                                  struct net_device **out_dev)
 {
+       struct net_device *uplink_dev, *uplink_upper, *real_dev;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct net_device *uplink_dev, *uplink_upper;
        bool dst_is_lag_dev;
 
+       real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev;
        uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
        uplink_upper = netdev_master_upper_dev_get(uplink_dev);
        dst_is_lag_dev = (uplink_upper &&
                          netif_is_lag_master(uplink_upper) &&
-                         dev == uplink_upper &&
+                         real_dev == uplink_upper &&
                          mlx5_lag_is_sriov(priv->mdev));
 
        /* if the egress device isn't on the same HW e-switch or
         * it's a LAG device, use the uplink
         */
-       if (!netdev_port_same_parent_id(priv->netdev, dev) ||
+       if (!netdev_port_same_parent_id(priv->netdev, real_dev) ||
            dst_is_lag_dev) {
-               *route_dev = uplink_dev;
-               *out_dev = *route_dev;
+               *route_dev = dev;
+               *out_dev = uplink_dev;
        } else {
                *route_dev = dev;
                if (is_vlan_dev(*route_dev))
index c65cefd84eda7de00bc82038924672bd23a9240f..a8e8350b38aa01a5bb401c7d927da37aaca460fd 100644 (file)
@@ -1082,6 +1082,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        sq->clock     = &mdev->clock;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
+       sq->ch_ix     = c->ix;
        sq->txq_ix    = txq_ix;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
@@ -3635,8 +3636,7 @@ static int mlx5e_handle_feature(struct net_device *netdev,
        return 0;
 }
 
-static int mlx5e_set_features(struct net_device *netdev,
-                             netdev_features_t features)
+int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
 {
        netdev_features_t oper_features = netdev->features;
        int err = 0;
@@ -5108,6 +5108,11 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
        struct mlx5e_priv *priv = vpriv;
        struct net_device *netdev = priv->netdev;
 
+#ifdef CONFIG_MLX5_ESWITCH
+       if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev)
+               return;
+#endif
+
        if (!netif_device_present(netdev))
                return;
 
index 9aea9c5b2ce85d94083fb0184ceadad2b8c55cc8..2f406b161bcfe503366f10ad20f7c80b8bf4905e 100644 (file)
@@ -1351,6 +1351,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
        .ndo_set_vf_vlan         = mlx5e_uplink_rep_set_vf_vlan,
        .ndo_get_port_parent_id  = mlx5e_rep_get_port_parent_id,
+       .ndo_set_features        = mlx5e_set_features,
 };
 
 bool mlx5e_eswitch_rep(struct net_device *netdev)
@@ -1425,10 +1426,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
 
        netdev->watchdog_timeo    = 15 * HZ;
 
+       netdev->features       |= NETIF_F_NETNS_LOCAL;
 
-       netdev->features         |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
-       netdev->hw_features      |= NETIF_F_HW_TC;
-
+       netdev->hw_features    |= NETIF_F_HW_TC;
        netdev->hw_features    |= NETIF_F_SG;
        netdev->hw_features    |= NETIF_F_IP_CSUM;
        netdev->hw_features    |= NETIF_F_IPV6_CSUM;
@@ -1437,7 +1437,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
        netdev->hw_features    |= NETIF_F_TSO6;
        netdev->hw_features    |= NETIF_F_RXCSUM;
 
-       if (rep->vport != MLX5_VPORT_UPLINK)
+       if (rep->vport == MLX5_VPORT_UPLINK)
+               netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+       else
                netdev->features |= NETIF_F_VLAN_CHALLENGED;
 
        netdev->features |= netdev->hw_features;
index 31cd02f1149939bf6d2126583a49c01a5f310b73..e40c60d1631f803bee289b4fe6057038b9c5f555 100644 (file)
@@ -2812,9 +2812,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
        if (!flow_action_has_entries(flow_action))
                return -EINVAL;
 
-       attr->in_rep = rpriv->rep;
-       attr->in_mdev = priv->mdev;
-
        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_DROP:
index 195a7d903cecbb18bf14047b670bfb4552b27f52..701e5dc75bb05d61d442ad635de540f93ee4d87e 100644 (file)
@@ -113,13 +113,13 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev)
 {
-       int channel_ix = netdev_pick_tx(dev, skb, NULL);
+       int txq_ix = netdev_pick_tx(dev, skb, NULL);
        struct mlx5e_priv *priv = netdev_priv(dev);
        u16 num_channels;
        int up = 0;
 
        if (!netdev_get_num_tc(dev))
-               return channel_ix;
+               return txq_ix;
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
@@ -129,14 +129,14 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                if (skb_vlan_tag_present(skb))
                        up = skb_vlan_tag_get_prio(skb);
 
-       /* channel_ix can be larger than num_channels since
+       /* txq_ix can be larger than num_channels since
         * dev->num_real_tx_queues = num_channels * num_tc
         */
        num_channels = priv->channels.params.num_channels;
-       if (channel_ix >= num_channels)
-               channel_ix = reciprocal_scale(channel_ix, num_channels);
+       if (txq_ix >= num_channels)
+               txq_ix = priv->txq2sq[txq_ix]->ch_ix;
 
-       return priv->channel_tc2txq[channel_ix][up];
+       return priv->channel_tc2txq[txq_ix][up];
 }
 
 static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
index e8002bfc1e8f13a1d6352042f70f8a7c248c3a57..7ed63ed657c71dd1ef8e8f321d56fa5676b89200 100644 (file)
@@ -997,7 +997,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
        MLXSW_REG_ZERO(spaft, payload);
        mlxsw_reg_spaft_local_port_set(payload, local_port);
        mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
-       mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
+       mlxsw_reg_spaft_allow_prio_tagged_set(payload, allow_untagged);
        mlxsw_reg_spaft_allow_tagged_set(payload, true);
 }
 
index dfe6b44baf635701e155809b5b9304e55c63faae..23204356ad888254e86ea5baa8f20ef5eeecb33e 100644 (file)
@@ -4280,13 +4280,16 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
        }
 }
 
+#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
+
 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
 {
        char slcr_pl[MLXSW_REG_SLCR_LEN];
        u32 seed;
        int err;
 
-       seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+       seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
+                    MLXSW_SP_LAG_SEED_INIT);
        mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
                                     MLXSW_REG_SLCR_LAG_HASH_DMAC |
                                     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
index 8512dd49e4201abbbda983b6cb2889ef2853e829..1537f70bc26d0fbef771b91e4c92f6d9dae1ac4f 100644 (file)
@@ -437,8 +437,8 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
                           MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
 };
 
-#define MLXSW_SP2_SB_PR_INGRESS_SIZE   40960000
-#define MLXSW_SP2_SB_PR_EGRESS_SIZE    40960000
+#define MLXSW_SP2_SB_PR_INGRESS_SIZE   38128752
+#define MLXSW_SP2_SB_PR_EGRESS_SIZE    38128752
 #define MLXSW_SP2_SB_PR_CPU_SIZE       (256 * 1000)
 
 /* Order according to mlxsw_sp2_sb_pool_dess */
index 15f804453cd614c8b6c7ff26a214b0a5c7f1ffdf..96b23c856f4de1db4f447743020898ca02e0240f 100644 (file)
@@ -247,8 +247,8 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
                                       match.mask->tos & 0x3);
 
        mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
-                                      match.key->tos >> 6,
-                                      match.mask->tos >> 6);
+                                      match.key->tos >> 2,
+                                      match.mask->tos >> 2);
 
        return 0;
 }
index 1cda8a248b12e1a554ee713371eeedbdaa377a40..ef554739dd54fe1e5fbcc8992e6d02edde5f4259 100644 (file)
@@ -2363,7 +2363,7 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
 static void
 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_neigh_entry *neigh_entry,
-                             bool removing);
+                             bool removing, bool dead);
 
 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
 {
@@ -2507,7 +2507,8 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
 
        memcpy(neigh_entry->ha, ha, ETH_ALEN);
        mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
-       mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
+       mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
+                                     dead);
 
        if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
                mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
@@ -3472,13 +3473,79 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
        nh->update = 1;
 }
 
+static int
+mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
+                                   struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+       struct neighbour *n, *old_n = neigh_entry->key.n;
+       struct mlxsw_sp_nexthop *nh;
+       bool entry_connected;
+       u8 nud_state, dead;
+       int err;
+
+       nh = list_first_entry(&neigh_entry->nexthop_list,
+                             struct mlxsw_sp_nexthop, neigh_list_node);
+
+       n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+       if (!n) {
+               n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
+                                nh->rif->dev);
+               if (IS_ERR(n))
+                       return PTR_ERR(n);
+               neigh_event_send(n, NULL);
+       }
+
+       mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
+       neigh_entry->key.n = n;
+       err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
+       if (err)
+               goto err_neigh_entry_insert;
+
+       read_lock_bh(&n->lock);
+       nud_state = n->nud_state;
+       dead = n->dead;
+       read_unlock_bh(&n->lock);
+       entry_connected = nud_state & NUD_VALID && !dead;
+
+       list_for_each_entry(nh, &neigh_entry->nexthop_list,
+                           neigh_list_node) {
+               neigh_release(old_n);
+               neigh_clone(n);
+               __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
+               mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+       }
+
+       neigh_release(n);
+
+       return 0;
+
+err_neigh_entry_insert:
+       neigh_entry->key.n = old_n;
+       mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
+       neigh_release(n);
+       return err;
+}
+
 static void
 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_neigh_entry *neigh_entry,
-                             bool removing)
+                             bool removing, bool dead)
 {
        struct mlxsw_sp_nexthop *nh;
 
+       if (list_empty(&neigh_entry->nexthop_list))
+               return;
+
+       if (dead) {
+               int err;
+
+               err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
+                                                         neigh_entry);
+               if (err)
+                       dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
+               return;
+       }
+
        list_for_each_entry(nh, &neigh_entry->nexthop_list,
                            neigh_list_node) {
                __mlxsw_sp_nexthop_neigh_update(nh, removing);
index b82b684f52ce6d3de7f2fc3462955af2267f3a4b..36a3bd30cfd9455a727d6c190814fc22edff2aad 100644 (file)
@@ -1867,6 +1867,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                        napi_gro_receive(&rx_ring->r_vec->napi, skb);
                } else {
                        skb->dev = netdev;
+                       skb_reset_network_header(skb);
                        __skb_push(skb, ETH_HLEN);
                        dev_queue_xmit(skb);
                }
index 98d1a45c06067d7aeaae5a31465a3cb036c7e080..25770122c2197298512a82af4ea94237bc44ad0a 100644 (file)
@@ -395,7 +395,7 @@ static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
        u8 zero_vni[3] = { 0 };
        u8 *vni = zero_vni;
 
-       if (skb->len < GENEVE_BASE_HLEN)
+       if (!pskb_may_pull(skb, skb_transport_offset(skb) + GENEVE_BASE_HLEN))
                return -EINVAL;
 
        geneveh = geneve_hdr(skb);
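
This geneve hunk, like the vxlan hunk later on this page, replaces a plain skb->len comparison with pskb_may_pull(), which guarantees the header bytes are actually in the linear area before geneve_hdr()/vxlan_hdr() dereferences them. A minimal sketch of the pattern, not part of the patch; DEMO_TUNNEL_HLEN is an illustrative constant:

#include <linux/errno.h>
#include <linux/skbuff.h>

#define DEMO_TUNNEL_HLEN 8      /* illustrative; stands in for GENEVE_BASE_HLEN */

/* Validate that the tunnel header is linear before casting it. */
static int demo_check_tunnel_header(struct sk_buff *skb)
{
        /* skb->len alone says nothing about how much data is linear;
         * pskb_may_pull() pulls the needed bytes in or fails. */
        if (!pskb_may_pull(skb, skb_transport_offset(skb) + DEMO_TUNNEL_HLEN))
                return -EINVAL;

        return 0;       /* safe to read the first DEMO_TUNNEL_HLEN bytes */
}
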
index 03ea5a7ed3a49a9ad8eee05a4e9e2da268093b30..afdcc5664ea6a0c69cadd71034aaf3d15ceaf9b5 100644 (file)
@@ -2407,7 +2407,7 @@ static struct  hv_driver netvsc_drv = {
        .probe = netvsc_probe,
        .remove = netvsc_remove,
        .driver = {
-               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+               .probe_type = PROBE_FORCE_SYNCHRONOUS,
        },
 };
 
index f99f27800fdba6c6a1603193ec6ef3c536ad5eb2..1d406c6df790d6cb9f5fa1821533cea57df1af6b 100644 (file)
@@ -254,7 +254,7 @@ config AQUANTIA_PHY
        ---help---
          Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
 
-config ASIX_PHY
+config AX88796B_PHY
        tristate "Asix PHYs"
        help
          Currently supports the Asix Electronics PHY found in the X-Surf 100
index 27d7f9f3b0de4c820ce1fdb6f7b1237df3601f5a..5b5c8669499e95b5c01d1cf6e2d19b5ad6aa66f6 100644 (file)
@@ -52,7 +52,7 @@ ifdef CONFIG_HWMON
 aquantia-objs                  += aquantia_hwmon.o
 endif
 obj-$(CONFIG_AQUANTIA_PHY)     += aquantia.o
-obj-$(CONFIG_ASIX_PHY)         += asix.o
+obj-$(CONFIG_AX88796B_PHY)     += ax88796b.o
 obj-$(CONFIG_AT803X_PHY)       += at803x.o
 obj-$(CONFIG_BCM63XX_PHY)      += bcm63xx.o
 obj-$(CONFIG_BCM7XXX_PHY)      += bcm7xxx.o
index d9a6699abe592d8ef82d03f3040734f7cc3c6d0f..780c10ee359baf68fba9041c46d889b087c8ff2a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/usb/cdc.h>
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc-wdm.h>
+#include <linux/u64_stats_sync.h>
 
 /* This driver supports wwan (3G/LTE/?) devices using a vendor
  * specific management protocol called Qualcomm MSM Interface (QMI) -
@@ -75,6 +76,7 @@ struct qmimux_hdr {
 struct qmimux_priv {
        struct net_device *real_dev;
        u8 mux_id;
+       struct pcpu_sw_netstats __percpu *stats64;
 };
 
 static int qmimux_open(struct net_device *dev)
@@ -101,19 +103,65 @@ static netdev_tx_t qmimux_start_xmit(struct sk_buff *skb, struct net_device *dev
        struct qmimux_priv *priv = netdev_priv(dev);
        unsigned int len = skb->len;
        struct qmimux_hdr *hdr;
+       netdev_tx_t ret;
 
        hdr = skb_push(skb, sizeof(struct qmimux_hdr));
        hdr->pad = 0;
        hdr->mux_id = priv->mux_id;
        hdr->pkt_len = cpu_to_be16(len);
        skb->dev = priv->real_dev;
-       return dev_queue_xmit(skb);
+       ret = dev_queue_xmit(skb);
+
+       if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+               struct pcpu_sw_netstats *stats64 = this_cpu_ptr(priv->stats64);
+
+               u64_stats_update_begin(&stats64->syncp);
+               stats64->tx_packets++;
+               stats64->tx_bytes += len;
+               u64_stats_update_end(&stats64->syncp);
+       } else {
+               dev->stats.tx_dropped++;
+       }
+
+       return ret;
+}
+
+static void qmimux_get_stats64(struct net_device *net,
+                              struct rtnl_link_stats64 *stats)
+{
+       struct qmimux_priv *priv = netdev_priv(net);
+       unsigned int start;
+       int cpu;
+
+       netdev_stats_to_stats64(stats, &net->stats);
+
+       for_each_possible_cpu(cpu) {
+               struct pcpu_sw_netstats *stats64;
+               u64 rx_packets, rx_bytes;
+               u64 tx_packets, tx_bytes;
+
+               stats64 = per_cpu_ptr(priv->stats64, cpu);
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&stats64->syncp);
+                       rx_packets = stats64->rx_packets;
+                       rx_bytes = stats64->rx_bytes;
+                       tx_packets = stats64->tx_packets;
+                       tx_bytes = stats64->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
+
+               stats->rx_packets += rx_packets;
+               stats->rx_bytes += rx_bytes;
+               stats->tx_packets += tx_packets;
+               stats->tx_bytes += tx_bytes;
+       }
 }
 
 static const struct net_device_ops qmimux_netdev_ops = {
-       .ndo_open       = qmimux_open,
-       .ndo_stop       = qmimux_stop,
-       .ndo_start_xmit = qmimux_start_xmit,
+       .ndo_open        = qmimux_open,
+       .ndo_stop        = qmimux_stop,
+       .ndo_start_xmit  = qmimux_start_xmit,
+       .ndo_get_stats64 = qmimux_get_stats64,
 };
 
 static void qmimux_setup(struct net_device *dev)
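
The hunk above switches qmimux to per-CPU software counters: the transmit and receive paths bump this-CPU counters inside a u64_stats sequence, and .ndo_get_stats64 sums them per CPU with the matching u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop shown above. A minimal sketch of the writer side, not part of the patch, assuming a driver-private struct like the one added here (demo_* names are illustrative):

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct demo_priv {
        struct pcpu_sw_netstats __percpu *stats64;  /* from netdev_alloc_pcpu_stats() */
};

/* Account one transmitted packet without touching shared cachelines. */
static void demo_count_tx(struct demo_priv *priv, unsigned int len)
{
        struct pcpu_sw_netstats *s = this_cpu_ptr(priv->stats64);

        u64_stats_update_begin(&s->syncp);
        s->tx_packets++;
        s->tx_bytes += len;
        u64_stats_update_end(&s->syncp);
}
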
@@ -153,7 +201,7 @@ static bool qmimux_has_slaves(struct usbnet *dev)
 
 static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
-       unsigned int len, offset = 0;
+       unsigned int len, offset = 0, pad_len, pkt_len;
        struct qmimux_hdr *hdr;
        struct net_device *net;
        struct sk_buff *skbn;
@@ -171,10 +219,16 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                if (hdr->pad & 0x80)
                        goto skip;
 
+               /* extract padding length and check for valid length info */
+               pad_len = hdr->pad & 0x3f;
+               if (len == 0 || pad_len >= len)
+                       goto skip;
+               pkt_len = len - pad_len;
+
                net = qmimux_find_dev(dev, hdr->mux_id);
                if (!net)
                        goto skip;
-               skbn = netdev_alloc_skb(net, len);
+               skbn = netdev_alloc_skb(net, pkt_len);
                if (!skbn)
                        return 0;
                skbn->dev = net;
@@ -191,9 +245,20 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                        goto skip;
                }
 
-               skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len);
-               if (netif_rx(skbn) != NET_RX_SUCCESS)
+               skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len);
+               if (netif_rx(skbn) != NET_RX_SUCCESS) {
+                       net->stats.rx_errors++;
                        return 0;
+               } else {
+                       struct pcpu_sw_netstats *stats64;
+                       struct qmimux_priv *priv = netdev_priv(net);
+
+                       stats64 = this_cpu_ptr(priv->stats64);
+                       u64_stats_update_begin(&stats64->syncp);
+                       stats64->rx_packets++;
+                       stats64->rx_bytes += pkt_len;
+                       u64_stats_update_end(&stats64->syncp);
+               }
 
 skip:
                offset += len + qmimux_hdr_sz;
@@ -217,6 +282,12 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
        priv->mux_id = mux_id;
        priv->real_dev = real_dev;
 
+       priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+       if (!priv->stats64) {
+               err = -ENOBUFS;
+               goto out_free_newdev;
+       }
+
        err = register_netdevice(new_dev);
        if (err < 0)
                goto out_free_newdev;
@@ -241,13 +312,15 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
        return err;
 }
 
-static void qmimux_unregister_device(struct net_device *dev)
+static void qmimux_unregister_device(struct net_device *dev,
+                                    struct list_head *head)
 {
        struct qmimux_priv *priv = netdev_priv(dev);
        struct net_device *real_dev = priv->real_dev;
 
+       free_percpu(priv->stats64);
        netdev_upper_dev_unlink(real_dev, dev);
-       unregister_netdevice(dev);
+       unregister_netdevice_queue(dev, head);
 
        /* Get rid of the reference to real_dev */
        dev_put(real_dev);
@@ -356,8 +429,8 @@ static ssize_t add_mux_store(struct device *d,  struct device_attribute *attr, c
        if (kstrtou8(buf, 0, &mux_id))
                return -EINVAL;
 
-       /* mux_id [1 - 0x7f] range empirically found */
-       if (mux_id < 1 || mux_id > 0x7f)
+       /* mux_id [1 - 254] for compatibility with ip(8) and the rmnet driver */
+       if (mux_id < 1 || mux_id > 254)
                return -EINVAL;
 
        if (!rtnl_trylock())
@@ -418,7 +491,7 @@ static ssize_t del_mux_store(struct device *d,  struct device_attribute *attr, c
                ret = -EINVAL;
                goto err;
        }
-       qmimux_unregister_device(del_dev);
+       qmimux_unregister_device(del_dev, NULL);
 
        if (!qmimux_has_slaves(dev))
                info->flags &= ~QMI_WWAN_FLAG_MUX;
@@ -1428,6 +1501,7 @@ static void qmi_wwan_disconnect(struct usb_interface *intf)
        struct qmi_wwan_state *info;
        struct list_head *iter;
        struct net_device *ldev;
+       LIST_HEAD(list);
 
        /* called twice if separate control and data intf */
        if (!dev)
@@ -1440,8 +1514,9 @@ static void qmi_wwan_disconnect(struct usb_interface *intf)
                }
                rcu_read_lock();
                netdev_for_each_upper_dev_rcu(dev->net, ldev, iter)
-                       qmimux_unregister_device(ldev);
+                       qmimux_unregister_device(ldev, &list);
                rcu_read_unlock();
+               unregister_netdevice_many(&list);
                rtnl_unlock();
                info->flags &= ~QMI_WWAN_FLAG_MUX;
        }
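
The disconnect path above now queues every mux slave on a local list with unregister_netdevice_queue() and tears them all down with one unregister_netdevice_many() call, so the expensive synchronization is paid once for the whole batch rather than once per device. A minimal sketch of that idiom under the RTNL lock, not part of the patch, with illustrative names:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Unregister several devices in one batch instead of one at a time. */
static void demo_teardown_slaves(struct net_device *slaves[], int count)
{
        LIST_HEAD(list);
        int i;

        rtnl_lock();
        for (i = 0; i < count; i++)
                unregister_netdevice_queue(slaves[i], &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}
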
index 5994d5415a03324944472a9a875e3c1471bf5641..4c9bc29fe3d5897761dfd47c983c863bdd5b7b59 100644 (file)
@@ -1766,7 +1766,7 @@ static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
        struct vxlanhdr *hdr;
        __be32 vni;
 
-       if (skb->len < VXLAN_HLEN)
+       if (!pskb_may_pull(skb, skb_transport_offset(skb) + VXLAN_HLEN))
                return -EINVAL;
 
        hdr = vxlan_hdr(skb);
index 22b73da428225f67675ca9f6bb7d8d07b9cecb95..9a51f1ba87c348cc06d78e7db31521da3f37e08c 100644 (file)
@@ -678,6 +678,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 
        brcmf_dbg(TRACE, "Enter: on=%d\n", on);
 
+       sdio_retune_crc_disable(bus->sdiodev->func1);
+
+       /* Cannot re-tune if device is asleep; defer till we're awake */
+       if (on)
+               sdio_retune_hold_now(bus->sdiodev->func1);
+
        wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
        /* 1st KSO write goes to AOS wake up core if device is asleep  */
        brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
@@ -738,6 +744,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
        if (try_cnt > MAX_KSO_ATTEMPTS)
                brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
 
+       if (on)
+               sdio_retune_release(bus->sdiodev->func1);
+
+       sdio_retune_crc_enable(bus->sdiodev->func1);
+
        return err;
 }
 
@@ -3375,11 +3386,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
 
 static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
 {
-       if (bus->ci->chip == CY_CC_43012_CHIP_ID ||
-           bus->ci->chip == CY_CC_4373_CHIP_ID ||
-           bus->ci->chip == BRCM_CC_4339_CHIP_ID ||
-           bus->ci->chip == BRCM_CC_4345_CHIP_ID ||
-           bus->ci->chip == BRCM_CC_4354_CHIP_ID)
+       if (bus->ci->chip == CY_CC_43012_CHIP_ID)
                return true;
        else
                return false;
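
The KSO hunk above brackets the wake-up register accesses with the SDIO retuning helpers: CRC errors are expected while the chip toggles KSO, so error-triggered retuning is suppressed, and retuning is held entirely while the device may still be asleep. A minimal sketch of the bracket, not part of the patch, assuming func is the card's function 1 (the helper name is illustrative):

#include <linux/mmc/sdio_func.h>

/* Suppress retuning around a register sequence that provokes CRC errors. */
static void demo_kso_bracket(struct sdio_func *func, bool waking)
{
        sdio_retune_crc_disable(func);
        if (waking)
                sdio_retune_hold_now(func);     /* cannot retune while asleep */

        /* ... SLEEPCSR writes and polling go here ... */

        if (waking)
                sdio_retune_release(func);
        sdio_retune_crc_enable(func);
}
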
index 5f52e40a29032dd290991324a8f31f512c99fd16..33d7bc5500dbf00191bac4c27d632b1ec4960c71 100644 (file)
@@ -2747,3 +2747,42 @@ void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t)
                          jiffies + msecs_to_jiffies(collect_interval));
        }
 }
+
+#define FSEQ_REG(x) { .addr = (x), .str = #x, }
+
+void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       unsigned long flags;
+       int i;
+       struct {
+               u32 addr;
+               const char *str;
+       } fseq_regs[] = {
+               FSEQ_REG(FSEQ_ERROR_CODE),
+               FSEQ_REG(FSEQ_TOP_INIT_VERSION),
+               FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
+               FSEQ_REG(FSEQ_OTP_VERSION),
+               FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
+               FSEQ_REG(FSEQ_ALIVE_TOKEN),
+               FSEQ_REG(FSEQ_CNVI_ID),
+               FSEQ_REG(FSEQ_CNVR_ID),
+               FSEQ_REG(CNVI_AUX_MISC_CHIP),
+               FSEQ_REG(CNVR_AUX_MISC_CHIP),
+               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
+               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
+       };
+
+       if (!iwl_trans_grab_nic_access(trans, &flags))
+               return;
+
+       IWL_ERR(fwrt, "Fseq Registers:\n");
+
+       for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
+               IWL_ERR(fwrt, "0x%08X | %s\n",
+                       iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
+                       fseq_regs[i].str);
+
+       iwl_trans_release_nic_access(trans, &flags);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs);
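
FSEQ_REG() above uses preprocessor stringification so each table entry carries both the register address and its symbolic name, letting the dump loop print the pair without a second hand-maintained list. A generic sketch of the idiom, not part of the patch, with made-up register names:

#include <linux/kernel.h>

#define DEMO_STATUS_REG 0xA200B0
#define NAMED_REG(x)    { .addr = (x), .name = #x }

static const struct {
        u32 addr;
        const char *name;
} demo_regs[] = {
        NAMED_REG(DEMO_STATUS_REG), /* .name becomes "DEMO_STATUS_REG" */
};

static void demo_print_regs(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(demo_regs); i++)
                pr_info("0x%08X | %s\n", demo_regs[i].addr, demo_regs[i].name);
}
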
index 2a9e560a906bb6c287190f68cb91ee0981262e0d..fd0ad220e961f5d51f64419ceb23e4ffbc9cc7c9 100644 (file)
@@ -471,4 +471,6 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
 }
 
 void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t);
+
+void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
 #endif  /* __iwl_fw_dbg_h__ */
index 852d3cbfc719925340c23457145c7b2a80b180e9..fba242284507b8212453400eeb1e37b702518ada 100644 (file)
@@ -1597,7 +1597,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        goto free;
 
  out_free_fw:
-       iwl_dealloc_ucode(drv);
        release_firmware(ucode_raw);
  out_unbind:
        complete(&drv->request_firmware_complete);
index 8e6a0c363c0dd514cca2c064a4568b5eee2bb537..8d930bfe0727579788985d8622e7a9ab3a6a5daf 100644 (file)
@@ -395,7 +395,11 @@ enum {
        WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK       = 0x80000000,
 };
 
-#define AUX_MISC_REG                   0xA200B0
+#define CNVI_AUX_MISC_CHIP                             0xA200B0
+#define CNVR_AUX_MISC_CHIP                             0xA2B800
+#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM         0xA29890
+#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR     0xA29938
+
 enum {
        HW_STEP_LOCATION_BITS = 24,
 };
@@ -408,7 +412,12 @@ enum aux_misc_master1_en {
 #define AUX_MISC_MASTER1_SMPHR_STATUS  0xA20800
 #define RSA_ENABLE                     0xA24B08
 #define PREG_AUX_BUS_WPROT_0           0xA04CC0
-#define PREG_PRPH_WPROT_0              0xA04CE0
+
+/* device family 9000 WPROT register */
+#define PREG_PRPH_WPROT_9000           0xA04CE0
+/* device family 22000 WPROT register */
+#define PREG_PRPH_WPROT_22000          0xA04D00
+
 #define SB_CPU_1_STATUS                        0xA01E30
 #define SB_CPU_2_STATUS                        0xA01E34
 #define UMAG_SB_CPU_1_STATUS           0xA038C0
@@ -442,4 +451,13 @@ enum {
 
 #define UREG_DOORBELL_TO_ISR6          0xA05C04
 #define UREG_DOORBELL_TO_ISR6_NMI_BIT  BIT(0)
+
+#define FSEQ_ERROR_CODE                        0xA340C8
+#define FSEQ_TOP_INIT_VERSION          0xA34038
+#define FSEQ_CNVIO_INIT_VERSION                0xA3403C
+#define FSEQ_OTP_VERSION               0xA340FC
+#define FSEQ_TOP_CONTENT_VERSION       0xA340F4
+#define FSEQ_ALIVE_TOKEN               0xA340F0
+#define FSEQ_CNVI_ID                   0xA3408C
+#define FSEQ_CNVR_ID                   0xA34090
 #endif                         /* __iwl_prph_h__ */
index 60f5d337f16d4f8c2dd65ec13332a3684d4f1a69..e7e68fb2bd29aa9989e814ac64b5114f3a170c57 100644 (file)
@@ -1972,26 +1972,6 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
        }
 }
 
-static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
-{
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
-       u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
-       u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
-
-       if (!mvm->store_d3_resume_sram)
-               return;
-
-       if (!mvm->d3_resume_sram) {
-               mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
-               if (!mvm->d3_resume_sram)
-                       return;
-       }
-
-       iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
-#endif
-}
-
 static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
                                       struct ieee80211_vif *vif)
 {
@@ -2054,8 +2034,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        }
 
        iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
-       /* query SRAM first in case we want event logging */
-       iwl_mvm_read_d3_sram(mvm);
 
        if (iwl_mvm_check_rt_status(mvm, vif)) {
                set_bit(STATUS_FW_ERROR, &mvm->trans->status);
index d4ff6b44de2c45e0d718ee5a3887472d6f752760..5b1bb76c5d28cf46339d1cbb8e3a3e4f764722db 100644 (file)
@@ -1557,59 +1557,6 @@ static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
 }
 #endif
 
-#ifdef CONFIG_PM_SLEEP
-static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
-                                      size_t count, loff_t *ppos)
-{
-       int store;
-
-       if (sscanf(buf, "%d", &store) != 1)
-               return -EINVAL;
-
-       mvm->store_d3_resume_sram = store;
-
-       return count;
-}
-
-static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
-                                     size_t count, loff_t *ppos)
-{
-       struct iwl_mvm *mvm = file->private_data;
-       const struct fw_img *img;
-       int ofs, len, pos = 0;
-       size_t bufsz, ret;
-       char *buf;
-       u8 *ptr = mvm->d3_resume_sram;
-
-       img = &mvm->fw->img[IWL_UCODE_WOWLAN];
-       len = img->sec[IWL_UCODE_SECTION_DATA].len;
-
-       bufsz = len * 4 + 256;
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n",
-                        mvm->store_d3_resume_sram ? "en" : "dis");
-
-       if (ptr) {
-               for (ofs = 0; ofs < len; ofs += 16) {
-                       pos += scnprintf(buf + pos, bufsz - pos,
-                                        "0x%.4x %16ph\n", ofs, ptr + ofs);
-               }
-       } else {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "(no data captured)\n");
-       }
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-
-       kfree(buf);
-
-       return ret;
-}
-#endif
-
 #define PRINT_MVM_REF(ref) do {                                                \
        if (mvm->refs[ref])                                             \
                pos += scnprintf(buf + pos, bufsz - pos,                \
@@ -1940,9 +1887,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
 #endif
 
-#ifdef CONFIG_PM_SLEEP
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
-#endif
 #ifdef CONFIG_ACPI
 MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
 #endif
@@ -2159,7 +2103,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 #endif
 
 #ifdef CONFIG_PM_SLEEP
-       MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600);
        MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
        debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
                            &mvm->d3_wake_sysassert);
index ab68b5d53ec957d02156f3989c336d6b448a70be..153717587aebd31e89b69b4bf2853f8ac485910e 100644 (file)
@@ -311,6 +311,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        int ret;
        enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
        static const u16 alive_cmd[] = { MVM_ALIVE };
+       bool run_in_rfkill =
+               ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
 
        if (ucode_type == IWL_UCODE_REGULAR &&
            iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
@@ -328,7 +330,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                   alive_cmd, ARRAY_SIZE(alive_cmd),
                                   iwl_alive_fn, &alive_data);
 
-       ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
+       /*
+        * We want to load the INIT firmware even in RFKILL.
+        * For the unified firmware case, the ucode_type is not
+        * INIT, but we still need to run it.
+        */
+       ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
        if (ret) {
                iwl_fw_set_current_image(&mvm->fwrt, old_type);
                iwl_remove_notification(&mvm->notif_wait, &alive_wait);
@@ -433,7 +440,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
         * commands
         */
        ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
-                                               INIT_EXTENDED_CFG_CMD), 0,
+                                               INIT_EXTENDED_CFG_CMD),
+                                  CMD_SEND_IN_RFKILL,
                                   sizeof(init_cfg), &init_cfg);
        if (ret) {
                IWL_ERR(mvm, "Failed to run init config command: %d\n",
@@ -457,7 +465,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        }
 
        ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
-                                               NVM_ACCESS_COMPLETE), 0,
+                                               NVM_ACCESS_COMPLETE),
+                                  CMD_SEND_IN_RFKILL,
                                   sizeof(nvm_complete), &nvm_complete);
        if (ret) {
                IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
@@ -482,6 +491,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                }
        }
 
+       mvm->rfkill_safe_init_done = true;
+
        return 0;
 
 error:
@@ -526,7 +537,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (WARN_ON_ONCE(mvm->calibrating))
+       if (WARN_ON_ONCE(mvm->rfkill_safe_init_done))
                return 0;
 
        iwl_init_notification_wait(&mvm->notif_wait,
@@ -576,7 +587,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                goto remove_notif;
        }
 
-       mvm->calibrating = true;
+       mvm->rfkill_safe_init_done = true;
 
        /* Send TX valid antennas before triggering calibrations */
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
@@ -612,7 +623,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 remove_notif:
        iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 out:
-       mvm->calibrating = false;
+       mvm->rfkill_safe_init_done = false;
        if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
                /* we want to debug INIT and we have no NVM - fake */
                mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
index 5c52469288beb55ed5cd21f97a6c184b38c0c136..fdbabca0280e164e24e436fee97292976b7d1d20 100644 (file)
@@ -1209,7 +1209,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 
        mvm->scan_status = 0;
        mvm->ps_disabled = false;
-       mvm->calibrating = false;
+       mvm->rfkill_safe_init_done = false;
 
        /* just in case one was running */
        iwl_mvm_cleanup_roc_te(mvm);
index 8dc2a9850bc584c4a009caed8fd2c9725a7853ae..02efcf2189c489ad92a0ff1acaf8de436d3cc758 100644 (file)
@@ -880,7 +880,7 @@ struct iwl_mvm {
        struct iwl_mvm_vif *bf_allowed_vif;
 
        bool hw_registered;
-       bool calibrating;
+       bool rfkill_safe_init_done;
        bool support_umac_log;
 
        u32 ampdu_ref;
@@ -1039,8 +1039,6 @@ struct iwl_mvm {
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        bool d3_wake_sysassert;
        bool d3_test_active;
-       bool store_d3_resume_sram;
-       void *d3_resume_sram;
        u32 d3_test_pme_ptr;
        struct ieee80211_vif *keep_vif;
        u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
index acd2fda124665f651cb939281df40ef1c3163bc6..fad3bf563712e02c9ff0bbe426b3ca58ac46aab4 100644 (file)
@@ -918,9 +918,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        kfree(mvm->error_recovery_buf);
        mvm->error_recovery_buf = NULL;
 
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
-       kfree(mvm->d3_resume_sram);
-#endif
        iwl_trans_op_mode_leave(mvm->trans);
 
        iwl_phy_db_free(mvm->phy_db);
@@ -1212,7 +1209,8 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-       bool calibrating = READ_ONCE(mvm->calibrating);
+       bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
+       bool unified = iwl_mvm_has_unified_ucode(mvm);
 
        if (state)
                set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -1221,15 +1219,23 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 
        iwl_mvm_set_rfkill_state(mvm);
 
-       /* iwl_run_init_mvm_ucode is waiting for results, abort it */
-       if (calibrating)
+        /* iwl_run_init_mvm_ucode is waiting for results, abort it. */
+       if (rfkill_safe_init_done)
                iwl_abort_notification_waits(&mvm->notif_wait);
 
+       /*
+        * Don't ask the transport to stop the firmware. We'll do it
+        * after cfg80211 takes us down.
+        */
+       if (unified)
+               return false;
+
        /*
         * Stop the device if we run OPERATIONAL firmware or if we are in the
         * middle of the calibrations.
         */
-       return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
+       return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT ||
+                        rfkill_safe_init_done);
 }
 
 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
index 659e21b2d4e7293ed913d60f9285c25d42862187..be62f499c59510676ed9c56d2bc40633dc15af55 100644 (file)
@@ -441,7 +441,8 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
         */
        sta->max_amsdu_len = max_amsdu_len;
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cfg_cmd),
+                                  &cfg_cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
 }
index b9914efc55c4b2f027db3954877645c4b7c27fb0..cc56ab88fb4394e31e6ca28635547198352727b7 100644 (file)
@@ -596,6 +596,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                iwl_mvm_dump_lmac_error_log(mvm, 1);
 
        iwl_mvm_dump_umac_error_log(mvm);
+
+       iwl_fw_error_print_fseq_regs(&mvm->fwrt);
 }
 
 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
index b513037dc0661e9eaae98f162841fbbe2e86e779..85973dd572341be089e66effcd6821be93ddacd8 100644 (file)
@@ -928,7 +928,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
                                           MSIX_HW_INT_CAUSES_REG_RF_KILL);
        }
 
-       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
+       if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
                /*
                 * On 9000-series devices this bit isn't enabled by default, so
                 * when we power down the device we need set the bit to allow it
index 803fcbac415255d39e995a299e3ed031988e7436..dfa1bed124aab719ad18b1e9c5faeda924fb4c00 100644 (file)
@@ -1698,26 +1698,26 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
        return 0;
 }
 
-static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       u32 hpm;
-       int err;
-
-       lockdep_assert_held(&trans_pcie->mutex);
+       u32 hpm, wprot;
 
-       err = iwl_pcie_prepare_card_hw(trans);
-       if (err) {
-               IWL_ERR(trans, "Error while preparing HW: %d\n", err);
-               return err;
+       switch (trans->cfg->device_family) {
+       case IWL_DEVICE_FAMILY_9000:
+               wprot = PREG_PRPH_WPROT_9000;
+               break;
+       case IWL_DEVICE_FAMILY_22000:
+               wprot = PREG_PRPH_WPROT_22000;
+               break;
+       default:
+               return 0;
        }
 
        hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
        if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
-               int wfpm_val = iwl_read_umac_prph_no_grab(trans,
-                                                         PREG_PRPH_WPROT_0);
+               u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
 
-               if (wfpm_val & PREG_WFPM_ACCESS) {
+               if (wprot_val & PREG_WFPM_ACCESS) {
                        IWL_ERR(trans,
                                "Error, can not clear persistence bit\n");
                        return -EPERM;
@@ -1726,6 +1726,26 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
                                            hpm & ~PERSISTENCE_BIT);
        }
 
+       return 0;
+}
+
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int err;
+
+       lockdep_assert_held(&trans_pcie->mutex);
+
+       err = iwl_pcie_prepare_card_hw(trans);
+       if (err) {
+               IWL_ERR(trans, "Error while preparing HW: %d\n", err);
+               return err;
+       }
+
+       err = iwl_trans_pcie_clear_persistence_bit(trans);
+       if (err)
+               return err;
+
        iwl_trans_pcie_sw_reset(trans);
 
        err = iwl_pcie_apm_init(trans);
@@ -3526,7 +3546,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                        hw_step |= ENABLE_WFPM;
                        iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG,
                                                    hw_step);
-                       hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
+                       hw_step = iwl_read_prph_no_grab(trans,
+                                                       CNVI_AUX_MISC_CHIP);
                        hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
                        if (hw_step == 0x3)
                                trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
@@ -3577,7 +3598,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-                  (trans->cfg != &iwl_ax200_cfg_cc ||
+                  ((trans->cfg != &iwl_ax200_cfg_cc &&
+                   trans->cfg != &killer1650x_2ax_cfg &&
+                   trans->cfg != &killer1650w_2ax_cfg) ||
                    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
                u32 hw_status;
 
index 60ca13e0f15b7b02aca1e425f8a971555ac172e3..b5274d1f30fa7e472aa85665a9cefdb3a233dfd1 100644 (file)
@@ -3851,6 +3851,7 @@ static int __init init_mac80211_hwsim(void)
                        break;
                case HWSIM_REGTEST_STRICT_ALL:
                        param.reg_strict = true;
+                       /* fall through */
                case HWSIM_REGTEST_DRIVER_REG_ALL:
                        param.reg_alpha2 = hwsim_alpha2s[0];
                        break;
index 6845eb57b39a1ae4bae60cd78c9777adc17c0d09..653d347a9a19d3e77eb3fbd1ad5d7da3278d6d4a 100644 (file)
@@ -329,6 +329,8 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
        struct ieee80211_vendor_ie *vendorhdr;
        u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
        int left_len, parsed_len = 0;
+       unsigned int token_len;
+       int err = 0;
 
        if (!info->tail || !info->tail_len)
                return 0;
@@ -344,6 +346,12 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
         */
        while (left_len > sizeof(struct ieee_types_header)) {
                hdr = (void *)(info->tail + parsed_len);
+               token_len = hdr->len + sizeof(struct ieee_types_header);
+               if (token_len > left_len) {
+                       err = -EINVAL;
+                       goto out;
+               }
+
                switch (hdr->element_id) {
                case WLAN_EID_SSID:
                case WLAN_EID_SUPP_RATES:
@@ -361,17 +369,20 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
                        if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
                                                    WLAN_OUI_TYPE_MICROSOFT_WMM,
                                                    (const u8 *)hdr,
-                                                   hdr->len + sizeof(struct ieee_types_header)))
+                                                   token_len))
                                break;
                        /* fall through */
                default:
-                       memcpy(gen_ie->ie_buffer + ie_len, hdr,
-                              hdr->len + sizeof(struct ieee_types_header));
-                       ie_len += hdr->len + sizeof(struct ieee_types_header);
+                       if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
+                               err = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len);
+                       ie_len += token_len;
                        break;
                }
-               left_len -= hdr->len + sizeof(struct ieee_types_header);
-               parsed_len += hdr->len + sizeof(struct ieee_types_header);
+               left_len -= token_len;
+               parsed_len += token_len;
        }
 
        /* parse only WPA vendor IE from tail, WMM IE is configured by
@@ -381,15 +392,17 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
                                                    WLAN_OUI_TYPE_MICROSOFT_WPA,
                                                    info->tail, info->tail_len);
        if (vendorhdr) {
-               memcpy(gen_ie->ie_buffer + ie_len, vendorhdr,
-                      vendorhdr->len + sizeof(struct ieee_types_header));
-               ie_len += vendorhdr->len + sizeof(struct ieee_types_header);
+               token_len = vendorhdr->len + sizeof(struct ieee_types_header);
+               if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
+                       err = -EINVAL;
+                       goto out;
+               }
+               memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len);
+               ie_len += token_len;
        }
 
-       if (!ie_len) {
-               kfree(gen_ie);
-               return 0;
-       }
+       if (!ie_len)
+               goto out;
 
        gen_ie->ie_index = cpu_to_le16(gen_idx);
        gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
@@ -399,13 +412,15 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
 
        if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL,
                                         NULL, NULL)) {
-               kfree(gen_ie);
-               return -1;
+               err = -EINVAL;
+               goto out;
        }
 
        priv->gen_idx = gen_idx;
+
+ out:
        kfree(gen_ie);
-       return 0;
+       return err;
 }
 
 /* This function parses different IEs-head & tail IEs, beacon IEs,
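
The fix above computes token_len once and checks it against both the remaining tail length and IEEE_MAX_IE_SIZE before any copy, instead of trusting hdr->len. A minimal sketch of the underlying bounded IE walk, not part of the driver and simplified compared with it:

#include <linux/errno.h>
#include <linux/types.h>

/* Walk information elements, refusing any element that overruns the buffer. */
static int demo_walk_ies(const u8 *ies, int len)
{
        while (len >= 2) {
                u8 elen = ies[1];

                if (2 + elen > len)
                        return -EINVAL; /* advertised length exceeds buffer */

                /* ... handle element id ies[0], body at ies + 2, length elen ... */

                ies += 2 + elen;
                len -= 2 + elen;
        }
        return 0;
}
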
index 935778ec9a1b4f419ecc672d703c67963ba9526f..c269a0de94137905d282b49d5d53664f969dd58f 100644 (file)
@@ -1247,6 +1247,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                }
                switch (element_id) {
                case WLAN_EID_SSID:
+                       if (element_len > IEEE80211_MAX_SSID_LEN)
+                               return -EINVAL;
                        bss_entry->ssid.ssid_len = element_len;
                        memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
                               element_len);
@@ -1256,6 +1258,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                        break;
 
                case WLAN_EID_SUPP_RATES:
+                       if (element_len > MWIFIEX_SUPPORTED_RATES)
+                               return -EINVAL;
                        memcpy(bss_entry->data_rates, current_ptr + 2,
                               element_len);
                        memcpy(bss_entry->supported_rates, current_ptr + 2,
@@ -1265,6 +1269,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                        break;
 
                case WLAN_EID_FH_PARAMS:
+                       if (element_len + 2 < sizeof(*fh_param_set))
+                               return -EINVAL;
                        fh_param_set =
                                (struct ieee_types_fh_param_set *) current_ptr;
                        memcpy(&bss_entry->phy_param_set.fh_param_set,
@@ -1273,6 +1279,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                        break;
 
                case WLAN_EID_DS_PARAMS:
+                       if (element_len + 2 < sizeof(*ds_param_set))
+                               return -EINVAL;
                        ds_param_set =
                                (struct ieee_types_ds_param_set *) current_ptr;
 
@@ -1284,6 +1292,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                        break;
 
                case WLAN_EID_CF_PARAMS:
+                       if (element_len + 2 < sizeof(*cf_param_set))
+                               return -EINVAL;
                        cf_param_set =
                                (struct ieee_types_cf_param_set *) current_ptr;
                        memcpy(&bss_entry->ss_param_set.cf_param_set,
@@ -1292,6 +1302,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                        break;
 
                case WLAN_EID_IBSS_PARAMS:
+                       if (element_len + 2 < sizeof(*ibss_param_set))
+                               return -EINVAL;
                        ibss_param_set =
                                (struct ieee_types_ibss_param_set *)
                                current_ptr;
@@ -1301,10 +1313,14 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                        break;
 
                case WLAN_EID_ERP_INFO:
+                       if (!element_len)
+                               return -EINVAL;
                        bss_entry->erp_flags = *(current_ptr + 2);
                        break;
 
                case WLAN_EID_PWR_CONSTRAINT:
+                       if (!element_len)
+                               return -EINVAL;
                        bss_entry->local_constraint = *(current_ptr + 2);
                        bss_entry->sensed_11h = true;
                        break;
@@ -1345,6 +1361,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                        break;
 
                case WLAN_EID_VENDOR_SPECIFIC:
+                       if (element_len + 2 < sizeof(vendor_ie->vend_hdr))
+                               return -EINVAL;
+
                        vendor_ie = (struct ieee_types_vendor_specific *)
                                        current_ptr;
 
index cf4265cda22468c9b99d2e14c7c3de8647d92849..6284779712131f41f6ef19b8e4f42157f81fbc1e 100644 (file)
@@ -8,7 +8,8 @@
 #include "reg.h"
 #include "debug.h"
 
-void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, struct sk_buff *skb)
+static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
+                                     struct sk_buff *skb)
 {
        struct rtw_c2h_cmd *c2h;
        u8 sub_cmd_id;
@@ -47,7 +48,8 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
        }
 }
 
-void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, u8 *h2c)
+static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
+                                   u8 *h2c)
 {
        u8 box;
        u8 box_state;
index f447361f75734f37baa9a0fba35789535641c40b..b2dac460913802be8681199b85af0bf7ebd52192 100644 (file)
@@ -162,7 +162,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
        rtwdev->stats.tx_cnt = 0;
        rtwdev->stats.rx_cnt = 0;
 
-       rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data);
+       /* use atomic version to avoid taking local->iflist_mtx mutex */
+       rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data);
 
        /* fw supports only one station associated to enter lps, if there are
         * more than two stations associated to the AP, then we can not enter
index 4381b360b5b5358cc49b93b1aebe6cbaf304f4dd..404d89432c96d946ed8bdf0edcc1391f0d2e8d66 100644 (file)
@@ -144,10 +144,10 @@ static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
        struct rtw_phy_stat_iter_data *iter_data = data;
        struct rtw_dev *rtwdev = iter_data->rtwdev;
        struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
-       u8 rssi, rssi_level;
+       u8 rssi;
 
        rssi = ewma_rssi_read(&si->avg_rssi);
-       rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);
+       si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);
 
        rtw_fw_send_rssi_info(rtwdev, si);
 
@@ -423,6 +423,11 @@ static u64 rtw_phy_db_2_linear(u8 power_db)
        u8 i, j;
        u64 linear;
 
+       if (power_db > 96)
+               power_db = 96;
+       else if (power_db < 1)
+               return 1;
+
        /* 1dB ~ 96dB */
        i = (power_db - 1) >> 3;
        j = (power_db - 1) - (i << 3);
@@ -848,12 +853,13 @@ u8 rtw_vht_2s_rates[] = {
        DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
        DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
 };
-u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
-u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
-u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
-u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
-u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
-u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
+
+static u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
+static u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
+static u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
+static u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
+static u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
+static u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
 u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
        rtw_cck_rates, rtw_ofdm_rates,
        rtw_ht_1s_rates, rtw_ht_2s_rates,
index f9c67ed473d1f286e4c1acc8d694ba102ebb6b38..b42cd50b837ec0c396456989a20a3ca40977553b 100644 (file)
@@ -929,11 +929,15 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
        u32 addr;
        u8 *data;
 
+       data = kzalloc(RSI_9116_REG_SIZE, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
        status = rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR);
        if (status < 0) {
                rsi_dbg(ERR_ZONE,
                        "Unable to set ms word to common reg\n");
-               return status;
+               goto err;
        }
 
        rsi_dbg(INIT_ZONE, "%s: Bring TA out of reset\n", __func__);
@@ -944,7 +948,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
                                                  RSI_9116_REG_SIZE);
        if (status < 0) {
                rsi_dbg(ERR_ZONE, "Unable to hold TA threads\n");
-               return status;
+               goto err;
        }
 
        put_unaligned_le32(TA_SOFT_RST_CLR, data);
@@ -954,7 +958,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
                                                  RSI_9116_REG_SIZE);
        if (status < 0) {
                rsi_dbg(ERR_ZONE, "Unable to get TA out of reset\n");
-               return status;
+               goto err;
        }
 
        put_unaligned_le32(TA_PC_ZERO, data);
@@ -964,7 +968,8 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
                                                  RSI_9116_REG_SIZE);
        if (status < 0) {
                rsi_dbg(ERR_ZONE, "Unable to Reset TA PC value\n");
-               return -EINVAL;
+               status = -EINVAL;
+               goto err;
        }
 
        put_unaligned_le32(TA_RELEASE_THREAD_VALUE, data);
@@ -974,17 +979,19 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
                                                  RSI_9116_REG_SIZE);
        if (status < 0) {
                rsi_dbg(ERR_ZONE, "Unable to release TA threads\n");
-               return status;
+               goto err;
        }
 
        status = rsi_sdio_master_access_msword(adapter, MISC_CFG_BASE_ADDR);
        if (status < 0) {
                rsi_dbg(ERR_ZONE, "Unable to set ms word to common reg\n");
-               return status;
+               goto err;
        }
        rsi_dbg(INIT_ZONE, "***** TA Reset done *****\n");
 
-       return 0;
+err:
+       kfree(data);
+       return status;
 }
 
 static struct rsi_host_intf_ops sdio_host_intf_ops = {
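
The rsi change above allocates a scratch buffer for the register writes and funnels every failure through a single exit label so the buffer is freed on all paths. A minimal sketch of that shape, not part of the driver; demo_step() stands in for the real master-register writes and assumes reg_size is at least four bytes:

#include <asm/unaligned.h>
#include <linux/slab.h>

static int demo_step(u8 *data, u32 value)
{
        put_unaligned_le32(value, data);
        return 0;                       /* pretend the bus write succeeded */
}

static int demo_ta_reset(size_t reg_size)
{
        u8 *data;
        int status;

        data = kzalloc(reg_size, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        status = demo_step(data, 0x1);
        if (status < 0)
                goto err;

        status = demo_step(data, 0x2);
err:
        kfree(data);                    /* freed on success and failure alike */
        return status;
}
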
index 28cb44c61d4a37a8e77eaa8ed980b00a9d7a86ba..24d7fe7c74ed03e3bc528226e22a04dab7af2b1f 100644 (file)
@@ -303,11 +303,19 @@ static const struct attribute_group *pmem_attribute_groups[] = {
        NULL,
 };
 
-static void pmem_release_queue(void *q)
+static void __pmem_release_queue(struct percpu_ref *ref)
 {
+       struct request_queue *q;
+
+       q = container_of(ref, typeof(*q), q_usage_counter);
        blk_cleanup_queue(q);
 }
 
+static void pmem_release_queue(void *ref)
+{
+       __pmem_release_queue(ref);
+}
+
 static void pmem_freeze_queue(struct percpu_ref *ref)
 {
        struct request_queue *q;
@@ -399,12 +407,10 @@ static int pmem_attach_disk(struct device *dev,
        if (!q)
                return -ENOMEM;
 
-       if (devm_add_action_or_reset(dev, pmem_release_queue, q))
-               return -ENOMEM;
-
        pmem->pfn_flags = PFN_DEV;
        pmem->pgmap.ref = &q->q_usage_counter;
        pmem->pgmap.kill = pmem_freeze_queue;
+       pmem->pgmap.cleanup = __pmem_release_queue;
        if (is_nd_pfn(dev)) {
                if (setup_pagemap_fsdax(dev, &pmem->pgmap))
                        return -ENOMEM;
@@ -425,6 +431,9 @@ static int pmem_attach_disk(struct device *dev,
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
        } else {
+               if (devm_add_action_or_reset(dev, pmem_release_queue,
+                                       &q->q_usage_counter))
+                       return -ENOMEM;
                addr = devm_memremap(dev, pmem->phys_addr,
                                pmem->size, ARCH_MEMREMAP_PMEM);
                memcpy(&bb_res, &nsio->res, sizeof(bb_res));
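
Both the pmem hunk above and the p2pdma changes that follow move to the ->kill/->cleanup pair on dev_pagemap: kill stops new page references, a percpu_ref release callback fires when the last reference is dropped, and cleanup waits for that before tearing the queue or pool down. A minimal sketch of the underlying percpu_ref choreography, not part of the patch, with illustrative names:

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

struct demo_ref {
        struct percpu_ref ref;
        struct completion done;
};

/* Runs once the reference count has dropped to zero after the kill. */
static void demo_release(struct percpu_ref *ref)
{
        struct demo_ref *d = container_of(ref, struct demo_ref, ref);

        complete(&d->done);
}

static int demo_ref_init(struct demo_ref *d)
{
        init_completion(&d->done);
        return percpu_ref_init(&d->ref, demo_release, 0, GFP_KERNEL);
}

static void demo_ref_teardown(struct demo_ref *d)
{
        percpu_ref_kill(&d->ref);       /* percpu_ref_tryget_live() now fails */
        wait_for_completion(&d->done);  /* wait for the last put */
        percpu_ref_exit(&d->ref);
}
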
index 742928d0053ec79bf886a24939db4cdc799096d8..a98126ad9c3aa1175073d25ccd89574b9c5fa690 100644 (file)
 #include <linux/seq_buf.h>
 
 struct pci_p2pdma {
-       struct percpu_ref devmap_ref;
-       struct completion devmap_ref_done;
        struct gen_pool *pool;
        bool p2pmem_published;
 };
 
+struct p2pdma_pagemap {
+       struct dev_pagemap pgmap;
+       struct percpu_ref ref;
+       struct completion ref_done;
+};
+
 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
@@ -74,41 +78,45 @@ static const struct attribute_group p2pmem_group = {
        .name = "p2pmem",
 };
 
+static struct p2pdma_pagemap *to_p2p_pgmap(struct percpu_ref *ref)
+{
+       return container_of(ref, struct p2pdma_pagemap, ref);
+}
+
 static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
 {
-       struct pci_p2pdma *p2p =
-               container_of(ref, struct pci_p2pdma, devmap_ref);
+       struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
 
-       complete_all(&p2p->devmap_ref_done);
+       complete(&p2p_pgmap->ref_done);
 }
 
 static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
 {
-       /*
-        * pci_p2pdma_add_resource() may be called multiple times
-        * by a driver and may register the percpu_kill devm action multiple
-        * times. We only want the first action to actually kill the
-        * percpu_ref.
-        */
-       if (percpu_ref_is_dying(ref))
-               return;
-
        percpu_ref_kill(ref);
 }
 
+static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
+{
+       struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
+
+       wait_for_completion(&p2p_pgmap->ref_done);
+       percpu_ref_exit(&p2p_pgmap->ref);
+}
+
 static void pci_p2pdma_release(void *data)
 {
        struct pci_dev *pdev = data;
+       struct pci_p2pdma *p2pdma = pdev->p2pdma;
 
-       if (!pdev->p2pdma)
+       if (!p2pdma)
                return;
 
-       wait_for_completion(&pdev->p2pdma->devmap_ref_done);
-       percpu_ref_exit(&pdev->p2pdma->devmap_ref);
+       /* Flush and disable pci_alloc_p2p_mem() */
+       pdev->p2pdma = NULL;
+       synchronize_rcu();
 
-       gen_pool_destroy(pdev->p2pdma->pool);
+       gen_pool_destroy(p2pdma->pool);
        sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
-       pdev->p2pdma = NULL;
 }
 
 static int pci_p2pdma_setup(struct pci_dev *pdev)
@@ -124,12 +132,6 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
        if (!p2p->pool)
                goto out;
 
-       init_completion(&p2p->devmap_ref_done);
-       error = percpu_ref_init(&p2p->devmap_ref,
-                       pci_p2pdma_percpu_release, 0, GFP_KERNEL);
-       if (error)
-               goto out_pool_destroy;
-
        error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
        if (error)
                goto out_pool_destroy;
@@ -163,6 +165,7 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
 int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                            u64 offset)
 {
+       struct p2pdma_pagemap *p2p_pgmap;
        struct dev_pagemap *pgmap;
        void *addr;
        int error;
@@ -185,18 +188,27 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                        return error;
        }
 
-       pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
-       if (!pgmap)
+       p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
+       if (!p2p_pgmap)
                return -ENOMEM;
 
+       init_completion(&p2p_pgmap->ref_done);
+       error = percpu_ref_init(&p2p_pgmap->ref,
+                       pci_p2pdma_percpu_release, 0, GFP_KERNEL);
+       if (error)
+               goto pgmap_free;
+
+       pgmap = &p2p_pgmap->pgmap;
+
        pgmap->res.start = pci_resource_start(pdev, bar) + offset;
        pgmap->res.end = pgmap->res.start + size - 1;
        pgmap->res.flags = pci_resource_flags(pdev, bar);
-       pgmap->ref = &pdev->p2pdma->devmap_ref;
+       pgmap->ref = &p2p_pgmap->ref;
        pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
        pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
                pci_resource_start(pdev, bar);
        pgmap->kill = pci_p2pdma_percpu_kill;
+       pgmap->cleanup = pci_p2pdma_percpu_cleanup;
 
        addr = devm_memremap_pages(&pdev->dev, pgmap);
        if (IS_ERR(addr)) {
@@ -204,19 +216,22 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                goto pgmap_free;
        }
 
-       error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
+       error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
                        pci_bus_address(pdev, bar) + offset,
-                       resource_size(&pgmap->res), dev_to_node(&pdev->dev));
+                       resource_size(&pgmap->res), dev_to_node(&pdev->dev),
+                       &p2p_pgmap->ref);
        if (error)
-               goto pgmap_free;
+               goto pages_free;
 
        pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
                 &pgmap->res);
 
        return 0;
 
+pages_free:
+       devm_memunmap_pages(&pdev->dev, pgmap);
 pgmap_free:
-       devm_kfree(&pdev->dev, pgmap);
+       devm_kfree(&pdev->dev, p2p_pgmap);
        return error;
 }
 EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
@@ -585,19 +600,30 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
  */
 void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
 {
-       void *ret;
+       void *ret = NULL;
+       struct percpu_ref *ref;
 
+       /*
+        * Pairs with synchronize_rcu() in pci_p2pdma_release() to
+        * ensure pdev->p2pdma is non-NULL for the duration of the
+        * read-lock.
+        */
+       rcu_read_lock();
        if (unlikely(!pdev->p2pdma))
-               return NULL;
-
-       if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref)))
-               return NULL;
-
-       ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size);
+               goto out;
 
-       if (unlikely(!ret))
-               percpu_ref_put(&pdev->p2pdma->devmap_ref);
+       ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
+                       (void **) &ref);
+       if (!ret)
+               goto out;
 
+       if (unlikely(!percpu_ref_tryget_live(ref))) {
+               gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
+               ret = NULL;
+               goto out;
+       }
+out:
+       rcu_read_unlock();
        return ret;
 }
 EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
@@ -610,8 +636,11 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
  */
 void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
 {
-       gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size);
-       percpu_ref_put(&pdev->p2pdma->devmap_ref);
+       struct percpu_ref *ref;
+
+       gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
+                       (void **) &ref);
+       percpu_ref_put(ref);
 }
 EXPORT_SYMBOL_GPL(pci_free_p2pmem);
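
pci_p2pdma_release() above clears pdev->p2pdma and then calls synchronize_rcu(), so pci_alloc_p2pmem(), which only checks the pointer inside rcu_read_lock(), can never race with the pool being destroyed. A minimal sketch of that publish/flush idiom, not part of the patch, with illustrative names (the driver uses a plain pointer; the sketch uses the annotated RCU accessors):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_state {
        int dummy;
};

static struct demo_state __rcu *demo_state;

static int demo_publish(void)
{
        struct demo_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return -ENOMEM;
        rcu_assign_pointer(demo_state, s);
        return 0;
}

/* Readers only touch the state inside an RCU read-side section. */
static bool demo_in_use(void)
{
        struct demo_state *s;
        bool ok;

        rcu_read_lock();
        s = rcu_dereference(demo_state);
        ok = s != NULL;         /* s stays valid until rcu_read_unlock() */
        rcu_read_unlock();

        return ok;
}

static void demo_unpublish(void)
{
        struct demo_state *s = rcu_dereference_protected(demo_state, 1);

        RCU_INIT_POINTER(demo_state, NULL);     /* stop new readers */
        synchronize_rcu();                      /* flush current readers */
        kfree(s);
}
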
 
index 5eadbc3d0969f353792cfc697747b64f979b9397..98af9ecd4a90c728ebebabd6bcd96a581e06b657 100644 (file)
@@ -524,7 +524,6 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
        pci_power_up(pci_dev);
        pci_restore_state(pci_dev);
        pci_pme_restore(pci_dev);
-       pci_fixup_device(pci_fixup_resume_early, pci_dev);
 }
 
 /*
@@ -831,18 +830,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
        if (pci_dev->skip_bus_pm) {
                /*
-                * The function is running for the second time in a row without
+                * Either the device is a bridge with a child in D0 below it, or
+                * the function is running for the second time in a row without
                 * going through full resume, which is possible only during
-                * suspend-to-idle in a spurious wakeup case.  Moreover, the
-                * device was originally left in D0, so its power state should
-                * not be changed here and the device register values saved
-                * originally should be restored on resume again.
+                * suspend-to-idle in a spurious wakeup case.  The device should
+                * be in D0 at this point, but if it is a bridge, it may be
+                * necessary to save its state.
                 */
-               pci_dev->state_saved = true;
-       } else if (pci_dev->state_saved) {
-               if (pci_dev->current_state == PCI_D0)
-                       pci_dev->skip_bus_pm = true;
-       } else {
+               if (!pci_dev->state_saved)
+                       pci_save_state(pci_dev);
+       } else if (!pci_dev->state_saved) {
                pci_save_state(pci_dev);
                if (pci_power_manageable(pci_dev))
                        pci_prepare_to_sleep(pci_dev);
@@ -851,6 +848,22 @@ static int pci_pm_suspend_noirq(struct device *dev)
        dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
                pci_power_name(pci_dev->current_state));
 
+       if (pci_dev->current_state == PCI_D0) {
+               pci_dev->skip_bus_pm = true;
+               /*
+                * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any
+                * downstream device is in D0, so avoid changing the power state
+                * of the parent bridge by setting the skip_bus_pm flag for it.
+                */
+               if (pci_dev->bus->self)
+                       pci_dev->bus->self->skip_bus_pm = true;
+       }
+
+       if (pci_dev->skip_bus_pm && !pm_suspend_via_firmware()) {
+               dev_dbg(dev, "PCI PM: Skipped\n");
+               goto Fixup;
+       }
+
        pci_pm_set_unknown_state(pci_dev);
 
        /*
@@ -898,7 +911,16 @@ static int pci_pm_resume_noirq(struct device *dev)
        if (dev_pm_smart_suspend_and_suspended(dev))
                pm_runtime_set_active(dev);
 
-       pci_pm_default_resume_early(pci_dev);
+       /*
+        * In the suspend-to-idle case, devices left in D0 during suspend will
+        * stay in D0, so it is not necessary to restore or update their
+        * configuration here and attempting to put them into D0 again may
+        * confuse some firmware, so avoid doing that.
+        */
+       if (!pci_dev->skip_bus_pm || pm_suspend_via_firmware())
+               pci_pm_default_resume_early(pci_dev);
+
+       pci_fixup_device(pci_fixup_resume_early, pci_dev);
 
        if (pci_has_legacy_pm_support(pci_dev))
                return pci_legacy_resume_early(dev);
@@ -1194,6 +1216,7 @@ static int pci_pm_restore_noirq(struct device *dev)
        }
 
        pci_pm_default_resume_early(pci_dev);
+       pci_fixup_device(pci_fixup_resume_early, pci_dev);
 
        if (pci_has_legacy_pm_support(pci_dev))
                return pci_legacy_resume_early(dev);
index 687ce6817d0d9b08af94aa62092fae085b26347a..f85a1b9d129b1a7c893197bebdce2ab7a9ff2bba 100644 (file)
@@ -694,6 +694,7 @@ static int mlxreg_hotplug_remove(struct platform_device *pdev)
 
        /* Clean interrupts setup. */
        mlxreg_hotplug_unset_irq(priv);
+       devm_free_irq(&pdev->dev, priv->irq, priv);
 
        return 0;
 }
index 81642102bf65a077a3f4e4882f7aca5f0c7f99a2..8d9e30dbb5af58df58c93c4f17a4a42382e92ba3 100644 (file)
@@ -65,10 +65,12 @@ static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
 
 static struct quirk_entry quirk_asus_unknown = {
        .wapf = 0,
+       .wmi_backlight_set_devstate = true,
 };
 
 static struct quirk_entry quirk_asus_q500a = {
        .i8042_filter = asus_q500a_i8042_filter,
+       .wmi_backlight_set_devstate = true,
 };
 
 /*
@@ -79,26 +81,32 @@ static struct quirk_entry quirk_asus_q500a = {
 static struct quirk_entry quirk_asus_x55u = {
        .wapf = 4,
        .wmi_backlight_power = true,
+       .wmi_backlight_set_devstate = true,
        .no_display_toggle = true,
 };
 
 static struct quirk_entry quirk_asus_wapf4 = {
        .wapf = 4,
+       .wmi_backlight_set_devstate = true,
 };
 
 static struct quirk_entry quirk_asus_x200ca = {
        .wapf = 2,
+       .wmi_backlight_set_devstate = true,
 };
 
 static struct quirk_entry quirk_asus_ux303ub = {
        .wmi_backlight_native = true,
+       .wmi_backlight_set_devstate = true,
 };
 
 static struct quirk_entry quirk_asus_x550lb = {
+       .wmi_backlight_set_devstate = true,
        .xusb2pr = 0x01D9,
 };
 
 static struct quirk_entry quirk_asus_forceals = {
+       .wmi_backlight_set_devstate = true,
        .wmi_force_als_set = true,
 };
 
index 3e4336025e8f35b9d790a38b8e124a22e1a120a4..9b18a184e0aac89c58232681bbd5ab689cae0e2f 100644 (file)
@@ -2146,7 +2146,7 @@ static int asus_wmi_add(struct platform_device *pdev)
                err = asus_wmi_backlight_init(asus);
                if (err && err != -ENODEV)
                        goto fail_backlight;
-       } else
+       } else if (asus->driver->quirks->wmi_backlight_set_devstate)
                err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
 
        if (asus_wmi_has_fnlock_key(asus)) {
index 0930be770688a1326a18cd27d4688e83dde507ed..4f31b68642a08380c63f993dd5702e51666d32c0 100644 (file)
@@ -31,6 +31,7 @@ struct quirk_entry {
        bool store_backlight_power;
        bool wmi_backlight_power;
        bool wmi_backlight_native;
+       bool wmi_backlight_set_devstate;
        bool wmi_force_als_set;
        int wapf;
        /*
index 06cd7e818ed5dd8b9c28c5f67289d8dc90b07644..a0d0cecff55fd908a3b3096238c529a2ed657217 100644 (file)
@@ -76,12 +76,24 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
        struct platform_device *device = context;
        struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
        unsigned int val = !(event & 1); /* Even=press, Odd=release */
-       const struct key_entry *ke_rel;
+       const struct key_entry *ke, *ke_rel;
        bool autorelease;
 
        if (priv->wakeup_mode) {
-               if (sparse_keymap_entry_from_scancode(priv->input_dev, event)) {
+               ke = sparse_keymap_entry_from_scancode(priv->input_dev, event);
+               if (ke) {
                        pm_wakeup_hard_event(&device->dev);
+
+                       /*
+                        * Switch events like tablet mode will wake the device
+                        * and report the new switch position to the input
+                        * subsystem.
+                        */
+                       if (ke->type == KE_SW)
+                               sparse_keymap_report_event(priv->input_dev,
+                                                          event,
+                                                          val,
+                                                          0);
                        return;
                }
                goto out_unknown;
index cee039f574994133b02e2e24400f73e134931968..983f02b5b1060dafdb6b8a230a511edad84cf24a 100644 (file)
@@ -2032,7 +2032,7 @@ static int __init mlxplat_init(void)
 
        for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
                priv->pdev_mux[i] = platform_device_register_resndata(
-                                               &mlxplat_dev->dev,
+                                               &priv->pdev_i2c->dev,
                                                "i2c-mux-reg", i, NULL,
                                                0, &mlxplat_mux_data[i],
                                                sizeof(mlxplat_mux_data[i]));
index 88e4f3ff0cb84c0cdf38ce5e1d8660dd882dc900..673f8a128397e4d3a9080fd7dcc82f8b0b5f35ef 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/workqueue.h>
 
 #include <asm/mce.h>
 
@@ -123,16 +124,12 @@ static u64 dfs_pfn;
 /* Amount of errors after which we offline */
 static unsigned int count_threshold = COUNT_MASK;
 
-/*
- * The timer "decays" element count each timer_interval which is 24hrs by
- * default.
- */
-
-#define CEC_TIMER_DEFAULT_INTERVAL     24 * 60 * 60    /* 24 hrs */
-#define CEC_TIMER_MIN_INTERVAL          1 * 60 * 60    /* 1h */
-#define CEC_TIMER_MAX_INTERVAL    30 * 24 * 60 * 60    /* one month */
-static struct timer_list cec_timer;
-static u64 timer_interval = CEC_TIMER_DEFAULT_INTERVAL;
+/* Each element "decays" every decay_interval, which is 24hrs by default. */
+#define CEC_DECAY_DEFAULT_INTERVAL     24 * 60 * 60    /* 24 hrs */
+#define CEC_DECAY_MIN_INTERVAL          1 * 60 * 60    /* 1h */
+#define CEC_DECAY_MAX_INTERVAL    30 * 24 * 60 * 60    /* one month */
+static struct delayed_work cec_work;
+static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL;
 
 /*
  * Decrement decay value. We're using DECAY_BITS bits to denote decay of an
@@ -160,20 +157,21 @@ static void do_spring_cleaning(struct ce_array *ca)
 /*
  * @interval in seconds
  */
-static void cec_mod_timer(struct timer_list *t, unsigned long interval)
+static void cec_mod_work(unsigned long interval)
 {
        unsigned long iv;
 
-       iv = interval * HZ + jiffies;
-
-       mod_timer(t, round_jiffies(iv));
+       iv = interval * HZ;
+       mod_delayed_work(system_wq, &cec_work, round_jiffies(iv));
 }
 
-static void cec_timer_fn(struct timer_list *unused)
+static void cec_work_fn(struct work_struct *work)
 {
+       mutex_lock(&ce_mutex);
        do_spring_cleaning(&ce_arr);
+       mutex_unlock(&ce_mutex);
 
-       cec_mod_timer(&cec_timer, timer_interval);
+       cec_mod_work(decay_interval);
 }
 
 /*
@@ -183,32 +181,38 @@ static void cec_timer_fn(struct timer_list *unused)
  */
 static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
 {
+       int min = 0, max = ca->n - 1;
        u64 this_pfn;
-       int min = 0, max = ca->n;
 
-       while (min < max) {
-               int tmp = (max + min) >> 1;
+       while (min <= max) {
+               int i = (min + max) >> 1;
 
-               this_pfn = PFN(ca->array[tmp]);
+               this_pfn = PFN(ca->array[i]);
 
                if (this_pfn < pfn)
-                       min = tmp + 1;
+                       min = i + 1;
                else if (this_pfn > pfn)
-                       max = tmp;
-               else {
-                       min = tmp;
-                       break;
+                       max = i - 1;
+               else if (this_pfn == pfn) {
+                       if (to)
+                               *to = i;
+
+                       return i;
                }
        }
 
+       /*
+        * When the loop terminates without finding @pfn, min has the index of
+        * the element slot where the new @pfn should be inserted. The loop
+        * terminates when min > max, which means min points to the first
+        * element bigger than @pfn and max to the last element smaller than
+        * it, so the new @pfn belongs in between the two.
+        *
+        * For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3.
+        */
        if (to)
                *to = min;
 
-       this_pfn = PFN(ca->array[min]);
-
-       if (this_pfn == pfn)
-               return min;
-
        return -ENOKEY;
 }
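
The comment block added above explains how the rewritten binary search reports the insertion slot through *to when @pfn is not found. A small, self-contained sketch of the same search over a plain user-space array (find_slot() is an illustrative name, not the kernel function):

        #include <stdio.h>

        /* Returns the index of key, or -1; *to always receives the slot where
         * key belongs, which is what the insertion path needs. */
        static int find_slot(const unsigned long *a, int n, unsigned long key, int *to)
        {
                int min = 0, max = n - 1;

                while (min <= max) {
                        int i = (min + max) >> 1;

                        if (a[i] < key)
                                min = i + 1;
                        else if (a[i] > key)
                                max = i - 1;
                        else {
                                *to = i;
                                return i;
                        }
                }
                /* min > max: min is the first element bigger than key */
                *to = min;
                return -1;
        }

        int main(void)
        {
                unsigned long pfns[] = { 3, 7, 9, 20 };
                int to;

                find_slot(pfns, 4, 9, &to);     /* found: to == 2 */
                printf("9  -> slot %d\n", to);
                find_slot(pfns, 4, 10, &to);    /* missing: to == 3, insert before 20 */
                printf("10 -> slot %d\n", to);
                return 0;
        }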
 
@@ -374,15 +378,15 @@ static int decay_interval_set(void *data, u64 val)
 {
        *(u64 *)data = val;
 
-       if (val < CEC_TIMER_MIN_INTERVAL)
+       if (val < CEC_DECAY_MIN_INTERVAL)
                return -EINVAL;
 
-       if (val > CEC_TIMER_MAX_INTERVAL)
+       if (val > CEC_DECAY_MAX_INTERVAL)
                return -EINVAL;
 
-       timer_interval = val;
+       decay_interval = val;
 
-       cec_mod_timer(&cec_timer, timer_interval);
+       cec_mod_work(decay_interval);
        return 0;
 }
 DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n");
@@ -426,7 +430,7 @@ static int array_dump(struct seq_file *m, void *v)
 
        seq_printf(m, "Flags: 0x%x\n", ca->flags);
 
-       seq_printf(m, "Timer interval: %lld seconds\n", timer_interval);
+       seq_printf(m, "Decay interval: %lld seconds\n", decay_interval);
        seq_printf(m, "Decays: %lld\n", ca->decays_done);
 
        seq_printf(m, "Action threshold: %d\n", count_threshold);
@@ -472,7 +476,7 @@ static int __init create_debugfs_nodes(void)
        }
 
        decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d,
-                                   &timer_interval, &decay_interval_ops);
+                                   &decay_interval, &decay_interval_ops);
        if (!decay) {
                pr_warn("Error creating decay_interval debugfs node!\n");
                goto err;
@@ -508,8 +512,8 @@ void __init cec_init(void)
        if (create_debugfs_nodes())
                return;
 
-       timer_setup(&cec_timer, cec_timer_fn, 0);
-       cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL);
+       INIT_DELAYED_WORK(&cec_work, cec_work_fn);
+       schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL);
 
        pr_info("Correctable Errors collector initialized.\n");
 }
index a1b7fab91dd4d836a2c13037c249f0d8b7c160c1..d2a8f69b2665f9c4123440678deaa0d31d2c6f0c 100644 (file)
@@ -403,12 +403,12 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
        /* common for all regulators */
        tps->mfd = tps6507x_dev;
 
-       for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
+       for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++) {
                /* Register the regulators */
                tps->info[i] = info;
-               if (init_data && init_data->driver_data) {
+               if (init_data && init_data[i].driver_data) {
                        struct tps6507x_reg_platform_data *data =
-                                       init_data->driver_data;
+                                       init_data[i].driver_data;
                        info->defdcdc_default = data->defdcdc_default;
                }
 
index ee8767f5845ae8c6050235b2f08ede977c539332..9125f7f4e64c981ab2f8d00088b4ab960f21f1af 100644 (file)
@@ -299,6 +299,7 @@ static void __exit vfio_ccw_sch_exit(void)
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
        kmem_cache_destroy(vfio_ccw_io_region);
+       kmem_cache_destroy(vfio_ccw_cmd_region);
        destroy_workqueue(vfio_ccw_work_q);
 }
 module_init(vfio_ccw_sch_init);
index 1bef1da273c2b6404c6d1622e5653e5dc04109c9..8068520cf89ed078a2dc4861f76d881e8efe26a5 100644 (file)
@@ -4940,7 +4940,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
                        curr_sg->reserved[0] = 0;
                        curr_sg->reserved[1] = 0;
                        curr_sg->reserved[2] = 0;
-                       curr_sg->chain_indicator = 0x80;
+                       curr_sg->chain_indicator = IOACCEL2_CHAIN;
 
                        curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
                }
@@ -4957,6 +4957,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
                        curr_sg++;
                }
 
+               /*
+                * Set the last s/g element bit
+                */
+               (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
+
                switch (cmd->sc_data_direction) {
                case DMA_TO_DEVICE:
                        cp->direction &= ~IOACCEL2_DIRECTION_MASK;
index 21a726e2eec6ecbd3fa66221a4635adc3060c747..f6afca4b231915cd93121ea77f906f2e14c91d02 100644 (file)
@@ -517,6 +517,7 @@ struct ioaccel2_sg_element {
        u8 reserved[3];
        u8 chain_indicator;
 #define IOACCEL2_CHAIN 0x80
+#define IOACCEL2_LAST_SG 0x40
 };
 
 /*
index b3dbdb3657494cad111c4c64bdc33ed90fb0aae7..d326915e0f40f38322d5fe0fe742832254341ee8 100644 (file)
@@ -48,7 +48,7 @@ static inline void cbc_writel(u32 val, int reg)
        if (offset == -1)
                return;
 
-       writel_relaxed(val,  cpubiuctrl_base + offset);
+       writel(val, cpubiuctrl_base + offset);
 }
 
 enum cpubiuctrl_regs {
@@ -238,7 +238,9 @@ static int __init brcmstb_biuctrl_init(void)
        if (!np)
                return 0;
 
-       setup_hifcpubiuctrl_regs(np);
+       ret = setup_hifcpubiuctrl_regs(np);
+       if (ret)
+               return ret;
 
        ret = mcp_write_pairing_set();
        if (ret) {
index bc10e3194809a64666f48819d2a51a40a681dda3..ec90b44fa0cd327cf32fcc76ba3eb5ba916eba94 100644 (file)
@@ -695,8 +695,8 @@ static int ixp4xx_npe_probe(struct platform_device *pdev)
                        continue; /* NPE already disabled or not present */
                }
                npe->regs = devm_ioremap_resource(dev, res);
-               if (!npe->regs)
-                       return -ENOMEM;
+               if (IS_ERR(npe->regs))
+                       return PTR_ERR(npe->regs);
 
                if (npe_reset(npe)) {
                        dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
index dad566bfe372171747960519dafaaf04f8ae891e..d84e22dd6f9f7d79384ae55617efbd75e84018e5 100644 (file)
@@ -406,7 +406,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
        if (ret)
                spi_master_put(master);
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(spi_bitbang_start);
 
index 4c71df93a6f66f72c54c91786ec79f31660b420f..1d9b33aa1a3b53f8f4c8ff129afa8f15998c8af7 100644 (file)
@@ -428,7 +428,6 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
        }
 
        m->status = status;
-       spi_finalize_current_message(master);
 
        if (status || !cs_change) {
                ndelay(nsecs);
@@ -436,6 +435,7 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
        }
 
        fsl_spi_setup_transfer(spi, NULL);
+       spi_finalize_current_message(master);
        return 0;
 }
 
index 5e75944ad5d1cd5b813a01c683d192db0806266c..5e4654032bfa527787b66d6269b916e22b9a599b 100644 (file)
@@ -1181,10 +1181,10 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
        if (msg->status && ctlr->handle_err)
                ctlr->handle_err(ctlr, msg);
 
-       spi_finalize_current_message(ctlr);
-
        spi_res_release(ctlr, msg);
 
+       spi_finalize_current_message(ctlr);
+
        return ret;
 }
 
@@ -1307,10 +1307,15 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
                ret = ctlr->prepare_transfer_hardware(ctlr);
                if (ret) {
                        dev_err(&ctlr->dev,
-                               "failed to prepare transfer hardware\n");
+                               "failed to prepare transfer hardware: %d\n",
+                               ret);
 
                        if (ctlr->auto_runtime_pm)
                                pm_runtime_put(ctlr->dev.parent);
+
+                       ctlr->cur_msg->status = ret;
+                       spi_finalize_current_message(ctlr);
+
                        mutex_unlock(&ctlr->io_mutex);
                        return;
                }
index 6082b008969b73d2a5997f3b91e3bb07d0056b70..6b6413073584339ba9e271e17ffa63fc4f883aa4 100644 (file)
@@ -215,6 +215,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
        { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Logitech HD Webcam C270 */
+       { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
        { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
index 16ffd9fd936145d41c8097d08a298de25f2393d6..bff48a8a19848695c73af2ab63dfd5a574f12924 100644 (file)
@@ -835,19 +835,22 @@ static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
  * with corresponding information based on transfer data.
  */
 static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
-                                                struct usb_request *ureq,
-                                                unsigned int offset,
+                                                dma_addr_t dma_buff,
                                                 unsigned int len)
 {
+       struct usb_request *ureq = NULL;
        struct dwc2_dma_desc *desc = hs_ep->desc_list;
        struct scatterlist *sg;
        int i;
        u8 desc_count = 0;
 
+       if (hs_ep->req)
+               ureq = &hs_ep->req->req;
+
        /* non-DMA sg buffer */
-       if (!ureq->num_sgs) {
+       if (!ureq || !ureq->num_sgs) {
                dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
-                       ureq->dma + offset, len, true);
+                       dma_buff, len, true);
                return;
        }
 
@@ -1135,7 +1138,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
                        offset = ureq->actual;
 
                /* Fill DDMA chain entries */
-               dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq, offset,
+               dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
                                                     length);
 
                /* write descriptor chain address to control register */
@@ -2037,12 +2040,13 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
                dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
                        index);
        if (using_desc_dma(hsotg)) {
+               /* No specific buffer is needed for the ep0 ZLP */
+               dma_addr_t dma = hs_ep->desc_list_dma;
+
                if (!index)
                        dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
 
-               /* Not specific buffer needed for ep0 ZLP */
-               dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &hs_ep->desc_list,
-                       hs_ep->desc_list_dma, 0, true);
+               dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
        } else {
                dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
                            DXEPTSIZ_XFERSIZE(0),
@@ -2417,6 +2421,10 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
                        dwc2_gadget_incr_frame_num(hs_ep);
        }
 
+       /* Set actual frame number for completed transfers */
+       if (!using_desc_dma(hsotg) && hs_ep->isochronous)
+               req->frame_number = hsotg->frame_number;
+
        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
 }
 
index b50ec3714fd8d8bff74110cf36799c1b6f952cf4..2192a2873c7c056e44f2a66a0fe75d29e29bfb50 100644 (file)
@@ -2480,8 +2480,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb)
                return;
 
        /* Restore urb->transfer_buffer from the end of the allocated area */
-       memcpy(&stored_xfer_buffer, urb->transfer_buffer +
-              urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
+       memcpy(&stored_xfer_buffer,
+              PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
+                        dma_get_cache_alignment()),
+              sizeof(urb->transfer_buffer));
 
        if (usb_urb_dir_in(urb)) {
                if (usb_pipeisoc(urb->pipe))
@@ -2513,6 +2515,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
         * DMA
         */
        kmalloc_size = urb->transfer_buffer_length +
+               (dma_get_cache_alignment() - 1) +
                sizeof(urb->transfer_buffer);
 
        kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
@@ -2523,7 +2526,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
         * Position value of original urb->transfer_buffer pointer to the end
         * of allocation for later referencing
         */
-       memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+       memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
+                        dma_get_cache_alignment()),
               &urb->transfer_buffer, sizeof(urb->transfer_buffer));
 
        if (usb_urb_dir_out(urb))
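
The hunks above enlarge the bounce buffer by dma_get_cache_alignment() - 1 bytes and stash the original transfer_buffer pointer at a cache-aligned offset past the data, so the free path can recover it from the same aligned location. A minimal user-space sketch of that stash-and-recover scheme, assuming a fixed 64-byte alignment and an ALIGN_PTR() helper standing in for PTR_ALIGN():

        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        #define CACHE_ALIGN 64
        #define ALIGN_PTR(p, a) \
                ((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

        int main(void)
        {
                char payload[100] = "urb data";
                size_t len = sizeof(payload);

                /* Room for the data, the padding up to the aligned boundary,
                 * and the stashed original pointer. */
                char *buf = malloc(len + (CACHE_ALIGN - 1) + sizeof(void *));
                if (!buf)
                        return 1;

                memcpy(buf, payload, len);

                /* Stash the original pointer at the first aligned address past
                 * the data, exactly where the free path will look for it. */
                void *orig = payload;
                memcpy(ALIGN_PTR(buf + len, CACHE_ALIGN), &orig, sizeof(orig));

                /* Free path: recover the pointer from the same aligned spot. */
                void *restored;
                memcpy(&restored, ALIGN_PTR(buf + len, CACHE_ALIGN), sizeof(restored));
                printf("restored == payload? %s\n",
                       restored == (void *)payload ? "yes" : "no");

                free(buf);
                return 0;
        }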
@@ -2608,7 +2612,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
        chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
        chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
        chan->speed = qh->dev_speed;
-       chan->max_packet = dwc2_max_packet(qh->maxp);
+       chan->max_packet = qh->maxp;
 
        chan->xfer_started = 0;
        chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
@@ -2686,7 +2690,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
                 * This value may be modified when the transfer is started
                 * to reflect the actual transfer length
                 */
-               chan->multi_count = dwc2_hb_mult(qh->maxp);
+               chan->multi_count = qh->maxp_mult;
 
        if (hsotg->params.dma_desc_enable) {
                chan->desc_list_addr = qh->desc_list_dma;
@@ -3806,19 +3810,21 @@ static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
 
 static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
                                      struct dwc2_hcd_urb *urb, u8 dev_addr,
-                                     u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps)
+                                     u8 ep_num, u8 ep_type, u8 ep_dir,
+                                     u16 maxp, u16 maxp_mult)
 {
        if (dbg_perio() ||
            ep_type == USB_ENDPOINT_XFER_BULK ||
            ep_type == USB_ENDPOINT_XFER_CONTROL)
                dev_vdbg(hsotg->dev,
-                        "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
-                        dev_addr, ep_num, ep_dir, ep_type, mps);
+                        "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
+                        dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult);
        urb->pipe_info.dev_addr = dev_addr;
        urb->pipe_info.ep_num = ep_num;
        urb->pipe_info.pipe_type = ep_type;
        urb->pipe_info.pipe_dir = ep_dir;
-       urb->pipe_info.mps = mps;
+       urb->pipe_info.maxp = maxp;
+       urb->pipe_info.maxp_mult = maxp_mult;
 }
 
 /*
@@ -3909,8 +3915,9 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
                                        dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
                                        "IN" : "OUT");
                                dev_dbg(hsotg->dev,
-                                       "      Max packet size: %d\n",
-                                       dwc2_hcd_get_mps(&urb->pipe_info));
+                                       "      Max packet size: %d (%d mult)\n",
+                                       dwc2_hcd_get_maxp(&urb->pipe_info),
+                                       dwc2_hcd_get_maxp_mult(&urb->pipe_info));
                                dev_dbg(hsotg->dev,
                                        "      transfer_buffer: %p\n",
                                        urb->buf);
@@ -4510,8 +4517,10 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
        }
 
        dev_vdbg(hsotg->dev, "  Speed: %s\n", speed);
-       dev_vdbg(hsotg->dev, "  Max packet size: %d\n",
-                usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+       dev_vdbg(hsotg->dev, "  Max packet size: %d (%d mult)\n",
+                usb_endpoint_maxp(&urb->ep->desc),
+                usb_endpoint_maxp_mult(&urb->ep->desc));
+
        dev_vdbg(hsotg->dev, "  Data buffer length: %d\n",
                 urb->transfer_buffer_length);
        dev_vdbg(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
@@ -4594,8 +4603,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
        dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
                                  usb_pipeendpoint(urb->pipe), ep_type,
                                  usb_pipein(urb->pipe),
-                                 usb_maxpacket(urb->dev, urb->pipe,
-                                               !(usb_pipein(urb->pipe))));
+                                 usb_endpoint_maxp(&ep->desc),
+                                 usb_endpoint_maxp_mult(&ep->desc));
 
        buf = urb->transfer_buffer;
 
index c089ffa1f0a8fab44057ab4a2bc671a391c91062..ce6445a065889bf4a4a459bec129f231fd854f07 100644 (file)
@@ -171,7 +171,8 @@ struct dwc2_hcd_pipe_info {
        u8 ep_num;
        u8 pipe_type;
        u8 pipe_dir;
-       u16 mps;
+       u16 maxp;
+       u16 maxp_mult;
 };
 
 struct dwc2_hcd_iso_packet_desc {
@@ -264,6 +265,7 @@ struct dwc2_hs_transfer_time {
  *                       - USB_ENDPOINT_XFER_ISOC
  * @ep_is_in:           Endpoint direction
  * @maxp:               Value from wMaxPacketSize field of Endpoint Descriptor
+ * @maxp_mult:          Multiplier for maxp
  * @dev_speed:          Device speed. One of the following values:
  *                       - USB_SPEED_LOW
  *                       - USB_SPEED_FULL
@@ -340,6 +342,7 @@ struct dwc2_qh {
        u8 ep_type;
        u8 ep_is_in;
        u16 maxp;
+       u16 maxp_mult;
        u8 dev_speed;
        u8 data_toggle;
        u8 ping_state;
@@ -503,9 +506,14 @@ static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
        return pipe->pipe_type;
 }
 
-static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
+static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
+{
+       return pipe->maxp;
+}
+
+static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
 {
-       return pipe->mps;
+       return pipe->maxp_mult;
 }
 
 static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
@@ -620,12 +628,6 @@ static inline bool dbg_urb(struct urb *urb)
 static inline bool dbg_perio(void) { return false; }
 #endif
 
-/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */
-#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03))
-
-/* Packet size for any kind of endpoint descriptor */
-#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff)
-
 /*
  * Returns true if frame1 index is greater than frame2 index. The comparison
  * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
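
For reference, the two macros deleted above decoded a raw wMaxPacketSize value into the packet size (bits 10:0) and the high-bandwidth multiplier (bits 12:11 plus one); the patch carries maxp and maxp_mult separately instead. A tiny worked example of that decoding (the 0x1400 sample value is just an illustration):

        #include <stdio.h>

        int main(void)
        {
                unsigned int wmaxpacketsize = 0x1400;   /* 1024-byte packets, 3 per microframe */
                unsigned int maxp = wmaxpacketsize & 0x07ff;                /* bits 10:0  */
                unsigned int mult = 1 + ((wmaxpacketsize >> 11) & 0x03);    /* bits 12:11 */

                printf("maxp=%u maxp_mult=%u bytes/uframe=%u\n", maxp, mult, maxp * mult);
                return 0;
        }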
index 88b5dcf3aefc57f6ee91e2200cb95e91284d2919..a052d39b4375e469a59cde0e9220a5c900a0f54c 100644 (file)
@@ -1617,8 +1617,9 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
 
        dev_err(hsotg->dev, "  Speed: %s\n", speed);
 
-       dev_err(hsotg->dev, "  Max packet size: %d\n",
-               dwc2_hcd_get_mps(&urb->pipe_info));
+       dev_err(hsotg->dev, "  Max packet size: %d (mult %d)\n",
+               dwc2_hcd_get_maxp(&urb->pipe_info),
+               dwc2_hcd_get_maxp_mult(&urb->pipe_info));
        dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
        dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
                urb->buf, (unsigned long)urb->dma);
index ea3aa640c15c18535a8ea2299f1e26f74e8a29f9..68bbac64b7536aec43574d543be8ff758b718fa9 100644 (file)
@@ -708,7 +708,7 @@ static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
 static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
                                      struct dwc2_qh *qh)
 {
-       int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
+       int bytecount = qh->maxp_mult * qh->maxp;
        int ls_search_slice;
        int err = 0;
        int host_interval_in_sched;
@@ -1332,7 +1332,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
        u32 max_channel_xfer_size;
        int status = 0;
 
-       max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
+       max_xfer_size = qh->maxp * qh->maxp_mult;
        max_channel_xfer_size = hsotg->params.max_transfer_size;
 
        if (max_xfer_size > max_channel_xfer_size) {
@@ -1517,8 +1517,9 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
        u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
        bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
                         dev_speed != USB_SPEED_HIGH);
-       int maxp = dwc2_hcd_get_mps(&urb->pipe_info);
-       int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp);
+       int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
+       int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
+       int bytecount = maxp_mult * maxp;
        char *speed, *type;
 
        /* Initialize QH */
@@ -1531,6 +1532,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 
        qh->data_toggle = DWC2_HC_PID_DATA0;
        qh->maxp = maxp;
+       qh->maxp_mult = maxp_mult;
        INIT_LIST_HEAD(&qh->qtd_list);
        INIT_LIST_HEAD(&qh->qh_list_entry);
 
index 263804d154a77d2d54e4809cde3a54e1387f3882..00e3f66836a982352bf3c3f31907fa3c9154df5a 100644 (file)
@@ -1342,12 +1342,15 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
 static int fusb300_remove(struct platform_device *pdev)
 {
        struct fusb300 *fusb300 = platform_get_drvdata(pdev);
+       int i;
 
        usb_del_gadget_udc(&fusb300->gadget);
        iounmap(fusb300->reg);
        free_irq(platform_get_irq(pdev, 0), fusb300);
 
        fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+       for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+               kfree(fusb300->ep[i]);
        kfree(fusb300);
 
        return 0;
@@ -1491,6 +1494,8 @@ static int fusb300_probe(struct platform_device *pdev)
                if (fusb300->ep0_req)
                        fusb300_free_request(&fusb300->ep[0]->ep,
                                fusb300->ep0_req);
+               for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+                       kfree(fusb300->ep[i]);
                kfree(fusb300);
        }
        if (reg)
index d8f1c60793ed668d4023163361e64cc1ab7dbc1d..5f1b14f3e5a07ba0282cbf0ec56535e1aef95608 100644 (file)
@@ -937,8 +937,7 @@ static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
        dma_addr_t                      dma;
        struct lpc32xx_usbd_dd_gad      *dd;
 
-       dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
-                       udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
+       dd = dma_pool_alloc(udc->dd_cache, GFP_ATOMIC | GFP_DMA, &dma);
        if (dd)
                dd->this_dma = dma;
 
@@ -3070,9 +3069,9 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
        }
 
        udc->udp_baseaddr = devm_ioremap_resource(dev, res);
-       if (!udc->udp_baseaddr) {
+       if (IS_ERR(udc->udp_baseaddr)) {
                dev_err(udc->dev, "IO map failure\n");
-               return -ENOMEM;
+               return PTR_ERR(udc->udp_baseaddr);
        }
 
        /* Get USB device clock */
index 1b1bb0ad40c3a5c53e25ff83ac628a7a95d9fca7..6fa16ab31e2e60a3c8ff4675afd127c6cb33de16 100644 (file)
@@ -63,6 +63,7 @@
 
 #define ANADIG_USB1_CHRG_DETECT_SET            0x1b4
 #define ANADIG_USB1_CHRG_DETECT_CLR            0x1b8
+#define ANADIG_USB2_CHRG_DETECT_SET            0x214
 #define ANADIG_USB1_CHRG_DETECT_EN_B           BIT(20)
 #define ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B     BIT(19)
 #define ANADIG_USB1_CHRG_DETECT_CHK_CONTACT    BIT(18)
@@ -250,6 +251,19 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
        if (mxs_phy->data->flags & MXS_PHY_NEED_IP_FIX)
                writel(BM_USBPHY_IP_FIX, base + HW_USBPHY_IP_SET);
 
+       if (mxs_phy->regmap_anatop) {
+               unsigned int reg = mxs_phy->port_id ?
+                       ANADIG_USB1_CHRG_DETECT_SET :
+                       ANADIG_USB2_CHRG_DETECT_SET;
+               /*
+                * The external charger detector needs to be disabled,
+                * or the signal at DP will be poor
+                */
+               regmap_write(mxs_phy->regmap_anatop, reg,
+                            ANADIG_USB1_CHRG_DETECT_EN_B |
+                            ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+       }
+
        mxs_phy_tx_init(mxs_phy);
 
        return 0;
index 83869065b8022ba68b145db6756cc6f2f9e3b941..a0aaf06353596cc35058236f0bd511e613dc67f0 100644 (file)
@@ -1171,6 +1171,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x1900),                          /* Telit LN940 (QMI) */
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),    /* Telit LN940 (MBIM) */
@@ -1772,6 +1776,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
          .driver_info = RSVD(5) | RSVD(6) },
        { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) },   /* Simcom SIM7500/SIM7600 MBIM mode */
+       { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff),     /* Simcom SIM7500/SIM7600 RNDIS mode */
+         .driver_info = RSVD(7) },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
          .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
index 55122ac84518ad62de2ff5458e9c7a4c744b4436..d7abde14b3cf50b1002665880738bc679ee6ece3 100644 (file)
@@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
        { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
        { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
+       { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
        { }                                     /* Terminating entry */
 };
 
index 559941ca884daf353cc022e41755fc9bc6d54567..b0175f17d1a2b67c7c53611bc002e8802cd351a8 100644 (file)
 #define SMART_VENDOR_ID        0x0b8c
 #define SMART_PRODUCT_ID       0x2303
 
+/* Allied Telesis VT-Kit3 */
+#define AT_VENDOR_ID           0x0caa
+#define AT_VTKIT3_PRODUCT_ID   0x3001
index 6b2140f966ef87e751171373a5ab8e2756e5c33f..7e14c2d7cf734f7ac00c03ae7b5b2c87845cfe74 100644 (file)
@@ -17,6 +17,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
                "USB Card Reader",
                USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
 
+UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999,
+               "Realtek",
+               "USB Card Reader",
+               USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
 UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
                "Realtek",
                "USB Card Reader",
index 76299b6ff06dae4eb29e49960d7403b66c39e3d8..74cb3c2ecb3476a7c2ea9f66574c782d6018b712 100644 (file)
@@ -192,7 +192,7 @@ EXPORT_SYMBOL_GPL(typec_altmode_vdm);
 const struct typec_altmode *
 typec_altmode_get_partner(struct typec_altmode *adev)
 {
-       return &to_altmode(adev)->partner->adev;
+       return adev ? &to_altmode(adev)->partner->adev : NULL;
 }
 EXPORT_SYMBOL_GPL(typec_altmode_get_partner);
 
index 9d46aa9e4e35c197f6a0690a67c92ab6f54114f8..bf63074675fc4c04b6dd165d06885f9907dd0035 100644 (file)
@@ -862,8 +862,10 @@ static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
 
 not_signed_fw:
        wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
-       if (!wr_buf)
-               return -ENOMEM;
+       if (!wr_buf) {
+               err = -ENOMEM;
+               goto release_fw;
+       }
 
        err = ccg_cmd_enter_flashing(uc);
        if (err)
index 3cc1a05fde1c9de281900d8768dfd16bf0e3dbad..ae23151442cbd727d80633c0cbc6dd1ecb7ec7ff 100644 (file)
@@ -102,56 +102,35 @@ static void mdev_put_parent(struct mdev_parent *parent)
                kref_put(&parent->ref, mdev_release_parent);
 }
 
-static int mdev_device_create_ops(struct kobject *kobj,
-                                 struct mdev_device *mdev)
+/* Caller must hold parent unreg_sem read or write lock */
+static void mdev_device_remove_common(struct mdev_device *mdev)
 {
-       struct mdev_parent *parent = mdev->parent;
-       int ret;
-
-       ret = parent->ops->create(kobj, mdev);
-       if (ret)
-               return ret;
-
-       ret = sysfs_create_groups(&mdev->dev.kobj,
-                                 parent->ops->mdev_attr_groups);
-       if (ret)
-               parent->ops->remove(mdev);
-
-       return ret;
-}
-
-/*
- * mdev_device_remove_ops gets called from sysfs's 'remove' and when parent
- * device is being unregistered from mdev device framework.
- * - 'force_remove' is set to 'false' when called from sysfs's 'remove' which
- *   indicates that if the mdev device is active, used by VMM or userspace
- *   application, vendor driver could return error then don't remove the device.
- * - 'force_remove' is set to 'true' when called from mdev_unregister_device()
- *   which indicate that parent device is being removed from mdev device
- *   framework so remove mdev device forcefully.
- */
-static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
-{
-       struct mdev_parent *parent = mdev->parent;
+       struct mdev_parent *parent;
+       struct mdev_type *type;
        int ret;
 
-       /*
-        * Vendor driver can return error if VMM or userspace application is
-        * using this mdev device.
-        */
+       type = to_mdev_type(mdev->type_kobj);
+       mdev_remove_sysfs_files(&mdev->dev, type);
+       device_del(&mdev->dev);
+       parent = mdev->parent;
+       lockdep_assert_held(&parent->unreg_sem);
        ret = parent->ops->remove(mdev);
-       if (ret && !force_remove)
-               return ret;
+       if (ret)
+               dev_err(&mdev->dev, "Remove failed: err=%d\n", ret);
 
-       sysfs_remove_groups(&mdev->dev.kobj, parent->ops->mdev_attr_groups);
-       return 0;
+       /* Balances with device_initialize() */
+       put_device(&mdev->dev);
+       mdev_put_parent(parent);
 }
 
 static int mdev_device_remove_cb(struct device *dev, void *data)
 {
-       if (dev_is_mdev(dev))
-               mdev_device_remove(dev, true);
+       if (dev_is_mdev(dev)) {
+               struct mdev_device *mdev;
 
+               mdev = to_mdev_device(dev);
+               mdev_device_remove_common(mdev);
+       }
        return 0;
 }
 
@@ -193,6 +172,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
        }
 
        kref_init(&parent->ref);
+       init_rwsem(&parent->unreg_sem);
 
        parent->dev = dev;
        parent->ops = ops;
@@ -251,21 +231,23 @@ void mdev_unregister_device(struct device *dev)
        dev_info(dev, "MDEV: Unregistering\n");
 
        list_del(&parent->next);
+       mutex_unlock(&parent_list_lock);
+
+       down_write(&parent->unreg_sem);
+
        class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
 
        device_for_each_child(dev, NULL, mdev_device_remove_cb);
 
        parent_remove_sysfs_files(parent);
+       up_write(&parent->unreg_sem);
 
-       mutex_unlock(&parent_list_lock);
        mdev_put_parent(parent);
 }
 EXPORT_SYMBOL(mdev_unregister_device);
 
-static void mdev_device_release(struct device *dev)
+static void mdev_device_free(struct mdev_device *mdev)
 {
-       struct mdev_device *mdev = to_mdev_device(dev);
-
        mutex_lock(&mdev_list_lock);
        list_del(&mdev->next);
        mutex_unlock(&mdev_list_lock);
@@ -274,6 +256,13 @@ static void mdev_device_release(struct device *dev)
        kfree(mdev);
 }
 
+static void mdev_device_release(struct device *dev)
+{
+       struct mdev_device *mdev = to_mdev_device(dev);
+
+       mdev_device_free(mdev);
+}
+
 int mdev_device_create(struct kobject *kobj,
                       struct device *dev, const guid_t *uuid)
 {
@@ -310,46 +299,55 @@ int mdev_device_create(struct kobject *kobj,
 
        mdev->parent = parent;
 
+       /* Check if parent unregistration has started */
+       if (!down_read_trylock(&parent->unreg_sem)) {
+               mdev_device_free(mdev);
+               ret = -ENODEV;
+               goto mdev_fail;
+       }
+
+       device_initialize(&mdev->dev);
        mdev->dev.parent  = dev;
        mdev->dev.bus     = &mdev_bus_type;
        mdev->dev.release = mdev_device_release;
        dev_set_name(&mdev->dev, "%pUl", uuid);
+       mdev->dev.groups = parent->ops->mdev_attr_groups;
+       mdev->type_kobj = kobj;
 
-       ret = device_register(&mdev->dev);
-       if (ret) {
-               put_device(&mdev->dev);
-               goto mdev_fail;
-       }
+       ret = parent->ops->create(kobj, mdev);
+       if (ret)
+               goto ops_create_fail;
 
-       ret = mdev_device_create_ops(kobj, mdev);
+       ret = device_add(&mdev->dev);
        if (ret)
-               goto create_fail;
+               goto add_fail;
 
        ret = mdev_create_sysfs_files(&mdev->dev, type);
-       if (ret) {
-               mdev_device_remove_ops(mdev, true);
-               goto create_fail;
-       }
+       if (ret)
+               goto sysfs_fail;
 
-       mdev->type_kobj = kobj;
        mdev->active = true;
        dev_dbg(&mdev->dev, "MDEV: created\n");
+       up_read(&parent->unreg_sem);
 
        return 0;
 
-create_fail:
-       device_unregister(&mdev->dev);
+sysfs_fail:
+       device_del(&mdev->dev);
+add_fail:
+       parent->ops->remove(mdev);
+ops_create_fail:
+       up_read(&parent->unreg_sem);
+       put_device(&mdev->dev);
 mdev_fail:
        mdev_put_parent(parent);
        return ret;
 }
 
-int mdev_device_remove(struct device *dev, bool force_remove)
+int mdev_device_remove(struct device *dev)
 {
        struct mdev_device *mdev, *tmp;
        struct mdev_parent *parent;
-       struct mdev_type *type;
-       int ret;
 
        mdev = to_mdev_device(dev);
 
@@ -372,19 +370,13 @@ int mdev_device_remove(struct device *dev, bool force_remove)
        mdev->active = false;
        mutex_unlock(&mdev_list_lock);
 
-       type = to_mdev_type(mdev->type_kobj);
        parent = mdev->parent;
+       /* Check if parent unregistration has started */
+       if (!down_read_trylock(&parent->unreg_sem))
+               return -ENODEV;
 
-       ret = mdev_device_remove_ops(mdev, force_remove);
-       if (ret) {
-               mdev->active = true;
-               return ret;
-       }
-
-       mdev_remove_sysfs_files(dev, type);
-       device_unregister(dev);
-       mdev_put_parent(parent);
-
+       mdev_device_remove_common(mdev);
+       up_read(&parent->unreg_sem);
        return 0;
 }
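
The new unreg_sem usage above lets mdev_device_create() and mdev_device_remove() refuse to run once parent unregistration has taken the semaphore for writing. A minimal user-space sketch of the same idea with a pthread rwlock (create_device() and unregister_parent() are illustrative stand-ins, not the mdev API):

        #include <pthread.h>
        #include <stdio.h>

        static pthread_rwlock_t unreg_lock = PTHREAD_RWLOCK_INITIALIZER;

        /* Device creation: proceed only if unregistration has not started. */
        static int create_device(void)
        {
                if (pthread_rwlock_tryrdlock(&unreg_lock) != 0)
                        return -1;              /* parent is going away: bail out */
                printf("creating device\n");    /* ops->create() + device_add() analogue */
                pthread_rwlock_unlock(&unreg_lock);
                return 0;
        }

        /* Parent unregistration: exclude creators, then remove children. */
        static void unregister_parent(void)
        {
                pthread_rwlock_wrlock(&unreg_lock);
                printf("removing children\n");  /* device_for_each_child() analogue */
                pthread_rwlock_unlock(&unreg_lock);
        }

        int main(void)
        {
                create_device();        /* succeeds: no unregistration in progress */
                unregister_parent();
                return 0;
        }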
 
index 36cbbdb754deade31338b715a0750b0172553617..39876752627679149180e19492893a24109f5597 100644 (file)
@@ -23,6 +23,8 @@ struct mdev_parent {
        struct list_head next;
        struct kset *mdev_types_kset;
        struct list_head type_list;
+       /* Synchronize device creation/removal with parent unregistration */
+       struct rw_semaphore unreg_sem;
 };
 
 struct mdev_device {
@@ -60,6 +62,6 @@ void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type);
 
 int  mdev_device_create(struct kobject *kobj,
                        struct device *dev, const guid_t *uuid);
-int  mdev_device_remove(struct device *dev, bool force_remove);
+int  mdev_device_remove(struct device *dev);
 
 #endif /* MDEV_PRIVATE_H */
index cbf94b8165ea8a78c77cbd45515b3945a250fcb8..ffa3dcebf20187609bb3c47a27aa984607ca26d1 100644 (file)
@@ -236,11 +236,9 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
        if (val && device_remove_file_self(dev, attr)) {
                int ret;
 
-               ret = mdev_device_remove(dev, false);
-               if (ret) {
-                       device_create_file(dev, attr);
+               ret = mdev_device_remove(dev);
+               if (ret)
                        return ret;
-               }
        }
 
        return count;
index 0657b0b57daeca6f275ec5497a836990d5c45888..d53f3493a6b98aedc1d6404239fa1bfb8216c431 100644 (file)
@@ -202,6 +202,15 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
+
+       /*
+        * IO TLB memory already allocated. Just use it.
+        */
+       if (io_tlb_start != 0) {
+               xen_io_tlb_start = phys_to_virt(io_tlb_start);
+               goto end;
+       }
+
        /*
         * Get IO TLB memory from any location.
         */
@@ -231,7 +240,6 @@ int __ref xen_swiotlb_init(int verbose, bool early)
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
-       xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
@@ -258,6 +266,8 @@ int __ref xen_swiotlb_init(int verbose, bool early)
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
 
+end:
+       xen_io_tlb_end = xen_io_tlb_start + bytes;
        if (!rc)
                swiotlb_set_max_segment(PAGE_SIZE);
 
index 1aee51a9f3bfa28f23fbb26d131be6bfc650ba33..5faf057f6f37f11adbfbb7ce48d9dacc74c52605 100644 (file)
@@ -10831,17 +10831,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        remove_em = (atomic_read(&block_group->trimming) == 0);
        spin_unlock(&block_group->lock);
 
-       if (remove_em) {
-               struct extent_map_tree *em_tree;
-
-               em_tree = &fs_info->mapping_tree.map_tree;
-               write_lock(&em_tree->lock);
-               remove_extent_mapping(em_tree, em);
-               write_unlock(&em_tree->lock);
-               /* once for the tree */
-               free_extent_map(em);
-       }
-
        mutex_unlock(&fs_info->chunk_mutex);
 
        ret = remove_block_group_free_space(trans, block_group);
@@ -10858,6 +10847,19 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                goto out;
 
        ret = btrfs_del_item(trans, root, path);
+       if (ret)
+               goto out;
+
+       if (remove_em) {
+               struct extent_map_tree *em_tree;
+
+               em_tree = &fs_info->mapping_tree.map_tree;
+               write_lock(&em_tree->lock);
+               remove_extent_mapping(em_tree, em);
+               write_unlock(&em_tree->lock);
+               /* once for the tree */
+               free_extent_map(em);
+       }
 out:
        if (remove_rsv)
                btrfs_delayed_refs_rsv_release(fs_info, 1);
@@ -11137,13 +11139,11 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
  * it while performing the free space search since we have already
  * held back allocations.
  */
-static int btrfs_trim_free_extents(struct btrfs_device *device,
-                                  struct fstrim_range *range, u64 *trimmed)
+static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
 {
-       u64 start, len = 0, end = 0;
+       u64 start = SZ_1M, len = 0, end = 0;
        int ret;
 
-       start = max_t(u64, range->start, SZ_1M);
        *trimmed = 0;
 
        /* Discard not supported = nothing to do. */
@@ -11186,22 +11186,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
                        break;
                }
 
-               /* Keep going until we satisfy minlen or reach end of space */
-               if (len < range->minlen) {
-                       mutex_unlock(&fs_info->chunk_mutex);
-                       start += len;
-                       continue;
-               }
-
-               /* If we are out of the passed range break */
-               if (start > range->start + range->len - 1) {
-                       mutex_unlock(&fs_info->chunk_mutex);
-                       break;
-               }
-
-               start = max(range->start, start);
-               len = min(range->len, len);
-
                ret = btrfs_issue_discard(device->bdev, start, len,
                                          &bytes);
                if (!ret)
@@ -11216,10 +11200,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
                start += len;
                *trimmed += bytes;
 
-               /* We've trimmed enough */
-               if (*trimmed >= range->len)
-                       break;
-
                if (fatal_signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
@@ -11303,7 +11283,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        devices = &fs_info->fs_devices->devices;
        list_for_each_entry(device, devices, dev_list) {
-               ret = btrfs_trim_free_extents(device, range, &group_trimmed);
+               ret = btrfs_trim_free_extents(device, &group_trimmed);
                if (ret) {
                        dev_failed++;
                        dev_ret = ret;
index 6dafa857bbb9a9bd003fc55aa8a0677a86a22fd5..2a1be0d1a69866754fe2932ee72d03ff4fe98a0e 100644 (file)
@@ -312,8 +312,6 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
                        btrfs_abort_transaction(trans, ret);
                        goto out_end_trans;
                }
-               set_bit(BTRFS_INODE_COPY_EVERYTHING,
-                       &BTRFS_I(inode)->runtime_flags);
        } else {
                ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL,
                                     0, 0);
index 10d9589001a9d7a833b756214f2bc25ea742303a..bb5bd49573b4cc116274d92d0bef180b0c1ec19f 100644 (file)
@@ -747,6 +747,7 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
        u64 total = 0;
        int i;
 
+again:
        do {
                enqueued = 0;
                mutex_lock(&fs_devices->device_list_mutex);
@@ -758,6 +759,10 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
                mutex_unlock(&fs_devices->device_list_mutex);
                total += enqueued;
        } while (enqueued && total < 10000);
+       if (fs_devices->seed) {
+               fs_devices = fs_devices->seed;
+               goto again;
+       }
 
        if (enqueued == 0)
                return;
index 78b6ba2029e8f0bcfbbdc0dd66a153db442fa335..95d9aebff2c4b0270d7db51f1da30b8247079b6a 100644 (file)
@@ -213,6 +213,9 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
        }
 out:
        btrfs_free_path(path);
+       if (!ret)
+               set_bit(BTRFS_INODE_COPY_EVERYTHING,
+                       &BTRFS_I(inode)->runtime_flags);
        return ret;
 }
 
@@ -236,7 +239,6 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
 
        inode_inc_iversion(inode);
        inode->i_ctime = current_time(inode);
-       set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
 out:
@@ -388,8 +390,6 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
        if (!ret) {
                inode_inc_iversion(inode);
                inode->i_ctime = current_time(inode);
-               set_bit(BTRFS_INODE_COPY_EVERYTHING,
-                       &BTRFS_I(inode)->runtime_flags);
                ret = btrfs_update_inode(trans, root, inode);
                BUG_ON(ret);
        }
index 24ea19cfe07e216b650aa407c4ad8ffff85fa6b1..ea8237513dfaf3342079ded75a4f562a4de339b6 100644 (file)
@@ -1317,16 +1317,6 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
        unsigned reqsize;
        unsigned int hash;
 
-       /*
-        * Require sane minimum read buffer - that has capacity for fixed part
-        * of any request header + negotated max_write room for data. If the
-        * requirement is not satisfied return EINVAL to the filesystem server
-        * to indicate that it is not following FUSE server/client contract.
-        * Don't dequeue / abort any request.
-        */
-       if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER, 4096 + fc->max_write))
-               return -EINVAL;
-
  restart:
        spin_lock(&fiq->waitq.lock);
        err = -EAGAIN;
index c78ccaf83ef85f28b4a41f0edb3e56010cd1f724..93ea1d529aa398e68937f3d10fd84b7d2ff7fbdc 100644 (file)
@@ -991,9 +991,12 @@ static void gfs2_write_unlock(struct inode *inode)
 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
                                   unsigned len, struct iomap *iomap)
 {
+       unsigned int blockmask = i_blocksize(inode) - 1;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
+       unsigned int blocks;
 
-       return gfs2_trans_begin(sdp, RES_DINODE + (len >> inode->i_blkbits), 0);
+       blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
+       return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
 }
 
 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
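
The fix above changes the reservation from len >> i_blkbits to ((pos & blockmask) + len + blockmask) >> i_blkbits, i.e. it counts every block the write touches rather than only whole blocks of length. A short worked example with a hypothetical 4 KiB block size shows the difference:

        #include <stdio.h>

        int main(void)
        {
                unsigned int blocksize = 4096, blkbits = 12;
                unsigned int blockmask = blocksize - 1;
                unsigned long long pos = 6144;  /* write starts 2 KiB into block 1 */
                unsigned int len = 8192;        /* and ends partway into block 3 */

                /* The old formula ignores the offset inside the first block;
                 * the fixed one rounds both ends out to block boundaries. */
                unsigned int old_blocks = len >> blkbits;
                unsigned int blocks = ((pos & blockmask) + len + blockmask) >> blkbits;

                printf("old=%u fixed=%u\n", old_blocks, blocks);    /* old=2 fixed=3 */
                return 0;
        }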
index 0fbb486a320e927bb47aa1ac63fcea6ef6284760..86a2bd7219005b149f76b811117d5312ef8a63b8 100644 (file)
@@ -2777,8 +2777,10 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
        io_eventfd_unregister(ctx);
 
 #if defined(CONFIG_UNIX)
-       if (ctx->ring_sock)
+       if (ctx->ring_sock) {
+               ctx->ring_sock->file = NULL; /* so that iput() is called */
                sock_release(ctx->ring_sock);
+       }
 #endif
 
        io_mem_free(ctx->sq_ring);
index 23ef63fd166951e1c05f34875f661288fefb6ae7..12654c2e78f8ec4a7c14a03293b5ae19addfd9a2 100644 (file)
@@ -287,7 +287,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        struct iomap_readpage_ctx *ctx = data;
        struct page *page = ctx->cur_page;
        struct iomap_page *iop = iomap_page_create(inode, page);
-       bool is_contig = false;
+       bool same_page = false, is_contig = false;
        loff_t orig_pos = pos;
        unsigned poff, plen;
        sector_t sector;
@@ -315,10 +315,14 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
         * Try to merge into a previous segment if we can.
         */
        sector = iomap_sector(iomap, pos);
-       if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
-               if (__bio_try_merge_page(ctx->bio, page, plen, poff, true))
-                       goto done;
+       if (ctx->bio && bio_end_sector(ctx->bio) == sector)
                is_contig = true;
+
+       if (is_contig &&
+           __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
+               if (!same_page && iop)
+                       atomic_inc(&iop->read_count);
+               goto done;
        }
 
        /*
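__bio_try_merge_page() now reports through *same_page whether the merged bytes landed on a page the bio's last bvec already references, so callers bump their per-page counters only for genuinely new page references. The converted caller pattern, shown as a fragment with the names used in the hunk above:

bool same_page = false;

if (is_contig &&
    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
        if (!same_page && iop)
                atomic_inc(&iop->read_count);   /* bio gained a new reference to this page */
        goto done;
}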
index b26778bdc236e83595d6c2129d4f61ae45dbc0f2..7660c2749c96f075c721b701e0ee4839fe62cb82 100644 (file)
@@ -2105,6 +2105,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                /* Notice when we are propagating across user namespaces */
                if (child->mnt_parent->mnt_ns->user_ns != user_ns)
                        lock_mnt_tree(child);
+               child->mnt.mnt_flags &= ~MNT_LOCKED;
                commit_tree(child);
        }
        put_mountpoint(smp);
@@ -3445,6 +3446,7 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
        ns->root = mnt;
        ns->mounts = 1;
        list_add(&mnt->mnt_list, &ns->list);
+       mntget(newmount.mnt);
 
        /* Attach to an apparent O_PATH fd with a note that we need to unmount
         * it, not just simply put it.
index e6fde1a5c0724912886e8c41e964942a543e49f1..b428c295d13f8cf311431a2ece7bb72f3a44d242 100644 (file)
@@ -355,6 +355,10 @@ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
                /* Mark is just getting destroyed or created? */
                if (!conn)
                        continue;
+               if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID))
+                       continue;
+               /* Pairs with smp_wmb() in fsnotify_add_mark_list() */
+               smp_rmb();
                fsid = conn->fsid;
                if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
                        continue;
index 25eb247ea85abee864e29b375e9ebda790ef71e3..99ddd126f6f0c61cccc3dfc57cdf7bb7e6ef94e6 100644 (file)
@@ -482,10 +482,13 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
        conn->type = type;
        conn->obj = connp;
        /* Cache fsid of filesystem containing the object */
-       if (fsid)
+       if (fsid) {
                conn->fsid = *fsid;
-       else
+               conn->flags = FSNOTIFY_CONN_FLAG_HAS_FSID;
+       } else {
                conn->fsid.val[0] = conn->fsid.val[1] = 0;
+               conn->flags = 0;
+       }
        if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
                inode = igrab(fsnotify_conn_inode(conn));
        /*
@@ -560,7 +563,12 @@ static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
                if (err)
                        return err;
                goto restart;
-       } else if (fsid && (conn->fsid.val[0] || conn->fsid.val[1]) &&
+       } else if (fsid && !(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID)) {
+               conn->fsid = *fsid;
+               /* Pairs with smp_rmb() in fanotify_get_fsid() */
+               smp_wmb();
+               conn->flags |= FSNOTIFY_CONN_FLAG_HAS_FSID;
+       } else if (fsid && (conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID) &&
                   (fsid->val[0] != conn->fsid.val[0] ||
                    fsid->val[1] != conn->fsid.val[1])) {
                /*
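The two fsnotify hunks above pair a publish with a consume: the writer makes conn->fsid visible before setting the flag, and the reader checks the flag before trusting the fsid. Placed side by side as a fragment (taken from the hunks, no new logic):

/* writer, fsnotify_add_mark_list() */
conn->fsid = *fsid;                             /* publish the data ...        */
smp_wmb();                                      /* ... before the flag         */
conn->flags |= FSNOTIFY_CONN_FLAG_HAS_FSID;

/* reader, fanotify_get_fsid() */
if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID))
        continue;                               /* fsid not published yet      */
smp_rmb();                                      /* flag read ordered before... */
fsid = conn->fsid;                              /* ... reading the data        */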
index 2d016937fddab856b402c12c686c422a4d7e9ff5..42a61eecdacdd68993a83ceebca5d19939d2d651 100644 (file)
@@ -296,6 +296,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
 
 out_attach:
        spin_lock(&dentry_attach_lock);
+       if (unlikely(dentry->d_fsdata && !alias)) {
+               /* d_fsdata is already set by a racing thread doing the
+                * same attach as this one.  Let that thread go ahead and
+                * simply return here.
+                */
+               spin_unlock(&dentry_attach_lock);
+               iput(dl->dl_inode);
+               ocfs2_lock_res_free(&dl->dl_lockres);
+               kfree(dl);
+               return 0;
+       }
+
        dentry->d_fsdata = dl;
        dl->dl_count++;
        spin_unlock(&dentry_attach_lock);
index 56feaa739979766aa8aab02546c38757a63a4de5..7ec3acd90d0b666a839f44f0850e3c5759560d98 100644 (file)
@@ -37,7 +37,7 @@ static int ovl_ccup_get(char *buf, const struct kernel_param *param)
 }
 
 module_param_call(check_copy_up, ovl_ccup_set, ovl_ccup_get, NULL, 0644);
-MODULE_PARM_DESC(ovl_check_copy_up, "Obsolete; does nothing");
+MODULE_PARM_DESC(check_copy_up, "Obsolete; does nothing");
 
 int ovl_copy_xattr(struct dentry *old, struct dentry *new)
 {
index 93872bb502303c396e0a175ec86a74a3daa88d8b..2f39046ebfa48586f772b7971579c6acc0acb70f 100644 (file)
@@ -21,7 +21,7 @@
 
 static unsigned short ovl_redirect_max = 256;
 module_param_named(redirect_max, ovl_redirect_max, ushort, 0644);
-MODULE_PARM_DESC(ovl_redirect_max,
+MODULE_PARM_DESC(redirect_max,
                 "Maximum length of absolute redirect xattr value");
 
 static int ovl_set_redirect(struct dentry *dentry, bool samedir);
index 340a6ad45914fb01d59fbb386597d9f5d7830a95..75d8d00fa087ea517ca54a49a620de6f0813a43d 100644 (file)
@@ -409,37 +409,16 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd,
        return ret;
 }
 
-static unsigned int ovl_get_inode_flags(struct inode *inode)
-{
-       unsigned int flags = READ_ONCE(inode->i_flags);
-       unsigned int ovl_iflags = 0;
-
-       if (flags & S_SYNC)
-               ovl_iflags |= FS_SYNC_FL;
-       if (flags & S_APPEND)
-               ovl_iflags |= FS_APPEND_FL;
-       if (flags & S_IMMUTABLE)
-               ovl_iflags |= FS_IMMUTABLE_FL;
-       if (flags & S_NOATIME)
-               ovl_iflags |= FS_NOATIME_FL;
-
-       return ovl_iflags;
-}
-
 static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd,
-                               unsigned long arg)
+                               unsigned long arg, unsigned int iflags)
 {
        long ret;
        struct inode *inode = file_inode(file);
-       unsigned int flags;
-       unsigned int old_flags;
+       unsigned int old_iflags;
 
        if (!inode_owner_or_capable(inode))
                return -EACCES;
 
-       if (get_user(flags, (int __user *) arg))
-               return -EFAULT;
-
        ret = mnt_want_write_file(file);
        if (ret)
                return ret;
@@ -448,8 +427,8 @@ static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd,
 
        /* Check the capability before cred override */
        ret = -EPERM;
-       old_flags = ovl_get_inode_flags(inode);
-       if (((flags ^ old_flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) &&
+       old_iflags = READ_ONCE(inode->i_flags);
+       if (((iflags ^ old_iflags) & (S_APPEND | S_IMMUTABLE)) &&
            !capable(CAP_LINUX_IMMUTABLE))
                goto unlock;
 
@@ -469,6 +448,63 @@ static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd,
 
 }
 
+static unsigned int ovl_fsflags_to_iflags(unsigned int flags)
+{
+       unsigned int iflags = 0;
+
+       if (flags & FS_SYNC_FL)
+               iflags |= S_SYNC;
+       if (flags & FS_APPEND_FL)
+               iflags |= S_APPEND;
+       if (flags & FS_IMMUTABLE_FL)
+               iflags |= S_IMMUTABLE;
+       if (flags & FS_NOATIME_FL)
+               iflags |= S_NOATIME;
+
+       return iflags;
+}
+
+static long ovl_ioctl_set_fsflags(struct file *file, unsigned int cmd,
+                                 unsigned long arg)
+{
+       unsigned int flags;
+
+       if (get_user(flags, (int __user *) arg))
+               return -EFAULT;
+
+       return ovl_ioctl_set_flags(file, cmd, arg,
+                                  ovl_fsflags_to_iflags(flags));
+}
+
+static unsigned int ovl_fsxflags_to_iflags(unsigned int xflags)
+{
+       unsigned int iflags = 0;
+
+       if (xflags & FS_XFLAG_SYNC)
+               iflags |= S_SYNC;
+       if (xflags & FS_XFLAG_APPEND)
+               iflags |= S_APPEND;
+       if (xflags & FS_XFLAG_IMMUTABLE)
+               iflags |= S_IMMUTABLE;
+       if (xflags & FS_XFLAG_NOATIME)
+               iflags |= S_NOATIME;
+
+       return iflags;
+}
+
+static long ovl_ioctl_set_fsxflags(struct file *file, unsigned int cmd,
+                                  unsigned long arg)
+{
+       struct fsxattr fa;
+
+       memset(&fa, 0, sizeof(fa));
+       if (copy_from_user(&fa, (void __user *) arg, sizeof(fa)))
+               return -EFAULT;
+
+       return ovl_ioctl_set_flags(file, cmd, arg,
+                                  ovl_fsxflags_to_iflags(fa.fsx_xflags));
+}
+
 static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        long ret;
@@ -480,8 +516,11 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                break;
 
        case FS_IOC_SETFLAGS:
+               ret = ovl_ioctl_set_fsflags(file, cmd, arg);
+               break;
+
        case FS_IOC_FSSETXATTR:
-               ret = ovl_ioctl_set_flags(file, cmd, arg);
+               ret = ovl_ioctl_set_fsxflags(file, cmd, arg);
                break;
 
        default:
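The split above reflects the two userspace interfaces: FS_IOC_SETFLAGS takes a plain int of FS_*_FL bits, while FS_IOC_FSSETXATTR takes a struct fsxattr whose fsx_xflags field uses the FS_XFLAG_* encoding; both are mapped to the in-core S_* flags before the CAP_LINUX_IMMUTABLE check. A compilable userspace sketch of the two call styles (illustrative only):

#include <sys/ioctl.h>
#include <linux/fs.h>

int set_append_fl(int fd)               /* legacy int-based interface */
{
        unsigned int flags;

        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
                return -1;
        flags |= FS_APPEND_FL;
        return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}

int set_append_xflag(int fd)            /* extended struct-based interface */
{
        struct fsxattr fa;

        if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
                return -1;
        fa.fsx_xflags |= FS_XFLAG_APPEND;
        return ioctl(fd, FS_IOC_FSSETXATTR, &fa);
}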
index f7eba21effa5dbf8c0d5fdb4d922d81f32111cc3..f0389849fd807879cfb61dd2f185dff4d097e0c7 100644 (file)
@@ -553,15 +553,15 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
        int xinobits = ovl_xino_bits(inode->i_sb);
 
        /*
-        * When NFS export is enabled and d_ino is consistent with st_ino
-        * (samefs or i_ino has enough bits to encode layer), set the same
-        * value used for d_ino to i_ino, because nfsd readdirplus compares
-        * d_ino values to i_ino values of child entries. When called from
+        * When d_ino is consistent with st_ino (samefs or i_ino has enough
+        * bits to encode layer), set the same value used for st_ino to i_ino,
+        * so the inode number exposed via /proc/locks and the like will be
+        * consistent with d_ino and st_ino values. An i_ino value inconsistent
+        * with d_ino also causes nfsd readdirplus to fail.  When called from
         * ovl_new_inode(), ino arg is 0, so i_ino will be updated to real
         * upper inode i_ino on ovl_inode_init() or ovl_inode_update().
         */
-       if (inode->i_sb->s_export_op &&
-           (ovl_same_sb(inode->i_sb) || xinobits)) {
+       if (ovl_same_sb(inode->i_sb) || xinobits) {
                inode->i_ino = ino;
                if (xinobits && fsid && !(ino >> (64 - xinobits)))
                        inode->i_ino |= (unsigned long)fsid << (64 - xinobits);
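With xino the layer id (fsid) is packed into the top xinobits of the 64-bit inode number whenever the real inode number leaves those bits free; for example, with xinobits = 2 and fsid = 1, real inode 0x1234 is exposed as 0x4000000000001234. A sketch of the packing (hypothetical helper, assumes 64-bit inode numbers):

static u64 pack_xino(u64 real_ino, unsigned int fsid, unsigned int xinobits)
{
        /* pack only when the high bits are free and there is a non-zero fsid */
        if (xinobits && fsid && !(real_ino >> (64 - xinobits)))
                real_ino |= (u64)fsid << (64 - xinobits);
        return real_ino;
}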
index 746ea36f3171d5e5ef75bbf108ee3b2e004af93a..e300c1371b7b90dc61e761d1eb452e12085deaf3 100644 (file)
@@ -31,29 +31,29 @@ struct ovl_dir_cache;
 
 static bool ovl_redirect_dir_def = IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_DIR);
 module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644);
-MODULE_PARM_DESC(ovl_redirect_dir_def,
+MODULE_PARM_DESC(redirect_dir,
                 "Default to on or off for the redirect_dir feature");
 
 static bool ovl_redirect_always_follow =
        IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW);
 module_param_named(redirect_always_follow, ovl_redirect_always_follow,
                   bool, 0644);
-MODULE_PARM_DESC(ovl_redirect_always_follow,
+MODULE_PARM_DESC(redirect_always_follow,
                 "Follow redirects even if redirect_dir feature is turned off");
 
 static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX);
 module_param_named(index, ovl_index_def, bool, 0644);
-MODULE_PARM_DESC(ovl_index_def,
+MODULE_PARM_DESC(index,
                 "Default to on or off for the inodes index feature");
 
 static bool ovl_nfs_export_def = IS_ENABLED(CONFIG_OVERLAY_FS_NFS_EXPORT);
 module_param_named(nfs_export, ovl_nfs_export_def, bool, 0644);
-MODULE_PARM_DESC(ovl_nfs_export_def,
+MODULE_PARM_DESC(nfs_export,
                 "Default to on or off for the NFS export feature");
 
 static bool ovl_xino_auto_def = IS_ENABLED(CONFIG_OVERLAY_FS_XINO_AUTO);
 module_param_named(xino_auto, ovl_xino_auto_def, bool, 0644);
-MODULE_PARM_DESC(ovl_xino_auto_def,
+MODULE_PARM_DESC(xino_auto,
                 "Auto enable xino feature");
 
 static void ovl_entry_stack_free(struct ovl_entry *oe)
@@ -66,7 +66,7 @@ static void ovl_entry_stack_free(struct ovl_entry *oe)
 
 static bool ovl_metacopy_def = IS_ENABLED(CONFIG_OVERLAY_FS_METACOPY);
 module_param_named(metacopy, ovl_metacopy_def, bool, 0644);
-MODULE_PARM_DESC(ovl_metacopy_def,
+MODULE_PARM_DESC(metacopy,
                 "Default to on or off for the metadata only copy up feature");
 
 static void ovl_dentry_release(struct dentry *dentry)
@@ -995,8 +995,8 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
        int err;
 
        trap = ovl_get_trap_inode(sb, dir);
-       err = PTR_ERR(trap);
-       if (IS_ERR(trap)) {
+       err = PTR_ERR_OR_ZERO(trap);
+       if (err) {
                if (err == -ELOOP)
                        pr_err("overlayfs: conflicting %s path\n", name);
                return err;
@@ -1471,23 +1471,20 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
  * Check if this layer root is a descendant of:
  * - another layer of this overlayfs instance
  * - upper/work dir of any overlayfs instance
- * - a disconnected dentry (detached root)
  */
 static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
                           const char *name)
 {
-       struct dentry *next, *parent;
-       bool is_root = false;
+       struct dentry *next = dentry, *parent;
        int err = 0;
 
-       if (!dentry || dentry == dentry->d_sb->s_root)
+       if (!dentry)
                return 0;
 
-       next = dget(dentry);
-       /* Walk back ancestors to fs root (inclusive) looking for traps */
-       do {
-               parent = dget_parent(next);
-               is_root = (parent == next);
+       parent = dget_parent(next);
+
+       /* Walk back ancestors to root (inclusive) looking for traps */
+       while (!err && parent != next) {
                if (ovl_is_inuse(parent)) {
                        err = -EBUSY;
                        pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
@@ -1496,17 +1493,12 @@ static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
                        err = -ELOOP;
                        pr_err("overlayfs: overlapping %s path\n", name);
                }
-               dput(next);
                next = parent;
-       } while (!err && !is_root);
-
-       /* Did we really walk to fs root or found a detached root? */
-       if (!err && next != dentry->d_sb->s_root) {
-               err = -ESTALE;
-               pr_err("overlayfs: disconnected %s path\n", name);
+               parent = dget_parent(next);
+               dput(next);
        }
 
-       dput(next);
+       dput(parent);
 
        return err;
 }
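The rewritten loop is the usual one-reference-at-a-time ancestor walk: dget_parent() of a root returns the root itself, so parent == next terminates, and exactly one extra dentry reference is held at any moment. The bare pattern, as a sketch stripped of the overlayfs checks:

struct dentry *next = dentry, *parent = dget_parent(next);

while (parent != next) {
        /* ... inspect 'parent' here ... */
        next = parent;
        parent = dget_parent(next);     /* grab the next reference ...    */
        dput(next);                     /* ... then drop the previous one */
}
dput(parent);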
index 595857a1883e1a68a207fb32beef9c20a8bcd0e2..49f6d7ff21394f5e15e440ade00ba3363e27cf68 100644 (file)
@@ -261,7 +261,6 @@ static int propagate_one(struct mount *m)
        child = copy_tree(last_source, last_source->mnt.mnt_root, type);
        if (IS_ERR(child))
                return PTR_ERR(child);
-       child->mnt.mnt_flags &= ~MNT_LOCKED;
        mnt_set_mountpoint(m, mp, child);
        last_dest = m;
        last_source = child;
index 9ad72ea7f71f5d71608c470992c871aa7bbe2f15..58f15a083dd11d4f945be0c05f40437d103d6288 100644 (file)
@@ -1996,8 +1996,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
                                       &warn_to[cnt]);
                if (ret)
                        goto over_quota;
-               ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 0,
-                                     &warn_to[cnt]);
+               ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
+                                     DQUOT_SPACE_WARN, &warn_to[cnt]);
                if (ret) {
                        spin_lock(&transfer_to[cnt]->dq_dqb_lock);
                        dquot_decr_inodes(transfer_to[cnt], inode_usage);
index a6f0f4761a3721b97fd2a40daee3714058ec64f5..8da5e6637771c62f62920f6db413a2a81fc7af83 100644 (file)
@@ -758,6 +758,7 @@ xfs_add_to_ioend(
        struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
        unsigned                len = i_blocksize(inode);
        unsigned                poff = offset & (PAGE_SIZE - 1);
+       bool                    merged, same_page = false;
        sector_t                sector;
 
        sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
@@ -774,9 +775,13 @@ xfs_add_to_ioend(
                                wpc->imap.br_state, offset, bdev, sector);
        }
 
-       if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff, true)) {
-               if (iop)
-                       atomic_inc(&iop->write_count);
+       merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
+                       &same_page);
+
+       if (iop && !same_page)
+               atomic_inc(&iop->write_count);
+
+       if (!merged) {
                if (bio_full(wpc->ioend->io_bio))
                        xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
                bio_add_page(wpc->ioend->io_bio, page, len, poff);
index 9d3b5b93102cffc54dbb4f0f2f1c1a1031e14337..c9ca0be54d9a6e760db69cdeda112600d0e54a9d 100644 (file)
@@ -471,6 +471,7 @@ struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
                                     struct i2c_adapter *adapter);
 struct edid *drm_edid_duplicate(const struct edid *edid);
 int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+int drm_add_override_edid_modes(struct drm_connector *connector);
 
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
 enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
index 0f23b56826403b5dd2fdb781671fd696e6e0cb68..f87abaa898f0035f21613dc66e12594d04201f0b 100644 (file)
@@ -423,7 +423,7 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
 bool __bio_try_merge_page(struct bio *bio, struct page *page,
-               unsigned int len, unsigned int off, bool same_page);
+               unsigned int len, unsigned int off, bool *same_page);
 void __bio_add_page(struct bio *bio, struct page *page,
                unsigned int len, unsigned int off);
 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
index cb3c6b3b89c802941edea7f732e2009b00a4b356..a7f7a98ec39d39c6e55d1fb194bee482da4767c7 100644 (file)
@@ -238,6 +238,12 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)                       \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
 
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)                       \
+       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)                       \
+       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
+
 #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                \
 ({                                                                            \
        int __ret = 0;                                                         \
@@ -339,6 +345,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; })
index 5df8e9e2a3933949af17dda1d77a4daccd5df611..b92ef9f73e42f1bcf0141aa21d0e9c17c5c7f05b 100644 (file)
@@ -600,7 +600,6 @@ void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
 extern int sysctl_unprivileged_bpf_disabled;
-extern int sysctl_bpf_stats_enabled;
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
 int bpf_prog_new_fd(struct bpf_prog *prog);
index d71b079bb021a22621c2f5cbe484ab9b499db998..b4e766e93f6e9abdf01c5e28c82bd3bc63c72f98 100644 (file)
@@ -106,8 +106,6 @@ enum {
        CFTYPE_WORLD_WRITABLE   = (1 << 4),     /* (DON'T USE FOR NEW FILES) S_IWUGO */
        CFTYPE_DEBUG            = (1 << 5),     /* create when cgroup_debug */
 
-       CFTYPE_SYMLINKED        = (1 << 6),     /* pointed to by symlink too */
-
        /* internal flags, do not use outside cgroup core proper */
        __CFTYPE_ONLY_ON_DFL    = (1 << 16),    /* only on default hierarchy */
        __CFTYPE_NOT_ON_DFL     = (1 << 17),    /* not on default hierarchy */
@@ -223,6 +221,7 @@ struct css_set {
         */
        struct list_head tasks;
        struct list_head mg_tasks;
+       struct list_head dying_tasks;
 
        /* all css_task_iters currently walking this cset */
        struct list_head task_iters;
@@ -545,7 +544,6 @@ struct cftype {
         * end of cftype array.
         */
        char name[MAX_CFTYPE_NAME];
-       char link_name[MAX_CFTYPE_NAME];
        unsigned long private;
 
        /*
index c0077adeea8334dc136233de439351ca3e742eff..0297f930a56e1b08a46141a1d535b7d214183aca 100644 (file)
@@ -43,6 +43,9 @@
 /* walk all threaded css_sets in the domain */
 #define CSS_TASK_ITER_THREADED         (1U << 1)
 
+/* internal flags */
+#define CSS_TASK_ITER_SKIPPED          (1U << 16)
+
 /* a css_task_iter should be treated as an opaque object */
 struct css_task_iter {
        struct cgroup_subsys            *ss;
@@ -57,6 +60,7 @@ struct css_task_iter {
        struct list_head                *task_pos;
        struct list_head                *tasks_head;
        struct list_head                *mg_tasks_head;
+       struct list_head                *dying_tasks_head;
 
        struct css_set                  *cur_cset;
        struct css_set                  *cur_dcset;
@@ -487,7 +491,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
  *
  * Find the css for the (@task, @subsys_id) combination, increment a
  * reference on and return it.  This function is guaranteed to return a
- * valid css.
+ * valid css.  The returned css may already have been offlined.
  */
 static inline struct cgroup_subsys_state *
 task_get_css(struct task_struct *task, int subsys_id)
@@ -497,7 +501,13 @@ task_get_css(struct task_struct *task, int subsys_id)
        rcu_read_lock();
        while (true) {
                css = task_css(task, subsys_id);
-               if (likely(css_tryget_online(css)))
+               /*
+                * Can't use css_tryget_online() here.  A task which has
+                * PF_EXITING set may stay associated with an offline css.
+                * If such task calls this function, css_tryget_online()
+                * will keep failing.
+                */
+               if (likely(css_tryget(css)))
                        break;
                cpu_relax();
        }
index 6a381594608cf2d4a63c4163b825d589703e3de1..5c6062206760a4b1470f616e0df52c0d205e4990 100644 (file)
@@ -101,6 +101,7 @@ enum cpuhp_state {
        CPUHP_AP_IRQ_BCM2836_STARTING,
        CPUHP_AP_IRQ_MIPS_GIC_STARTING,
        CPUHP_AP_ARM_MVEBU_COHERENCY,
+       CPUHP_AP_MICROCODE_LOADER,
        CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_STARTING,
        CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
index e85264fb66161408aee2815001d8d5159ab04ac1..848fc71c6ba60ba57afc6f08efecabcc1e643350 100644 (file)
@@ -713,6 +713,7 @@ void __iomem *devm_of_iomap(struct device *dev,
 /* allows to add/remove a custom action to devres stack */
 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+void devm_release_action(struct device *dev, void (*action)(void *), void *data);
 
 static inline int devm_add_action_or_reset(struct device *dev,
                                           void (*action)(void *), void *data)
index a9f9dcc1e5151353a9664fa99fb9eb7b1c3ed46d..d4844cad2c2bb92ca885f2d3c113f21a47c3c285 100644 (file)
@@ -292,7 +292,9 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
  */
 struct fsnotify_mark_connector {
        spinlock_t lock;
-       unsigned int type;      /* Type of object [lock] */
+       unsigned short type;    /* Type of object [lock] */
+#define FSNOTIFY_CONN_FLAG_HAS_FSID    0x01
+       unsigned short flags;   /* flags [lock] */
        __kernel_fsid_t fsid;   /* fsid of filesystem containing object */
        union {
                /* Object pointer [lock] */
index dd0a452373e71541c670c916ac886183582b517e..a337313e064f4ec9e851d761554ecac1951178db 100644 (file)
@@ -75,6 +75,7 @@ struct gen_pool_chunk {
        struct list_head next_chunk;    /* next chunk in pool */
        atomic_long_t avail;
        phys_addr_t phys_addr;          /* physical starting address of memory chunk */
+       void *owner;                    /* private data to retrieve at alloc time */
        unsigned long start_addr;       /* start address of memory chunk */
        unsigned long end_addr;         /* end address of memory chunk (inclusive) */
        unsigned long bits[0];          /* bitmap for allocating memory chunk */
@@ -96,8 +97,15 @@ struct genpool_data_fixed {
 
 extern struct gen_pool *gen_pool_create(int, int);
 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
-extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
-                            size_t, int);
+extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t,
+                            size_t, int, void *);
+
+static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
+               phys_addr_t phys, size_t size, int nid)
+{
+       return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
+}
+
 /**
  * gen_pool_add - add a new chunk of special memory to the pool
  * @pool: pool to add new memory chunk to
@@ -116,12 +124,47 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
        return gen_pool_add_virt(pool, addr, -1, size, nid);
 }
 extern void gen_pool_destroy(struct gen_pool *);
-extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
-extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t,
-               genpool_algo_t algo, void *data);
+unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
+               genpool_algo_t algo, void *data, void **owner);
+
+static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
+               size_t size, void **owner)
+{
+       return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
+                       owner);
+}
+
+static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
+               size_t size, genpool_algo_t algo, void *data)
+{
+       return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL);
+}
+
+/**
+ * gen_pool_alloc - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+{
+       return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
+}
+
 extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
                dma_addr_t *dma);
-extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr,
+               size_t size, void **owner);
+static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr,
+                size_t size)
+{
+       gen_pool_free_owner(pool, addr, size, NULL);
+}
+
 extern void gen_pool_for_each_chunk(struct gen_pool *,
        void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
 extern size_t gen_pool_avail(struct gen_pool *);
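The header change keeps gen_pool_add_virt(), gen_pool_alloc() and gen_pool_free() as thin inline wrappers around new *_owner() variants that let each chunk carry a private cookie which is handed back at allocation time. A hedged in-kernel usage sketch (chunk_vaddr, chunk_paddr, chunk_size and my_cookie are hypothetical):

struct gen_pool *pool = gen_pool_create(ilog2(64), -1);  /* 64-byte granules, any node */
unsigned long va;
void *owner;

gen_pool_add_owner(pool, chunk_vaddr, chunk_paddr, chunk_size, -1, my_cookie);

va = gen_pool_alloc_owner(pool, 256, &owner);
/* on success, owner == my_cookie for the chunk that served 'va' */

gen_pool_free_owner(pool, va, 256, NULL);
gen_pool_destroy(pool);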
index edf9e8f32d7025fd53412f414f4d136fb6867d1f..1dcb763bb610a85295893645fe285f2659532f1a 100644 (file)
@@ -117,9 +117,12 @@ struct memcg_shrinker_map {
 struct mem_cgroup_per_node {
        struct lruvec           lruvec;
 
+       /* Legacy local VM stats */
+       struct lruvec_stat __percpu *lruvec_stat_local;
+
+       /* Subtree VM stats (batched updates) */
        struct lruvec_stat __percpu *lruvec_stat_cpu;
        atomic_long_t           lruvec_stat[NR_VM_NODE_STAT_ITEMS];
-       atomic_long_t           lruvec_stat_local[NR_VM_NODE_STAT_ITEMS];
 
        unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
@@ -265,17 +268,18 @@ struct mem_cgroup {
        atomic_t                moving_account;
        struct task_struct      *move_lock_task;
 
-       /* memory.stat */
+       /* Legacy local VM stats and events */
+       struct memcg_vmstats_percpu __percpu *vmstats_local;
+
+       /* Subtree VM stats and events (batched updates) */
        struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 
        MEMCG_PADDING(_pad2_);
 
        atomic_long_t           vmstats[MEMCG_NR_STAT];
-       atomic_long_t           vmstats_local[MEMCG_NR_STAT];
-
        atomic_long_t           vmevents[NR_VM_EVENT_ITEMS];
-       atomic_long_t           vmevents_local[NR_VM_EVENT_ITEMS];
 
+       /* memory.events */
        atomic_long_t           memory_events[MEMCG_NR_MEMORY_EVENTS];
 
        unsigned long           socket_pressure;
@@ -567,7 +571,11 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
                                                   int idx)
 {
-       long x = atomic_long_read(&memcg->vmstats_local[idx]);
+       long x = 0;
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
 #ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
@@ -641,13 +649,15 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
 {
        struct mem_cgroup_per_node *pn;
-       long x;
+       long x = 0;
+       int cpu;
 
        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);
 
        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-       x = atomic_long_read(&pn->lruvec_stat_local[idx]);
+       for_each_possible_cpu(cpu)
+               x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
 #ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
index f0628660d54100ffd890b7878c863bc698569a41..1732dea030b218f96114a2f1e5201715856a3008 100644 (file)
@@ -81,6 +81,7 @@ typedef void (*dev_page_free_t)(struct page *page, void *data);
  * @res: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
  * @kill: callback to transition @ref to the dead state
+ * @cleanup: callback to wait for @ref to be idle and reap it
  * @dev: host device of the mapping for debug
  * @data: private data pointer for page_free()
  * @type: memory type: see MEMORY_* in memory_hotplug.h
@@ -92,6 +93,7 @@ struct dev_pagemap {
        struct resource res;
        struct percpu_ref *ref;
        void (*kill)(struct percpu_ref *ref);
+       void (*cleanup)(struct percpu_ref *ref);
        struct device *dev;
        void *data;
        enum memory_type type;
@@ -100,6 +102,7 @@ struct dev_pagemap {
 
 #ifdef CONFIG_ZONE_DEVICE
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
 struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap);
 
@@ -118,6 +121,11 @@ static inline void *devm_memremap_pages(struct device *dev,
        return ERR_PTR(-ENXIO);
 }
 
+static inline void devm_memunmap_pages(struct device *dev,
+               struct dev_pagemap *pgmap)
+{
+}
+
 static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
 {
index 43d0f0c496f661880e19a19fff8a651148442d60..ecb7972e2423baa3891cc19ea14ca0a183e8d559 100644 (file)
@@ -398,6 +398,7 @@ struct mmc_host {
        unsigned int            retune_now:1;   /* do re-tuning at next req */
        unsigned int            retune_paused:1; /* re-tuning is temporarily disabled */
        unsigned int            use_blk_mq:1;   /* use blk-mq */
+       unsigned int            retune_crc_disable:1; /* don't trigger retune upon crc */
 
        int                     rescan_disable; /* disable card detection */
        int                     rescan_entered; /* used with nonremovable devices */
index e9dfdd501cd1541dde317b60e3bf7d4fa9eb127d..5a177f7a83c3acc75b1e93daf2ddf9534be9285d 100644 (file)
@@ -167,4 +167,10 @@ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b,
 extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func);
 extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags);
 
+extern void sdio_retune_crc_disable(struct sdio_func *func);
+extern void sdio_retune_crc_enable(struct sdio_func *func);
+
+extern void sdio_retune_hold_now(struct sdio_func *func);
+extern void sdio_retune_release(struct sdio_func *func);
+
 #endif /* LINUX_MMC_SDIO_FUNC_H */
index 6411c624f63acbc4f549ad7e884c4c36f9d26efe..2d2e55dfea9445ca56babf2cc1dad0089b4b647e 100644 (file)
@@ -123,11 +123,20 @@ int mac_link_state(struct net_device *ndev,
  * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
  * @state: a pointer to a &struct phylink_link_state.
  *
+ * Note - not all members of @state are valid.  In particular,
+ * @state->lp_advertising, @state->link, @state->an_complete are never
+ * guaranteed to be correct, and so any mac_config() implementation must
+ * never reference these fields.
+ *
  * The action performed depends on the currently selected mode:
  *
  * %MLO_AN_FIXED, %MLO_AN_PHY:
  *   Configure the specified @state->speed, @state->duplex and
- *   @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode.
+ *   @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) modes over a link
+ *   specified by @state->interface.  @state->advertising may be used,
+ *   but is not required.  Other members of @state must be ignored.
+ *
+ *   Valid state members: interface, speed, duplex, pause, advertising.
  *
  * %MLO_AN_INBAND:
  *   place the link in an inband negotiation mode (such as 802.3z
@@ -150,6 +159,8 @@ int mac_link_state(struct net_device *ndev,
  *   responsible for reading the configuration word and configuring
  *   itself accordingly.
  *
+ *   Valid state members: interface, an_enabled, pause, advertising.
+ *
  * Implementations are expected to update the MAC to reflect the
  * requested settings - i.o.w., if nothing has changed between two
  * calls, no action is expected.  If only flow control settings have
index a3fda9f024c3c1988b6ff60954d7f7e74a9c1ecf..4a7944078cc35dddcea4963f1099e81d147c6936 100644 (file)
@@ -54,6 +54,10 @@ static inline void mmdrop(struct mm_struct *mm)
  * followed by taking the mmap_sem for writing before modifying the
  * vmas or anything the coredump pretends not to change from under it.
  *
+ * It also has to be called when mmgrab() is used in the context of
+ * the process, but then the mm_count refcount is transferred outside
+ * the context of the process to run down_write() on that pinned mm.
+ *
  * NOTE: find_extend_vma() called from GUP context is the only place
  * that can modify the "mm" (notably the vm_start/end) under mmap_sem
  * for reading and outside the context of the process, so it is also
index 178a3933a71b871982a58b5052740ca38f4ea34a..50ced8aba9dbf6c2cd4a0a1ef1598bdd58822821 100644 (file)
@@ -351,6 +351,8 @@ static inline void sk_psock_update_proto(struct sock *sk,
 static inline void sk_psock_restore_proto(struct sock *sk,
                                          struct sk_psock *psock)
 {
+       sk->sk_write_space = psock->saved_write_space;
+
        if (psock->sk_proto) {
                sk->sk_prot = psock->sk_proto;
                psock->sk_proto = NULL;
index b769ecfcc3bd41aad6fd339ba824c6bb622ac24d..aadd310769d080f1d45db14b2a72fc8ad36f1196 100644 (file)
@@ -63,6 +63,9 @@ extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
                                      void __user *, size_t *, loff_t *);
 extern int proc_do_large_bitmap(struct ctl_table *, int,
                                void __user *, size_t *, loff_t *);
+extern int proc_do_static_key(struct ctl_table *table, int write,
+                             void __user *buffer, size_t *lenp,
+                             loff_t *ppos);
 
 /*
  * Register a set of sysctl names by calling register_sysctl_table
index 711361af9ce019f08c8b6accc33220b673b34d56..9a478a0cd3a20b40ed344f178e35228a0b8ee203 100644 (file)
@@ -484,4 +484,8 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
 
        return (user_mss && user_mss < mss) ? user_mss : mss;
 }
+
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
+                 int shiftlen);
+
 #endif /* _LINUX_TCP_H */
index 2f67ae854ff01e24bc57221c400613d096916d3b..becdad57685908a4b07bb3df890260483aa3e36a 100644 (file)
@@ -309,6 +309,22 @@ static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
        return rcu_dereference_rtnl(dev->ip6_ptr);
 }
 
+/**
+ * __in6_dev_stats_get - get inet6_dev pointer for stats
+ * @dev: network device
+ * @skb: skb for original incoming interface if needed
+ *
+ * Caller must hold rcu_read_lock or RTNL, because this function
+ * does not take a reference on the inet6_dev.
+ */
+static inline struct inet6_dev *__in6_dev_stats_get(const struct net_device *dev,
+                                                   const struct sk_buff *skb)
+{
+       if (netif_is_l3_master(dev))
+               dev = dev_get_by_index_rcu(dev_net(dev), inet6_iif(skb));
+       return __in6_dev_get(dev);
+}
+
 /**
  * __in6_dev_get_safely - get inet6_dev pointer from netdevice
  * @dev: network device
index 87dae868707e2a7fb92e483651c4e6cb2a524011..948139690a5867e3665ba4883340221f23e9363b 100644 (file)
@@ -3839,7 +3839,8 @@ struct cfg80211_ops {
  *     on wiphy_new(), but can be changed by the driver if it has a good
  *     reason to override the default
  * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station
- *     on a VLAN interface)
+ *     on a VLAN interface). This flag also serves an extra purpose of
+ *     supporting 4ADDR AP mode on devices which do not support AP/VLAN iftype.
  * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station
  * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the
  *     control port protocol ethertype. The device also honours the
index 7c5a8d9a8d2a77eb65d11b5358a4d26a11591bdb..dfabc0503446505472b3f156d770698dfb39b8f3 100644 (file)
@@ -46,6 +46,7 @@ struct flow_dissector_key_tags {
 
 struct flow_dissector_key_vlan {
        u16     vlan_id:12,
+               vlan_dei:1,
                vlan_priority:3;
        __be16  vlan_tpid;
 };
index 7698460a3dd1e5070e12d406b3ee58834688cdc9..623cfbb7b8dcbb2a6d8325ec010aff78bbdf8839 100644 (file)
@@ -117,6 +117,7 @@ struct netns_ipv4 {
 #endif
        int sysctl_tcp_mtu_probing;
        int sysctl_tcp_base_mss;
+       int sysctl_tcp_min_snd_mss;
        int sysctl_tcp_probe_threshold;
        u32 sysctl_tcp_probe_interval;
 
index e9d769c04637a3c0b967c9bfa6def724834796b9..6cbc16136357d158cf1e84b98ecb7e06898269a6 100644 (file)
@@ -1463,12 +1463,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
                __sk_mem_reclaim(sk, 1 << 20);
 }
 
+DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
        sk->sk_wmem_queued -= skb->truesize;
        sk_mem_uncharge(sk, skb->truesize);
-       if (!sk->sk_tx_skb_cache && !skb_cloned(skb)) {
+       if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
+           !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
                skb_zcopy_clear(skb, true);
                sk->sk_tx_skb_cache = skb;
                return;
@@ -2433,13 +2435,11 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
  * This routine must be called with interrupts disabled or with the socket
  * locked so that the sk_buff queue operation is ok.
 */
+DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
 {
        __skb_unlink(skb, &sk->sk_receive_queue);
-       if (
-#ifdef CONFIG_RPS
-           !static_branch_unlikely(&rps_needed) &&
-#endif
+       if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
            !sk->sk_rx_skb_cache) {
                sk->sk_rx_skb_cache = skb;
                skb_orphan(skb);
@@ -2534,6 +2534,8 @@ extern int sysctl_optmem_max;
 extern __u32 sysctl_wmem_default;
 extern __u32 sysctl_rmem_default;
 
+DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
+
 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
 {
        /* Does this proto have per netns sysctl_wmem ? */
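sk_wmem_free_skb() and sk_eat_skb() now consult default-off static keys, so the skb-cache branches cost nothing unless the feature is switched on (the proc_do_static_key() sysctl helper added earlier in this diff is the likely toggle). The general pattern, as a sketch with hypothetical names:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(my_feature_key);

void my_hot_path(void)
{
        if (static_branch_unlikely(&my_feature_key)) {
                /* feature code: compiled in, but patched out until enabled */
        }
}

/* slow path, e.g. a sysctl or module-param handler */
static_branch_enable(&my_feature_key);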
index ac2f53fbfa6b4cbf1fc615c952a5e1cac1124300..582c0caa98116740b5bde8c5dbb5d94fc69d1caa 100644 (file)
@@ -51,6 +51,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 #define MAX_TCP_HEADER (128 + MAX_HEADER)
 #define MAX_TCP_OPTION_SPACE 40
+#define TCP_MIN_SND_MSS                48
+#define TCP_MIN_GSO_SIZE       (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
 
 /*
  * Never offer a window over 32767 without using window scaling. Some
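Given MAX_TCP_OPTION_SPACE of 40 bytes, the new TCP_MIN_SND_MSS floor of 48 leaves at least 48 - 40 = 8 bytes of payload per segment (TCP_MIN_GSO_SIZE); this is the floor applied via the new per-netns sysctl_tcp_min_snd_mss added elsewhere in this diff.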
index 3b67c93ff101b523018a7a3c66eecf8ad91a287a..3d174e20aa530983c77f3554ae968e80c246c39b 100644 (file)
@@ -49,6 +49,7 @@ enum sof_ipc_dai_type {
        SOF_DAI_INTEL_SSP,              /**< Intel SSP */
        SOF_DAI_INTEL_DMIC,             /**< Intel DMIC */
        SOF_DAI_INTEL_HDA,              /**< Intel HD/A */
+       SOF_DAI_INTEL_SOUNDWIRE,        /**< Intel SoundWire */
 };
 
 /* general purpose DAI configuration */
index ccb6a004b37b789e58bb6d6d13ce04fe04915adf..1efcf7b18ec20cdf186f94b24b094aac1f4df734 100644 (file)
@@ -48,6 +48,7 @@
 #define SOF_IPC_FW_READY                       SOF_GLB_TYPE(0x7U)
 #define SOF_IPC_GLB_DAI_MSG                    SOF_GLB_TYPE(0x8U)
 #define SOF_IPC_GLB_TRACE_MSG                  SOF_GLB_TYPE(0x9U)
+#define SOF_IPC_GLB_GDB_DEBUG                   SOF_GLB_TYPE(0xAU)
 
 /*
  * DSP Command Message Types
@@ -78,6 +79,7 @@
 #define SOF_IPC_COMP_GET_VALUE                 SOF_CMD_TYPE(0x002)
 #define SOF_IPC_COMP_SET_DATA                  SOF_CMD_TYPE(0x003)
 #define SOF_IPC_COMP_GET_DATA                  SOF_CMD_TYPE(0x004)
+#define SOF_IPC_COMP_NOTIFICATION              SOF_CMD_TYPE(0x005)
 
 /* DAI messages */
 #define SOF_IPC_DAI_CONFIG                     SOF_CMD_TYPE(0x001)
@@ -153,6 +155,27 @@ struct sof_ipc_compound_hdr {
        uint32_t count;         /**< count of 0 means end of compound sequence */
 }  __packed;
 
+/**
+ * OOPS header architecture specific data.
+ */
+struct sof_ipc_dsp_oops_arch_hdr {
+       uint32_t arch;          /* Identifier of architecture */
+       uint32_t totalsize;     /* Total size of oops message */
+}  __packed;
+
+/**
+ * OOPS header platform specific data.
+ */
+struct sof_ipc_dsp_oops_plat_hdr {
+       uint32_t configidhi;    /* ConfigID hi 32bits */
+       uint32_t configidlo;    /* ConfigID lo 32bits */
+       uint32_t numaregs;      /* Special regs num */
+       uint32_t stackoffset;   /* Offset to stack pointer from beginning of
+                                * oops message
+                                */
+       uint32_t stackptr;      /* Stack ptr */
+}  __packed;
+
 /** @}*/
 
 #endif
index 21dae04d818398aecdaa3d9bc0c1ad9c64e29d94..16528d2b4a504ea455e86ed904a30cbc2d6394f0 100644 (file)
 
 #define SOF_IPC_MAX_ELEMS      16
 
+/*
+ * Firmware boot info flag bits (64-bit)
+ */
+#define SOF_IPC_INFO_BUILD             BIT(0)
+#define SOF_IPC_INFO_LOCKS             BIT(1)
+#define SOF_IPC_INFO_LOCKSV            BIT(2)
+#define SOF_IPC_INFO_GDB               BIT(3)
+
 /* extended data types that can be appended onto end of sof_ipc_fw_ready */
 enum sof_ipc_ext_data {
        SOF_IPC_EXT_DMA_BUFFER = 0,
@@ -49,16 +57,8 @@ struct sof_ipc_fw_ready {
        uint32_t hostbox_size;
        struct sof_ipc_fw_version version;
 
-       /* Miscellaneous debug flags showing build/debug features enabled */
-       union {
-               uint64_t reserved;
-               struct {
-                       uint64_t build:1;
-                       uint64_t locks:1;
-                       uint64_t locks_verbose:1;
-                       uint64_t gdb:1;
-               } bits;
-       } debug;
+       /* Miscellaneous flags */
+       uint64_t flags;
 
        /* reserved for future use */
        uint32_t reserved[4];
index a7189984000d5d5bac6c51e51aba193edcf3261e..d25c764b10e8b4be247ee8e317e0bfaf8cdfd9c9 100644 (file)
@@ -17,7 +17,8 @@
 
 /* Xtensa Firmware Oops data */
 struct sof_ipc_dsp_oops_xtensa {
-       struct sof_ipc_hdr hdr;
+       struct sof_ipc_dsp_oops_arch_hdr arch_hdr;
+       struct sof_ipc_dsp_oops_plat_hdr plat_hdr;
        uint32_t exccause;
        uint32_t excvaddr;
        uint32_t ps;
@@ -38,7 +39,11 @@ struct sof_ipc_dsp_oops_xtensa {
        uint32_t intenable;
        uint32_t interrupt;
        uint32_t sar;
-       uint32_t stack;
+       uint32_t debugcause;
+       uint32_t windowbase;
+       uint32_t windowstart;
+       uint32_t excsave1;
+       uint32_t ar[];
 }  __packed;
 
 #endif
index 63e0cf66f01a9698ff6bc41ac5492809bb83d834..a8b823c30b434d8022ebeab666d7072a09473be5 100644 (file)
@@ -192,6 +192,8 @@ enum bpf_attach_type {
        BPF_LIRC_MODE2,
        BPF_FLOW_DISSECTOR,
        BPF_CGROUP_SYSCTL,
+       BPF_CGROUP_UDP4_RECVMSG,
+       BPF_CGROUP_UDP6_RECVMSG,
        __MAX_BPF_ATTACH_TYPE
 };
 
@@ -3376,8 +3378,8 @@ struct bpf_raw_tracepoint_args {
 /* DIRECT:  Skip the FIB rules and go to FIB table associated with device
  * OUTPUT:  Do lookup from egress perspective; default is ingress
  */
-#define BPF_FIB_LOOKUP_DIRECT  BIT(0)
-#define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
+#define BPF_FIB_LOOKUP_DIRECT  (1U << 0)
+#define BPF_FIB_LOOKUP_OUTPUT  (1U << 1)
 
 enum {
        BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
index 86dc24a96c90ab047d5173d625450facd6c6dd79..fd42c1316d3d112ecd8a00d2b499d6f6901c5e81 100644 (file)
@@ -283,6 +283,7 @@ enum
        LINUX_MIB_TCPACKCOMPRESSED,             /* TCPAckCompressed */
        LINUX_MIB_TCPZEROWINDOWDROP,            /* TCPZeroWindowDrop */
        LINUX_MIB_TCPRCVQDROP,                  /* TCPRcvQDrop */
+       LINUX_MIB_TCPWQUEUETOOBIG,              /* TCPWqueueTooBig */
        __LINUX_MIB_MAX
 };
 
index 37e0a90dc9e6c34d43980bd65eccc3e111313bd3..0868eb47acf7e8ecd57824723f17824ab955085b 100644 (file)
@@ -26,7 +26,7 @@
 
 /* SOF ABI version major, minor and patch numbers */
 #define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 4
+#define SOF_ABI_MINOR 6
 #define SOF_ABI_PATCH 0
 
 /* SOF ABI version number. Format within 32bit word is MMmmmppp */
index 7c473f208a1058de97434a57a2d47e2360ae80a8..080e2bb644ccd761b3d54fbad9b58a881086231e 100644 (file)
@@ -2097,7 +2097,6 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
 
 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 EXPORT_SYMBOL(bpf_stats_enabled_key);
-int sysctl_bpf_stats_enabled __read_mostly;
 
 /* All definitions of tracepoints related to BPF. */
 #define CREATE_TRACE_POINTS
index 15dbc15c5b0c96fb9429c797abb539abd6fcc0a5..cd8297b3bdb9518a6c3498f81a88f792ebad7f60 100644 (file)
@@ -178,6 +178,7 @@ static void dev_map_free(struct bpf_map *map)
                if (!dev)
                        continue;
 
+               free_percpu(dev->bulkq);
                dev_put(dev->dev);
                kfree(dev);
        }
@@ -273,6 +274,7 @@ void __dev_map_flush(struct bpf_map *map)
        unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
        u32 bit;
 
+       rcu_read_lock();
        for_each_set_bit(bit, bitmap, map->max_entries) {
                struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
                struct xdp_bulk_queue *bq;
@@ -283,11 +285,12 @@ void __dev_map_flush(struct bpf_map *map)
                if (unlikely(!dev))
                        continue;
 
-               __clear_bit(bit, bitmap);
-
                bq = this_cpu_ptr(dev->bulkq);
                bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
+
+               __clear_bit(bit, bitmap);
        }
+       rcu_read_unlock();
 }
 
 /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
@@ -380,6 +383,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 
                int cpu;
 
+               rcu_read_lock();
                for_each_online_cpu(cpu) {
                        bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
                        __clear_bit(dev->bit, bitmap);
@@ -387,6 +391,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
                        bq = per_cpu_ptr(dev->bulkq, cpu);
                        bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
                }
+               rcu_read_unlock();
        }
 }
 
index e61630c2e50b28a3e342c4684b9be8e8141784a9..864e2a496376207072125ab321fcdd73b74dc198 100644 (file)
@@ -716,9 +716,14 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
         * have exactly two children, so this function will never return NULL.
         */
        for (node = search_root; node;) {
-               if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+               if (node->flags & LPM_TREE_NODE_FLAG_IM) {
+                       node = rcu_dereference(node->child[0]);
+               } else {
                        next_node = node;
-               node = rcu_dereference(node->child[0]);
+                       node = rcu_dereference(node->child[0]);
+                       if (!node)
+                               node = rcu_dereference(next_node->child[1]);
+               }
        }
 do_copy:
        next_key->prefixlen = next_node->prefixlen;
index ef63d26622f2f58561c09c0760310da40ce11671..42d17f7307802a0c2196e59ee5d5abcb37184258 100644 (file)
@@ -1573,6 +1573,8 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
                case BPF_CGROUP_INET6_CONNECT:
                case BPF_CGROUP_UDP4_SENDMSG:
                case BPF_CGROUP_UDP6_SENDMSG:
+               case BPF_CGROUP_UDP4_RECVMSG:
+               case BPF_CGROUP_UDP6_RECVMSG:
                        return 0;
                default:
                        return -EINVAL;
@@ -1867,6 +1869,8 @@ static int bpf_prog_attach(const union bpf_attr *attr)
        case BPF_CGROUP_INET6_CONNECT:
        case BPF_CGROUP_UDP4_SENDMSG:
        case BPF_CGROUP_UDP6_SENDMSG:
+       case BPF_CGROUP_UDP4_RECVMSG:
+       case BPF_CGROUP_UDP6_RECVMSG:
                ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
                break;
        case BPF_CGROUP_SOCK_OPS:
@@ -1952,6 +1956,8 @@ static int bpf_prog_detach(const union bpf_attr *attr)
        case BPF_CGROUP_INET6_CONNECT:
        case BPF_CGROUP_UDP4_SENDMSG:
        case BPF_CGROUP_UDP6_SENDMSG:
+       case BPF_CGROUP_UDP4_RECVMSG:
+       case BPF_CGROUP_UDP6_RECVMSG:
                ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
                break;
        case BPF_CGROUP_SOCK_OPS:
@@ -2003,6 +2009,8 @@ static int bpf_prog_query(const union bpf_attr *attr,
        case BPF_CGROUP_INET6_CONNECT:
        case BPF_CGROUP_UDP4_SENDMSG:
        case BPF_CGROUP_UDP6_SENDMSG:
+       case BPF_CGROUP_UDP4_RECVMSG:
+       case BPF_CGROUP_UDP6_RECVMSG:
        case BPF_CGROUP_SOCK_OPS:
        case BPF_CGROUP_DEVICE:
        case BPF_CGROUP_SYSCTL:
index d15cc4fafa89b5f7e144307350b4668b80c598f4..a5c369e60343d4239285078410cb6d5b470ff48c 100644 (file)
@@ -5353,9 +5353,12 @@ static int check_return_code(struct bpf_verifier_env *env)
        struct tnum range = tnum_range(0, 1);
 
        switch (env->prog->type) {
+       case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+               if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
+                   env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
+                       range = tnum_range(1, 1);
        case BPF_PROG_TYPE_CGROUP_SKB:
        case BPF_PROG_TYPE_CGROUP_SOCK:
-       case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
        case BPF_PROG_TYPE_SOCK_OPS:
        case BPF_PROG_TYPE_CGROUP_DEVICE:
        case BPF_PROG_TYPE_CGROUP_SYSCTL:
@@ -5372,16 +5375,17 @@ static int check_return_code(struct bpf_verifier_env *env)
        }
 
        if (!tnum_in(range, reg->var_off)) {
+               char tn_buf[48];
+
                verbose(env, "At program exit the register R0 ");
                if (!tnum_is_unknown(reg->var_off)) {
-                       char tn_buf[48];
-
                        tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
                        verbose(env, "has value %s", tn_buf);
                } else {
                        verbose(env, "has unknown scalar value");
                }
-               verbose(env, " should have been 0 or 1\n");
+               tnum_strn(tn_buf, sizeof(tn_buf), range);
+               verbose(env, " should have been in %s\n", tn_buf);
                return -EINVAL;
        }
        return 0;
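For the BPF_CGROUP_UDP4_RECVMSG/UDP6_RECVMSG attach types the accepted return range narrows from [0, 1] to exactly 1; the case falls through so the common tnum_in() check still runs. A minimal program sketch (the cgroup/recvmsg4 section name assumes the matching libbpf update; header paths vary by setup):

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("cgroup/recvmsg4")
int observe_recvmsg4(struct bpf_sock_addr *ctx)
{
        /* ctx->user_ip4 / ctx->user_port may be inspected or rewritten here */
        return 1;       /* any other value is now rejected at load time */
}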
index 155048b0eca2c34913bf009d9573ba53a10f8011..bf9dbffd46b11548f80f52d4de58168fa7372274 100644 (file)
@@ -215,7 +215,8 @@ static struct cftype cgroup_base_files[];
 
 static int cgroup_apply_control(struct cgroup *cgrp);
 static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
-static void css_task_iter_advance(struct css_task_iter *it);
+static void css_task_iter_skip(struct css_task_iter *it,
+                              struct task_struct *task);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
                                              struct cgroup_subsys *ss);
@@ -738,6 +739,7 @@ struct css_set init_css_set = {
        .dom_cset               = &init_css_set,
        .tasks                  = LIST_HEAD_INIT(init_css_set.tasks),
        .mg_tasks               = LIST_HEAD_INIT(init_css_set.mg_tasks),
+       .dying_tasks            = LIST_HEAD_INIT(init_css_set.dying_tasks),
        .task_iters             = LIST_HEAD_INIT(init_css_set.task_iters),
        .threaded_csets         = LIST_HEAD_INIT(init_css_set.threaded_csets),
        .cgrp_links             = LIST_HEAD_INIT(init_css_set.cgrp_links),
@@ -843,6 +845,21 @@ static void css_set_update_populated(struct css_set *cset, bool populated)
                cgroup_update_populated(link->cgrp, populated);
 }
 
+/*
+ * @task is leaving, advance task iterators which are pointing to it so
+ * that they can resume at the next position.  Advancing an iterator might
+ * remove it from the list, use safe walk.  See css_task_iter_skip() for
+ * details.
+ */
+static void css_set_skip_task_iters(struct css_set *cset,
+                                   struct task_struct *task)
+{
+       struct css_task_iter *it, *pos;
+
+       list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
+               css_task_iter_skip(it, task);
+}
+
 /**
  * css_set_move_task - move a task from one css_set to another
  * @task: task being moved
@@ -868,22 +885,9 @@ static void css_set_move_task(struct task_struct *task,
                css_set_update_populated(to_cset, true);
 
        if (from_cset) {
-               struct css_task_iter *it, *pos;
-
                WARN_ON_ONCE(list_empty(&task->cg_list));
 
-               /*
-                * @task is leaving, advance task iterators which are
-                * pointing to it so that they can resume at the next
-                * position.  Advancing an iterator might remove it from
-                * the list, use safe walk.  See css_task_iter_advance*()
-                * for details.
-                */
-               list_for_each_entry_safe(it, pos, &from_cset->task_iters,
-                                        iters_node)
-                       if (it->task_pos == &task->cg_list)
-                               css_task_iter_advance(it);
-
+               css_set_skip_task_iters(from_cset, task);
                list_del_init(&task->cg_list);
                if (!css_set_populated(from_cset))
                        css_set_update_populated(from_cset, false);
@@ -1210,6 +1214,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
        cset->dom_cset = cset;
        INIT_LIST_HEAD(&cset->tasks);
        INIT_LIST_HEAD(&cset->mg_tasks);
+       INIT_LIST_HEAD(&cset->dying_tasks);
        INIT_LIST_HEAD(&cset->task_iters);
        INIT_LIST_HEAD(&cset->threaded_csets);
        INIT_HLIST_NODE(&cset->hlist);
@@ -1460,8 +1465,8 @@ struct cgroup *task_cgroup_from_root(struct task_struct *task,
 
 static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
 
-static char *cgroup_fill_name(struct cgroup *cgrp, const struct cftype *cft,
-                             char *buf, bool write_link_name)
+static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
+                             char *buf)
 {
        struct cgroup_subsys *ss = cft->ss;
 
@@ -1471,26 +1476,13 @@ static char *cgroup_fill_name(struct cgroup *cgrp, const struct cftype *cft,
 
                snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
                         dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
-                        write_link_name ? cft->link_name : cft->name);
+                        cft->name);
        } else {
-               strscpy(buf, write_link_name ? cft->link_name : cft->name,
-                       CGROUP_FILE_NAME_MAX);
+               strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
        }
        return buf;
 }
 
-static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
-                             char *buf)
-{
-       return cgroup_fill_name(cgrp, cft, buf, false);
-}
-
-static char *cgroup_link_name(struct cgroup *cgrp, const struct cftype *cft,
-                             char *buf)
-{
-       return cgroup_fill_name(cgrp, cft, buf, true);
-}
-
 /**
  * cgroup_file_mode - deduce file mode of a control file
  * @cft: the control file in question
@@ -1649,9 +1641,6 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
        }
 
        kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
-       if (cft->flags & CFTYPE_SYMLINKED)
-               kernfs_remove_by_name(cgrp->kn,
-                                     cgroup_link_name(cgrp, cft, name));
 }
 
 /**
@@ -3837,7 +3826,6 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
 {
        char name[CGROUP_FILE_NAME_MAX];
        struct kernfs_node *kn;
-       struct kernfs_node *kn_link;
        struct lock_class_key *key = NULL;
        int ret;
 
@@ -3868,14 +3856,6 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
                spin_unlock_irq(&cgroup_file_kn_lock);
        }
 
-       if (cft->flags & CFTYPE_SYMLINKED) {
-               kn_link = kernfs_create_link(cgrp->kn,
-                                            cgroup_link_name(cgrp, cft, name),
-                                            kn);
-               if (IS_ERR(kn_link))
-                       return PTR_ERR(kn_link);
-       }
-
        return 0;
 }
 
@@ -4433,15 +4413,18 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
                        it->task_pos = NULL;
                        return;
                }
-       } while (!css_set_populated(cset));
+       } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
 
        if (!list_empty(&cset->tasks))
                it->task_pos = cset->tasks.next;
-       else
+       else if (!list_empty(&cset->mg_tasks))
                it->task_pos = cset->mg_tasks.next;
+       else
+               it->task_pos = cset->dying_tasks.next;
 
        it->tasks_head = &cset->tasks;
        it->mg_tasks_head = &cset->mg_tasks;
+       it->dying_tasks_head = &cset->dying_tasks;
 
        /*
         * We don't keep css_sets locked across iteration steps and thus
@@ -4467,9 +4450,20 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
        list_add(&it->iters_node, &cset->task_iters);
 }
 
+static void css_task_iter_skip(struct css_task_iter *it,
+                              struct task_struct *task)
+{
+       lockdep_assert_held(&css_set_lock);
+
+       if (it->task_pos == &task->cg_list) {
+               it->task_pos = it->task_pos->next;
+               it->flags |= CSS_TASK_ITER_SKIPPED;
+       }
+}
+
 static void css_task_iter_advance(struct css_task_iter *it)
 {
-       struct list_head *next;
+       struct task_struct *task;
 
        lockdep_assert_held(&css_set_lock);
 repeat:
@@ -4479,25 +4473,40 @@ static void css_task_iter_advance(struct css_task_iter *it)
                 * consumed first and then ->mg_tasks.  After ->mg_tasks,
                 * we move onto the next cset.
                 */
-               next = it->task_pos->next;
-
-               if (next == it->tasks_head)
-                       next = it->mg_tasks_head->next;
+               if (it->flags & CSS_TASK_ITER_SKIPPED)
+                       it->flags &= ~CSS_TASK_ITER_SKIPPED;
+               else
+                       it->task_pos = it->task_pos->next;
 
-               if (next == it->mg_tasks_head)
+               if (it->task_pos == it->tasks_head)
+                       it->task_pos = it->mg_tasks_head->next;
+               if (it->task_pos == it->mg_tasks_head)
+                       it->task_pos = it->dying_tasks_head->next;
+               if (it->task_pos == it->dying_tasks_head)
                        css_task_iter_advance_css_set(it);
-               else
-                       it->task_pos = next;
        } else {
                /* called from start, proceed to the first cset */
                css_task_iter_advance_css_set(it);
        }
 
-       /* if PROCS, skip over tasks which aren't group leaders */
-       if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
-           !thread_group_leader(list_entry(it->task_pos, struct task_struct,
-                                           cg_list)))
-               goto repeat;
+       if (!it->task_pos)
+               return;
+
+       task = list_entry(it->task_pos, struct task_struct, cg_list);
+
+       if (it->flags & CSS_TASK_ITER_PROCS) {
+               /* if PROCS, skip over tasks which aren't group leaders */
+               if (!thread_group_leader(task))
+                       goto repeat;
+
+               /* and dying leaders w/o live member threads */
+               if (!atomic_read(&task->signal->live))
+                       goto repeat;
+       } else {
+               /* skip all dying ones */
+               if (task->flags & PF_EXITING)
+                       goto repeat;
+       }
 }
 
 /**
@@ -4553,6 +4562,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
 
        spin_lock_irq(&css_set_lock);
 
+       /* @it may be half-advanced by skips, finish advancing */
+       if (it->flags & CSS_TASK_ITER_SKIPPED)
+               css_task_iter_advance(it);
+
        if (it->task_pos) {
                it->cur_task = list_entry(it->task_pos, struct task_struct,
                                          cg_list);
@@ -6034,6 +6047,7 @@ void cgroup_exit(struct task_struct *tsk)
        if (!list_empty(&tsk->cg_list)) {
                spin_lock_irq(&css_set_lock);
                css_set_move_task(tsk, cset, NULL, false);
+               list_add_tail(&tsk->cg_list, &cset->dying_tasks);
                cset->nr_tasks--;
 
                WARN_ON_ONCE(cgroup_task_frozen(tsk));
@@ -6059,6 +6073,13 @@ void cgroup_release(struct task_struct *task)
        do_each_subsys_mask(ss, ssid, have_release_callback) {
                ss->release(task);
        } while_each_subsys_mask();
+
+       if (use_task_css_set_links) {
+               spin_lock_irq(&css_set_lock);
+               css_set_skip_task_iters(task_css_set(task), task);
+               list_del_init(&task->cg_list);
+               spin_unlock_irq(&css_set_lock);
+       }
 }
 
 void cgroup_free(struct task_struct *task)
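
The combination of the dying_tasks list, css_set_skip_task_iters() and the CSS_TASK_ITER_SKIPPED flag lets a task iterator survive a task leaving the list it currently points at: the remover nudges the iterator forward and marks it half-advanced, and the next css_task_iter_next() finishes the advance instead of stepping past an extra element. Below is a minimal userspace sketch of the same idea on a circular doubly linked list; the list layout and names are illustrative, not the kernel's list API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *prev, *next; int id; };

struct iter {
	struct node *head;     /* sentinel */
	struct node *pos;      /* node to return next; head means "done" */
	bool skipped;          /* pos was already moved by iter_skip() */
};

static void iter_begin(struct iter *it, struct node *head)
{
	it->head = head;
	it->pos = head->next;
	it->skipped = false;
}

/* Advance to the following node, unless a removal already did it for us. */
static void iter_advance(struct iter *it)
{
	if (it->skipped)
		it->skipped = false;
	else if (it->pos != it->head)
		it->pos = it->pos->next;
}

/* Called by whoever unlinks @n: keep any iterator pointing at it usable. */
static void iter_skip(struct iter *it, struct node *n)
{
	if (it->pos == n) {
		it->pos = n->next;
		it->skipped = true;
	}
}

static void node_remove(struct iter *it, struct node *n)
{
	iter_skip(it, n);
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static struct node *iter_next(struct iter *it)
{
	struct node *n;

	if (it->skipped)           /* half-advanced by a removal: finish it */
		iter_advance(it);
	if (it->pos == it->head)
		return NULL;
	n = it->pos;
	iter_advance(it);
	return n;
}

int main(void)
{
	struct node head = { &head, &head, 0 };
	struct node a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 }, c = { NULL, NULL, 3 };
	struct iter it;
	struct node *n;

	head.next = &a; a.prev = &head; a.next = &b;
	b.prev = &a;    b.next = &c;    c.prev = &b;
	c.next = &head; head.prev = &c;

	iter_begin(&it, &head);
	printf("visited %d\n", iter_next(&it)->id);   /* 1; iterator now points at b */
	node_remove(&it, &b);                         /* b disappears under the iterator */
	while ((n = iter_next(&it)))
		printf("visited %d\n", n->id);        /* 3: removing b did not skip c */
	return 0;
}
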
index 6a1942ed781c5b1712021299fb8227d389126c00..515525ff1cfd32bd9cd27fa1b0d7a06f0cda5064 100644 (file)
@@ -3254,10 +3254,23 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
        spin_unlock_irqrestore(&callback_lock, flags);
 }
 
+/**
+ * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
+ * @tsk: pointer to task_struct with which the scheduler is struggling
+ *
+ * Description: In the case that the scheduler cannot find an allowed cpu in
+ * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
+ * mode however, this value is the same as task_cs(tsk)->effective_cpus,
+ * which will not contain a sane cpumask during cases such as cpu hotplugging.
+ * This is the absolute last resort for the scheduler and it is only used if
+ * _every_ other avenue has been traveled.
+ **/
+
 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
        rcu_read_lock();
-       do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
+       do_set_cpus_allowed(tsk, is_in_v2_mode() ?
+               task_cs(tsk)->cpus_allowed : cpu_possible_mask);
        rcu_read_unlock();
 
        /*
index e74ffdc98a92c57fb634a6de23354fe80b3141c3..c73a87a4df13fa02d96661fa889ec9fa5f4fd7ea 100644 (file)
@@ -446,6 +446,15 @@ int commit_creds(struct cred *new)
                if (task->mm)
                        set_dumpable(task->mm, suid_dumpable);
                task->pdeath_signal = 0;
+               /*
+                * If a task drops privileges and becomes nondumpable,
+                * the dumpability change must become visible before
+                * the credential change; otherwise, a __ptrace_may_access()
+                * racing with this change may be able to attach to a task it
+                * shouldn't be able to attach to (as if the task had dropped
+                * privileges without becoming nondumpable).
+                * Pairs with a read barrier in __ptrace_may_access().
+                */
                smp_wmb();
        }
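
The smp_wmb() added here pairs with the smp_rmb() added to __ptrace_may_access() further below: the writer publishes the dumpability change before the new credentials, and the reader checks credentials before dumpability, so a racing tracer that sees the new credentials is guaranteed to also see the task as nondumpable. A rough C11 sketch of that store/load pairing follows, using release/acquire fences in place of the kernel barriers; the two flags and the access-check logic are stand-ins, not the kernel's real data structures.

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int task_dumpable = 1;   /* stands in for the mm dumpability state */
static _Atomic int task_uid = 0;        /* stands in for the credential change    */

/* Writer side, as in commit_creds(): drop dumpability, then publish the creds. */
static void drop_privileges(int new_uid)
{
	atomic_store_explicit(&task_dumpable, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
	atomic_store_explicit(&task_uid, new_uid, memory_order_relaxed);
}

/* Reader side, as in __ptrace_may_access(): creds first, dumpability second. */
static bool may_attach(int tracer_uid)
{
	int uid = atomic_load_explicit(&task_uid, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */

	/* If we observed the new uid, the fence pairing guarantees we also
	 * observe dumpable == 0, closing the race described in the comments. */
	return uid == tracer_uid &&
	       atomic_load_explicit(&task_dumpable, memory_order_relaxed);
}

int main(void)
{
	drop_privileges(1000);
	return may_attach(1000) ? 1 : 0;   /* 0: attach correctly refused */
}
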
 
index 1803efb2922ffaaa78ebf01e064c820cd8538b4e..a75b6a7f458a7287439e40f0ac102fbb8b61e4a4 100644 (file)
@@ -195,6 +195,7 @@ void release_task(struct task_struct *p)
        rcu_read_unlock();
 
        proc_flush_task(p);
+       cgroup_release(p);
 
        write_lock_irq(&tasklist_lock);
        ptrace_release_task(p);
@@ -220,7 +221,6 @@ void release_task(struct task_struct *p)
        }
 
        write_unlock_irq(&tasklist_lock);
-       cgroup_release(p);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);
 
index 2398832947c6afbf03d13f5f67357d2d67e44550..c4ce08f43bd63536793d47fd8bfa706ed511bbc2 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/elf.h>
 #include <linux/moduleloader.h>
 #include <linux/completion.h>
+#include <linux/memory.h>
 #include <asm/cacheflush.h>
 #include "core.h"
 #include "patch.h"
@@ -718,16 +719,21 @@ static int klp_init_object_loaded(struct klp_patch *patch,
        struct klp_func *func;
        int ret;
 
+       mutex_lock(&text_mutex);
+
        module_disable_ro(patch->mod);
        ret = klp_write_object_relocations(patch->mod, obj);
        if (ret) {
                module_enable_ro(patch->mod, true);
+               mutex_unlock(&text_mutex);
                return ret;
        }
 
        arch_klp_init_object_loaded(patch, obj);
        module_enable_ro(patch->mod, true);
 
+       mutex_unlock(&text_mutex);
+
        klp_for_each_func(obj, func) {
                ret = klp_find_object_symbol(obj->name, func->old_name,
                                             func->old_sympos,
index 1490e63f69a955e3857e8b8e10bdaa62cc063a3d..6e1970719dc23dac7a164560883c14ab7b09c50e 100644 (file)
@@ -95,6 +95,7 @@ static void devm_memremap_pages_release(void *data)
        pgmap->kill(pgmap->ref);
        for_each_device_pfn(pfn, pgmap)
                put_page(pfn_to_page(pfn));
+       pgmap->cleanup(pgmap->ref);
 
        /* pages are dead and unused, undo the arch mapping */
        align_start = res->start & ~(SECTION_SIZE - 1);
@@ -133,8 +134,8 @@ static void devm_memremap_pages_release(void *data)
  * 2/ The altmap field may optionally be initialized, in which case altmap_valid
  *    must be set to true
  *
- * 3/ pgmap->ref must be 'live' on entry and will be killed at
- *    devm_memremap_pages_release() time, or if this routine fails.
+ * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
+ *    at devm_memremap_pages_release() time, or if this routine fails.
  *
  * 4/ res is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
@@ -156,8 +157,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
        pgprot_t pgprot = PAGE_KERNEL;
        int error, nid, is_ram;
 
-       if (!pgmap->ref || !pgmap->kill)
+       if (!pgmap->ref || !pgmap->kill || !pgmap->cleanup) {
+               WARN(1, "Missing reference count teardown definition\n");
                return ERR_PTR(-EINVAL);
+       }
 
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -168,14 +171,16 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
        if (conflict_pgmap) {
                dev_WARN(dev, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
-               return ERR_PTR(-ENOMEM);
+               error = -ENOMEM;
+               goto err_array;
        }
 
        conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
        if (conflict_pgmap) {
                dev_WARN(dev, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
-               return ERR_PTR(-ENOMEM);
+               error = -ENOMEM;
+               goto err_array;
        }
 
        is_ram = region_intersects(align_start, align_size,
@@ -267,10 +272,18 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
        pgmap_array_delete(res);
  err_array:
        pgmap->kill(pgmap->ref);
+       pgmap->cleanup(pgmap->ref);
+
        return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
 
+void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
+{
+       devm_release_action(dev, devm_memremap_pages_release, pgmap);
+}
+EXPORT_SYMBOL_GPL(devm_memunmap_pages);
+
 unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
 {
        /* number of pfns from base where pfn_to_page() is valid */
index 5710d07e67cf7f0f94342b0194939d479871b821..8456b6e2205f760b668647989e879160a1efb0ac 100644 (file)
@@ -324,6 +324,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
        return -EPERM;
 ok:
        rcu_read_unlock();
+       /*
+        * If a task drops privileges and becomes nondumpable (through a syscall
+        * like setresuid()) while we are trying to access it, we must ensure
+        * that the dumpability is read after the credentials; otherwise,
+        * we may be able to attach to a task that we shouldn't be able to
+        * attach to (as if the task had dropped privileges without becoming
+        * nondumpable).
+        * Pairs with a write barrier in commit_creds().
+        */
+       smp_rmb();
        mm = task->mm;
        if (mm &&
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
@@ -705,6 +715,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
        if (arg.nr < 0)
                return -EINVAL;
 
+       /* Ensure arg.off fits in an unsigned long */
+       if (arg.off > ULONG_MAX)
+               return 0;
+
        if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
                pending = &child->signal->shared_pending;
        else
@@ -712,18 +726,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
 
        for (i = 0; i < arg.nr; ) {
                kernel_siginfo_t info;
-               s32 off = arg.off + i;
+               unsigned long off = arg.off + i;
+               bool found = false;
 
                spin_lock_irq(&child->sighand->siglock);
                list_for_each_entry(q, &pending->list, list) {
                        if (!off--) {
+                               found = true;
                                copy_siginfo(&info, &q->info);
                                break;
                        }
                }
                spin_unlock_irq(&child->sighand->siglock);
 
-               if (off >= 0) /* beyond the end of the list */
+               if (!found) /* beyond the end of the list */
                        break;
 
 #ifdef CONFIG_COMPAT
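
Two things change in ptrace_peek_siginfo(): the user-supplied offset is kept in an unsigned long (with anything wider rejected up front) instead of being truncated into an s32, and "walked past the end of the queue" is now tracked with an explicit found flag rather than by testing the sign of the decremented offset, which no longer works once the offset is unsigned. The small userspace illustration below covers both points; the queue length and offsets are made up.

#include <stdint.h>
#include <stdio.h>

#define QUEUE_LEN 4   /* pretend the signal queue holds 4 entries */

int main(void)
{
	uint64_t req_off = UINT64_C(0x100000000);       /* offset from userspace */

	/* Old approach: truncate into a signed 32-bit index.  On common
	 * two's-complement ABIs this particular value becomes 0, so entry 0
	 * would be returned for a request far past the end of the queue. */
	int32_t off32 = (int32_t)req_off;
	printf("truncated s32 offset: %d\n", off32);

	/* New approach: keep the offset unsigned and record explicitly
	 * whether the walk actually reached the requested entry. */
	uint64_t off = req_off;
	int found = 0;

	for (int i = 0; i < QUEUE_LEN; i++) {
		if (!off--) {
			found = 1;
			break;
		}
	}
	printf("entry found: %d\n", found);             /* 0: beyond the end */
	return 0;
}
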
index 7d1008be6173313c807b2abb23f3171ef05cddc8..1beca96fb6252ddc4af07b6292f9dd95c4f53afd 100644 (file)
@@ -230,11 +230,6 @@ static int proc_dostring_coredump(struct ctl_table *table, int write,
 #endif
 static int proc_dopipe_max_size(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp, loff_t *ppos);
-#ifdef CONFIG_BPF_SYSCALL
-static int proc_dointvec_minmax_bpf_stats(struct ctl_table *table, int write,
-                                         void __user *buffer, size_t *lenp,
-                                         loff_t *ppos);
-#endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
 /* Note: sysrq code uses its own private copy */
@@ -1253,12 +1248,10 @@ static struct ctl_table kern_table[] = {
        },
        {
                .procname       = "bpf_stats_enabled",
-               .data           = &sysctl_bpf_stats_enabled,
-               .maxlen         = sizeof(sysctl_bpf_stats_enabled),
+               .data           = &bpf_stats_enabled_key.key,
+               .maxlen         = sizeof(bpf_stats_enabled_key),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax_bpf_stats,
-               .extra1         = &zero,
-               .extra2         = &one,
+               .proc_handler   = proc_do_static_key,
        },
 #endif
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
@@ -3374,26 +3367,35 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
 
 #endif /* CONFIG_PROC_SYSCTL */
 
-#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
-static int proc_dointvec_minmax_bpf_stats(struct ctl_table *table, int write,
-                                         void __user *buffer, size_t *lenp,
-                                         loff_t *ppos)
+#if defined(CONFIG_SYSCTL)
+int proc_do_static_key(struct ctl_table *table, int write,
+                      void __user *buffer, size_t *lenp,
+                      loff_t *ppos)
 {
-       int ret, bpf_stats = *(int *)table->data;
-       struct ctl_table tmp = *table;
+       struct static_key *key = (struct static_key *)table->data;
+       static DEFINE_MUTEX(static_key_mutex);
+       int val, ret;
+       struct ctl_table tmp = {
+               .data   = &val,
+               .maxlen = sizeof(val),
+               .mode   = table->mode,
+               .extra1 = &zero,
+               .extra2 = &one,
+       };
 
        if (write && !capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       tmp.data = &bpf_stats;
+       mutex_lock(&static_key_mutex);
+       val = static_key_enabled(key);
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
        if (write && !ret) {
-               *(int *)table->data = bpf_stats;
-               if (bpf_stats)
-                       static_branch_enable(&bpf_stats_enabled_key);
+               if (val)
+                       static_key_enable(key);
                else
-                       static_branch_disable(&bpf_stats_enabled_key);
+                       static_key_disable(key);
        }
+       mutex_unlock(&static_key_mutex);
        return ret;
 }
 #endif
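
proc_do_static_key() follows a common sysctl pattern: never hand the real object to the generic integer parser; instead, snapshot its state into a local int through a temporary table, let proc_dointvec_minmax() do the parsing and bounds checking, and only then translate the 0/1 result into enable/disable calls under a private mutex. Below is a compact userspace sketch of that shape; the fake_key type, the parser and the buffer handling are simplified stand-ins, not the kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct fake_key   { bool enabled; };              /* stand-in for a static key */
struct fake_table { int *data; int min, max; };   /* stand-in for ctl_table    */

/* Generic bounded integer parser, like proc_dointvec_minmax(). */
static int parse_int_minmax(struct fake_table *t, const char *buf, bool write)
{
	if (!write)
		return 0;

	int v = atoi(buf);

	if (v < t->min || v > t->max)
		return -1;
	*t->data = v;
	return 0;
}

static int do_fake_static_key(struct fake_key *key, const char *buf, bool write)
{
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	int val, ret;
	struct fake_table tmp = { .data = &val, .min = 0, .max = 1 };

	pthread_mutex_lock(&lock);
	val = key->enabled;                  /* snapshot current state for reads */
	ret = parse_int_minmax(&tmp, buf, write);
	if (write && !ret)
		key->enabled = val;          /* ~ static_key_enable()/disable() */
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	struct fake_key key = { .enabled = false };

	return do_fake_static_key(&key, "1", true) == 0 && key.enabled ? 0 : 1;
}
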
index 85f5912d8f704e42ced9f2c0c889a0855671df38..44b726bab4bd973d8d77d25a6bba354427be18ad 100644 (file)
@@ -808,17 +808,18 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
        ktime_t base, *offset = offsets[offs];
+       u64 nsecs;
 
        WARN_ON(timekeeping_suspended);
 
        do {
                seq = read_seqcount_begin(&tk_core.seq);
                base = ktime_add(tk->tkr_mono.base, *offset);
+               nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
-       return base;
-
+       return base + nsecs;
 }
 EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
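
The fix samples tkr_mono.xtime_nsec inside the same seqcount retry loop as the base value, so the two halves of the coarse timestamp always come from one consistent update. Below is a rough userspace analogue of that read-retry pattern using C11 atomics; the kernel's seqcount_t has different internals and stronger tooling, so treat this only as a sketch of the reader/writer shape.

#include <stdatomic.h>
#include <stdint.h>

struct coarse_clock {
	_Atomic unsigned int seq;        /* even: stable, odd: update in flight */
	_Atomic uint64_t base;
	_Atomic uint64_t nsecs;
};

/* Writer: make the sequence odd, update both fields, make it even again. */
static void clock_update(struct coarse_clock *c, uint64_t base, uint64_t nsecs)
{
	unsigned int s = atomic_load_explicit(&c->seq, memory_order_relaxed);

	atomic_store_explicit(&c->seq, s + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&c->base, base, memory_order_relaxed);
	atomic_store_explicit(&c->nsecs, nsecs, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&c->seq, s + 2, memory_order_relaxed);
}

/* Reader: retry until both values were sampled from the same even sequence. */
static uint64_t clock_read(struct coarse_clock *c)
{
	unsigned int s;
	uint64_t base, nsecs;

	do {
		while ((s = atomic_load_explicit(&c->seq, memory_order_relaxed)) & 1)
			/* writer active, spin */;
		atomic_thread_fence(memory_order_acquire);
		base  = atomic_load_explicit(&c->base, memory_order_relaxed);
		nsecs = atomic_load_explicit(&c->nsecs, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&c->seq, memory_order_relaxed) != s);

	return base + nsecs;        /* what the fixed function now returns */
}

int main(void)
{
	static struct coarse_clock c;

	clock_update(&c, 1000, 250);
	return clock_read(&c) == 1250 ? 0 : 1;
}
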
 
index f92d6ad5e080671450b8eba785811718bddb1700..1c9a4745e596d91bde5fc5910fc4f8bcb318529e 100644 (file)
@@ -410,8 +410,6 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
        .arg4_type      = ARG_CONST_SIZE,
 };
 
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
-
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
                        u64 flags, struct perf_sample_data *sd)
@@ -442,24 +440,50 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
        return perf_event_output(event, sd, regs);
 }
 
+/*
+ * Support executing tracepoints in normal, irq, and nmi context that each call
+ * bpf_perf_event_output
+ */
+struct bpf_trace_sample_data {
+       struct perf_sample_data sds[3];
+};
+
+static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
+static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
 {
-       struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
+       struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
+       int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
        struct perf_raw_record raw = {
                .frag = {
                        .size = size,
                        .data = data,
                },
        };
+       struct perf_sample_data *sd;
+       int err;
 
-       if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
-               return -EINVAL;
+       if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       sd = &sds->sds[nest_level - 1];
+
+       if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
+               err = -EINVAL;
+               goto out;
+       }
 
        perf_sample_data_init(sd, 0, 0);
        sd->raw = &raw;
 
-       return __bpf_perf_event_output(regs, map, flags, sd);
+       err = __bpf_perf_event_output(regs, map, flags, sd);
+
+out:
+       this_cpu_dec(bpf_trace_nest_level);
+       return err;
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -822,16 +846,48 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 /*
  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
  * to avoid potential recursive reuse issue when/if tracepoints are added
- * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
+ * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
+ *
+ * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
+ * in normal, irq, and nmi context.
  */
-static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
+struct bpf_raw_tp_regs {
+       struct pt_regs regs[3];
+};
+static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
+static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
+static struct pt_regs *get_bpf_raw_tp_regs(void)
+{
+       struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
+       int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
+
+       if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
+               this_cpu_dec(bpf_raw_tp_nest_level);
+               return ERR_PTR(-EBUSY);
+       }
+
+       return &tp_regs->regs[nest_level - 1];
+}
+
+static void put_bpf_raw_tp_regs(void)
+{
+       this_cpu_dec(bpf_raw_tp_nest_level);
+}
+
 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
           struct bpf_map *, map, u64, flags, void *, data, u64, size)
 {
-       struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+       struct pt_regs *regs = get_bpf_raw_tp_regs();
+       int ret;
+
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
 
        perf_fetch_caller_regs(regs);
-       return ____bpf_perf_event_output(regs, map, flags, data, size);
+       ret = ____bpf_perf_event_output(regs, map, flags, data, size);
+
+       put_bpf_raw_tp_regs();
+       return ret;
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
@@ -848,12 +904,18 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
           struct bpf_map *, map, u64, flags)
 {
-       struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+       struct pt_regs *regs = get_bpf_raw_tp_regs();
+       int ret;
+
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
 
        perf_fetch_caller_regs(regs);
        /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
-       return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
-                              flags, 0, 0);
+       ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+                             flags, 0, 0);
+       put_bpf_raw_tp_regs();
+       return ret;
 }
 
 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
@@ -868,11 +930,17 @@ static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
           void *, buf, u32, size, u64, flags)
 {
-       struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+       struct pt_regs *regs = get_bpf_raw_tp_regs();
+       int ret;
+
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
 
        perf_fetch_caller_regs(regs);
-       return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
-                            (unsigned long) size, flags, 0);
+       ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
+                           (unsigned long) size, flags, 0);
+       put_bpf_raw_tp_regs();
+       return ret;
 }
 
 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
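
Both hunks in this file replace a single per-CPU scratch buffer with a small array indexed by a per-CPU nesting level, so a tracepoint program interrupted by an irq or nmi that also calls bpf_perf_event_output() gets its own buffer instead of corrupting the outer one; a fourth level of nesting is refused with -EBUSY. Below is a minimal userspace sketch of the get/put discipline, using thread-local storage as a stand-in for per-CPU data and a made-up fake_regs type.

#include <errno.h>
#include <stddef.h>

#define MAX_NEST 3    /* task, softirq/irq, nmi-style re-entry */

struct fake_regs { unsigned long ip, sp; };

static _Thread_local struct fake_regs scratch[MAX_NEST];
static _Thread_local int nest_level;

/* Hand out the buffer for the current nesting depth, or NULL if another
 * level of nesting would stomp on a buffer that is still in use. */
static struct fake_regs *get_scratch_regs(void)
{
	if (nest_level >= MAX_NEST)
		return NULL;
	return &scratch[nest_level++];
}

static void put_scratch_regs(void)
{
	nest_level--;
}

/* Every user brackets its work with get/put, as the raw-tp helpers now do. */
static int emit_event(void)
{
	struct fake_regs *regs = get_scratch_regs();
	int ret = 0;

	if (!regs)
		return -EBUSY;

	regs->ip = 0;              /* ... fill and consume the buffer ... */
	regs->sp = 0;

	put_scratch_regs();
	return ret;
}

int main(void)
{
	return emit_event();
}
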
index a12aff849c0437021b46d3351e16274824cbbc87..38277af44f5c4c4fa99eba4bdf22b383e8b8065f 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
 #include <linux/kprobes.h>
+#include <linux/memory.h>
 
 #include <trace/events/sched.h>
 
@@ -2610,10 +2611,12 @@ static void ftrace_run_update_code(int command)
 {
        int ret;
 
+       mutex_lock(&text_mutex);
+
        ret = ftrace_arch_code_modify_prepare();
        FTRACE_WARN_ON(ret);
        if (ret)
-               return;
+               goto out_unlock;
 
        /*
         * By default we use stop_machine() to modify the code.
@@ -2625,6 +2628,9 @@ static void ftrace_run_update_code(int command)
 
        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
+
+out_unlock:
+       mutex_unlock(&text_mutex);
 }
 
 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
@@ -2935,14 +2941,13 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
                        p = &pg->records[i];
                        p->flags = rec_flags;
 
-#ifndef CC_USING_NOP_MCOUNT
                        /*
                         * Do the initial record conversion from mcount jump
                         * to the NOP instructions.
                         */
-                       if (!ftrace_code_disable(mod, p))
+                       if (!__is_defined(CC_USING_NOP_MCOUNT) &&
+                           !ftrace_code_disable(mod, p))
                                break;
-#endif
 
                        update_cnt++;
                }
@@ -4221,10 +4226,13 @@ void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
        struct ftrace_func_entry *entry;
        struct ftrace_func_map *map;
        struct hlist_head *hhd;
-       int size = 1 << mapper->hash.size_bits;
-       int i;
+       int size, i;
+
+       if (!mapper)
+               return;
 
        if (free_func && mapper->hash.count) {
+               size = 1 << mapper->hash.size_bits;
                for (i = 0; i < size; i++) {
                        hhd = &mapper->hash.buckets[i];
                        hlist_for_each_entry(entry, hhd, hlist) {
@@ -5776,6 +5784,7 @@ void ftrace_module_enable(struct module *mod)
        struct ftrace_page *pg;
 
        mutex_lock(&ftrace_lock);
+       mutex_lock(&text_mutex);
 
        if (ftrace_disabled)
                goto out_unlock;
@@ -5837,6 +5846,7 @@ void ftrace_module_enable(struct module *mod)
                ftrace_arch_code_modify_post_process();
 
  out_unlock:
+       mutex_unlock(&text_mutex);
        mutex_unlock(&ftrace_lock);
 
        process_cached_mods(mod->name);
index 1c80521fd43602041d797032a26cac79f665314c..83e08b78dbee9e60bb49f2dde6394e43844289e3 100644 (file)
@@ -6923,7 +6923,7 @@ struct tracing_log_err {
 
 static DEFINE_MUTEX(tracing_err_log_lock);
 
-struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
+static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
 {
        struct tracing_log_err *err;
 
@@ -8192,7 +8192,7 @@ static const struct file_operations buffer_percent_fops = {
        .llseek         = default_llseek,
 };
 
-struct dentry *trace_instance_dir;
+static struct dentry *trace_instance_dir;
 
 static void
 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
index 54373d93e251a1c01aa85ec918024cd3d5e9bca8..ba751f993c3b6d7aab1e417cf3deccb3f54473f0 100644 (file)
@@ -1057,7 +1057,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 
        trace_seq_puts(s, "<stack trace>\n");
 
-       for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
+       for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {
 
                if (trace_seq_has_overflowed(s))
                        break;
index eb7e06b54741beec641f536a645597698a5df986..b55906c77ce0602ad6d7f0c20225e99f288ddbb9 100644 (file)
@@ -426,8 +426,6 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 /*
  * Argument syntax:
  *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
- *
- *  - Remove uprobe: -:[GRP/]EVENT
  */
 static int trace_uprobe_create(int argc, const char **argv)
 {
@@ -443,10 +441,17 @@ static int trace_uprobe_create(int argc, const char **argv)
        ret = 0;
        ref_ctr_offset = 0;
 
-       /* argc must be >= 1 */
-       if (argv[0][0] == 'r')
+       switch (argv[0][0]) {
+       case 'r':
                is_return = true;
-       else if (argv[0][0] != 'p' || argc < 2)
+               break;
+       case 'p':
+               break;
+       default:
+               return -ECANCELED;
+       }
+
+       if (argc < 2)
                return -ECANCELED;
 
        if (argv[0][1] == ':')
index 7e85d1e37a6eafd1f1507b3ec4c2a8095216e432..770c769d7cb7ff527e2fd48bb037f54ceeebd857 100644 (file)
@@ -168,20 +168,21 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 EXPORT_SYMBOL(gen_pool_create);
 
 /**
- * gen_pool_add_virt - add a new chunk of special memory to the pool
+ * gen_pool_add_owner- add a new chunk of special memory to the pool
  * @pool: pool to add new memory chunk to
  * @virt: virtual starting address of memory chunk to add to pool
  * @phys: physical starting address of memory chunk to add to pool
  * @size: size in bytes of the memory chunk to add to pool
  * @nid: node id of the node the chunk structure and bitmap should be
  *       allocated on, or -1
+ * @owner: private data the publisher would like to recall at alloc time
  *
  * Add a new chunk of special memory to the specified pool.
  *
  * Returns 0 on success or a -ve errno on failure.
  */
-int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
-                size_t size, int nid)
+int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
+                size_t size, int nid, void *owner)
 {
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
@@ -195,6 +196,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
+       chunk->owner = owner;
        atomic_long_set(&chunk->avail, size);
 
        spin_lock(&pool->lock);
@@ -203,7 +205,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 
        return 0;
 }
-EXPORT_SYMBOL(gen_pool_add_virt);
+EXPORT_SYMBOL(gen_pool_add_owner);
 
 /**
  * gen_pool_virt_to_phys - return the physical address of memory
@@ -260,35 +262,20 @@ void gen_pool_destroy(struct gen_pool *pool)
 EXPORT_SYMBOL(gen_pool_destroy);
 
 /**
- * gen_pool_alloc - allocate special memory from the pool
- * @pool: pool to allocate from
- * @size: number of bytes to allocate from the pool
- *
- * Allocate the requested number of bytes from the specified pool.
- * Uses the pool allocation function (with first-fit algorithm by default).
- * Can not be used in NMI handler on architectures without
- * NMI-safe cmpxchg implementation.
- */
-unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
-{
-       return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
-}
-EXPORT_SYMBOL(gen_pool_alloc);
-
-/**
- * gen_pool_alloc_algo - allocate special memory from the pool
+ * gen_pool_alloc_algo_owner - allocate special memory from the pool
  * @pool: pool to allocate from
  * @size: number of bytes to allocate from the pool
  * @algo: algorithm passed from caller
  * @data: data passed to algorithm
+ * @owner: optionally retrieve the chunk owner
  *
  * Allocate the requested number of bytes from the specified pool.
  * Uses the pool allocation function (with first-fit algorithm by default).
  * Can not be used in NMI handler on architectures without
  * NMI-safe cmpxchg implementation.
  */
-unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
-               genpool_algo_t algo, void *data)
+unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
+               genpool_algo_t algo, void *data, void **owner)
 {
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
@@ -299,6 +286,9 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
        BUG_ON(in_nmi());
 #endif
 
+       if (owner)
+               *owner = NULL;
+
        if (size == 0)
                return 0;
 
@@ -326,12 +316,14 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_long_sub(size, &chunk->avail);
+               if (owner)
+                       *owner = chunk->owner;
                break;
        }
        rcu_read_unlock();
        return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc_algo);
+EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
 
 /**
  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
@@ -367,12 +359,14 @@ EXPORT_SYMBOL(gen_pool_dma_alloc);
  * @pool: pool to free to
  * @addr: starting address of memory to free back to pool
  * @size: size in bytes of memory to free
+ * @owner: private data stashed at gen_pool_add() time
  *
  * Free previously allocated special memory back to the specified
  * pool.  Can not be used in NMI handler on architectures without
  * NMI-safe cmpxchg implementation.
  */
-void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
+               void **owner)
 {
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
@@ -382,6 +376,9 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
        BUG_ON(in_nmi());
 #endif
 
+       if (owner)
+               *owner = NULL;
+
        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
@@ -392,6 +389,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_long_add(size, &chunk->avail);
+                       if (owner)
+                               *owner = chunk->owner;
                        rcu_read_unlock();
                        return;
                }
@@ -399,7 +398,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
        rcu_read_unlock();
        BUG();
 }
-EXPORT_SYMBOL(gen_pool_free);
+EXPORT_SYMBOL(gen_pool_free_owner);
 
 /**
  * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
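
The owner pointer threaded through these helpers lets a publisher stash a per-chunk cookie at gen_pool_add_owner() time and get it back from every allocation or free that lands in that chunk. The sketch below shows how a caller might use the new entry points, based on the signatures visible in this hunk; the my_* names and the chunk context type are hypothetical, and this is a kernel-style usage sketch rather than a complete driver.

#include <linux/genalloc.h>

struct my_chunk_ctx {
	int region_id;        /* whatever the publisher wants to recall later */
};

static int my_add_region(struct gen_pool *pool, unsigned long virt,
			 phys_addr_t phys, size_t size, struct my_chunk_ctx *ctx)
{
	/* stash @ctx with the chunk so allocations can recall it */
	return gen_pool_add_owner(pool, virt, phys, size, NUMA_NO_NODE, ctx);
}

static unsigned long my_alloc(struct gen_pool *pool, size_t size,
			      struct my_chunk_ctx **ctx)
{
	/* first-fit, no algorithm-private data; *ctx is set from the chunk hit */
	return gen_pool_alloc_algo_owner(pool, size, gen_pool_first_fit, NULL,
					 (void **)ctx);
}

static void my_free(struct gen_pool *pool, unsigned long addr, size_t size,
		    struct my_chunk_ctx **ctx)
{
	/* on return, *ctx again points at the owner stored by my_add_region() */
	gen_pool_free_owner(pool, addr, size, (void **)ctx);
}
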
index e97dc54b4fdf3fae180c4f276814a5fa5cd1aafd..2d7d257a430e6d24c78f6193419f96578496126e 100644 (file)
@@ -12,7 +12,7 @@
 
 /* Exfiltration buffer. */
 #define MAX_VAR_SIZE   128
-static char check_buf[MAX_VAR_SIZE];
+static u8 check_buf[MAX_VAR_SIZE];
 
 /* Character array to trigger stack protector in all functions. */
 #define VAR_BUFFER      32
@@ -106,9 +106,18 @@ static noinline __init int test_ ## name (void)                    \
                                                                \
        /* Fill clone type with zero for per-field init. */     \
        memset(&zero, 0x00, sizeof(zero));                      \
+       /* Clear entire check buffer for 0xFF overlap test. */  \
+       memset(check_buf, 0x00, sizeof(check_buf));             \
        /* Fill stack with 0xFF. */                             \
        ignored = leaf_ ##name((unsigned long)&ignored, 1,      \
                                FETCH_ARG_ ## which(zero));     \
+       /* Verify all bytes overwritten with 0xFF. */           \
+       for (sum = 0, i = 0; i < target_size; i++)              \
+               sum += (check_buf[i] != 0xFF);                  \
+       if (sum) {                                              \
+               pr_err(#name ": leaf fill was not 0xFF!?\n");   \
+               return 1;                                       \
+       }                                                       \
        /* Clear entire check buffer for later bit tests. */    \
        memset(check_buf, 0x00, sizeof(check_buf));             \
        /* Extract stack-defined variable contents. */          \
@@ -126,9 +135,9 @@ static noinline __init int test_ ## name (void)                     \
                return 1;                                       \
        }                                                       \
                                                                \
-       /* Look for any set bits in the check region. */        \
-       for (i = 0; i < sizeof(check_buf); i++)                 \
-               sum += (check_buf[i] != 0);                     \
+       /* Look for any bytes still 0xFF in check region. */    \
+       for (sum = 0, i = 0; i < target_size; i++)              \
+               sum += (check_buf[i] == 0xFF);                  \
                                                                \
        if (sum == 0)                                           \
                pr_info(#name " ok\n");                         \
@@ -162,13 +171,13 @@ static noinline __init int leaf_ ## name(unsigned long sp,        \
         * Keep this buffer around to make sure we've got a     \
         * stack frame of SOME kind...                          \
         */                                                     \
-       memset(buf, (char)(sp && 0xff), sizeof(buf));           \
+       memset(buf, (char)(sp & 0xff), sizeof(buf));            \
        /* Fill variable with 0xFF. */                          \
        if (fill) {                                             \
                fill_start = &var;                              \
                fill_size = sizeof(var);                        \
                memset(fill_start,                              \
-                      (char)((sp && 0xff) | forced_mask),      \
+                      (char)((sp & 0xff) | forced_mask),       \
                       fill_size);                              \
        }                                                       \
                                                                \
index c5d840e34b282abe448568fd7882f9452d3e263f..f702a3895d05d828dba9e59198c0914f8b5f5a72 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1354,9 +1354,8 @@ static void hmm_devmem_ref_release(struct percpu_ref *ref)
        complete(&devmem->completion);
 }
 
-static void hmm_devmem_ref_exit(void *data)
+static void hmm_devmem_ref_exit(struct percpu_ref *ref)
 {
-       struct percpu_ref *ref = data;
        struct hmm_devmem *devmem;
 
        devmem = container_of(ref, struct hmm_devmem, ref);
@@ -1433,10 +1432,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
        if (ret)
                return ERR_PTR(ret);
 
-       ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
-       if (ret)
-               return ERR_PTR(ret);
-
        size = ALIGN(size, PA_SECTION_SIZE);
        addr = min((unsigned long)iomem_resource.end,
                   (1UL << MAX_PHYSMEM_BITS) - 1);
@@ -1475,6 +1470,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
        devmem->pagemap.ref = &devmem->ref;
        devmem->pagemap.data = devmem;
        devmem->pagemap.kill = hmm_devmem_ref_kill;
+       devmem->pagemap.cleanup = hmm_devmem_ref_exit;
 
        result = devm_memremap_pages(devmem->device, &devmem->pagemap);
        if (IS_ERR(result))
@@ -1512,11 +1508,6 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
        if (ret)
                return ERR_PTR(ret);
 
-       ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
-                       &devmem->ref);
-       if (ret)
-               return ERR_PTR(ret);
-
        devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
        devmem->pfn_last = devmem->pfn_first +
                           (resource_size(devmem->resource) >> PAGE_SHIFT);
@@ -1529,6 +1520,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
        devmem->pagemap.ref = &devmem->ref;
        devmem->pagemap.data = devmem;
        devmem->pagemap.kill = hmm_devmem_ref_kill;
+       devmem->pagemap.cleanup = hmm_devmem_ref_exit;
 
        result = devm_memremap_pages(devmem->device, &devmem->pagemap);
        if (IS_ERR(result))
index a335f7c1fac428b30d7e352ebaf717e033724f74..0f741993800845399ef7a371c45f9e0a48d3bc86 100644 (file)
@@ -1004,6 +1004,9 @@ static void collapse_huge_page(struct mm_struct *mm,
         * handled by the anon_vma lock + PG_lock.
         */
        down_write(&mm->mmap_sem);
+       result = SCAN_ANY_PROCESS;
+       if (!mmget_still_valid(mm))
+               goto out;
        result = hugepage_vma_revalidate(mm, address, &vma);
        if (result)
                goto out;
index e4709fdaa8e6af8a35114af54f5dfaa3436a5d1c..927d85be32f62deb58da1029603a01e3a04e61d9 100644 (file)
@@ -354,7 +354,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
        }
        return 0;
 fail:
-       __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
+       __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
 }
 
index ca0bc6e6be1316602def0ac42bc4ffddef14cf08..ba9138a4a1de37f744eaf488fc0930b82f60b3e1 100644 (file)
@@ -691,11 +691,12 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
        if (mem_cgroup_disabled())
                return;
 
+       __this_cpu_add(memcg->vmstats_local->stat[idx], val);
+
        x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
        if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup *mi;
 
-               atomic_long_add(x, &memcg->vmstats_local[idx]);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmstats[idx]);
                x = 0;
@@ -745,11 +746,12 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
        __mod_memcg_state(memcg, idx, val);
 
        /* Update lruvec */
+       __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
+
        x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
        if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup_per_node *pi;
 
-               atomic_long_add(x, &pn->lruvec_stat_local[idx]);
                for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
                        atomic_long_add(x, &pi->lruvec_stat[idx]);
                x = 0;
@@ -771,11 +773,12 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
        if (mem_cgroup_disabled())
                return;
 
+       __this_cpu_add(memcg->vmstats_local->events[idx], count);
+
        x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
        if (unlikely(x > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup *mi;
 
-               atomic_long_add(x, &memcg->vmevents_local[idx]);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmevents[idx]);
                x = 0;
@@ -790,7 +793,12 @@ static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 
 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 {
-       return atomic_long_read(&memcg->vmevents_local[event]);
+       long x = 0;
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               x += per_cpu(memcg->vmstats_local->events[event], cpu);
+       return x;
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
@@ -2191,11 +2199,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
                        long x;
 
                        x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
-                       if (x) {
-                               atomic_long_add(x, &memcg->vmstats_local[i]);
+                       if (x)
                                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                                        atomic_long_add(x, &memcg->vmstats[i]);
-                       }
 
                        if (i >= NR_VM_NODE_STAT_ITEMS)
                                continue;
@@ -2205,12 +2211,10 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 
                                pn = mem_cgroup_nodeinfo(memcg, nid);
                                x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
-                               if (x) {
-                                       atomic_long_add(x, &pn->lruvec_stat_local[i]);
+                               if (x)
                                        do {
                                                atomic_long_add(x, &pn->lruvec_stat[i]);
                                        } while ((pn = parent_nodeinfo(pn, nid)));
-                               }
                        }
                }
 
@@ -2218,11 +2222,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
                        long x;
 
                        x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
-                       if (x) {
-                               atomic_long_add(x, &memcg->vmevents_local[i]);
+                       if (x)
                                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                                        atomic_long_add(x, &memcg->vmevents[i]);
-                       }
                }
        }
 
@@ -4483,8 +4485,15 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
        if (!pn)
                return 1;
 
+       pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
+       if (!pn->lruvec_stat_local) {
+               kfree(pn);
+               return 1;
+       }
+
        pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
        if (!pn->lruvec_stat_cpu) {
+               free_percpu(pn->lruvec_stat_local);
                kfree(pn);
                return 1;
        }
@@ -4506,6 +4515,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
                return;
 
        free_percpu(pn->lruvec_stat_cpu);
+       free_percpu(pn->lruvec_stat_local);
        kfree(pn);
 }
 
@@ -4516,6 +4526,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
        for_each_node(node)
                free_mem_cgroup_per_node_info(memcg, node);
        free_percpu(memcg->vmstats_percpu);
+       free_percpu(memcg->vmstats_local);
        kfree(memcg);
 }
 
@@ -4544,6 +4555,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        if (memcg->id.id < 0)
                goto fail;
 
+       memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
+       if (!memcg->vmstats_local)
+               goto fail;
+
        memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
        if (!memcg->vmstats_percpu)
                goto fail;
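
The memcg hunks above replace the atomic vmstats_local/vmevents_local counters with genuinely per-CPU ones: updates only touch the local CPU's slot, and readers such as memcg_events_local() sum over all possible CPUs on demand. Below is a small sketch of that trade-off with a plain array standing in for per-CPU storage; there is no concurrency handling here, the point is only the cheap-update/expensive-read split.

#define NR_FAKE_CPUS 4

struct counter {
	long percpu[NR_FAKE_CPUS];   /* one slot per CPU, like alloc_percpu() */
};

/* Hot path: bump only the local slot, no cross-CPU traffic. */
static void counter_add(struct counter *c, int cpu, long val)
{
	c->percpu[cpu] += val;
}

/* Slow path: fold all slots together, like the for_each_possible_cpu()
 * sum that memcg_events_local() now performs. */
static long counter_read(const struct counter *c)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		sum += c->percpu[cpu];
	return sum;
}

int main(void)
{
	struct counter c = { { 0 } };

	counter_add(&c, 0, 3);
	counter_add(&c, 2, 4);
	return counter_read(&c) == 7 ? 0 : 1;
}
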
index 080f3b36415b2ec34999dbc3d7dc6ea8f876cddd..a90099da4fb41658a21ed290cfe7c8528273f5ea 100644 (file)
@@ -636,11 +636,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
  * is also counted.
  * Return value: previously mlocked page counts
  */
-static int count_mm_mlocked_page_nr(struct mm_struct *mm,
+static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
                unsigned long start, size_t len)
 {
        struct vm_area_struct *vma;
-       int count = 0;
+       unsigned long count = 0;
 
        if (mm == NULL)
                mm = current->mm;
@@ -797,7 +797,8 @@ SYSCALL_DEFINE1(mlockall, int, flags)
        unsigned long lock_limit;
        int ret;
 
-       if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)))
+       if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
+           flags == MCL_ONFAULT)
                return -EINVAL;
 
        if (!can_do_mlock())
index 99740e1dd27304b31097eb3deedc309a6c398490..8c943a6e1696c095d77ae65bb5d784cc09a85afd 100644 (file)
@@ -245,14 +245,28 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
 {
        /*
         * If there are parallel threads are doing PTE changes on same range
-        * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
-        * flush by batching, a thread has stable TLB entry can fail to flush
-        * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
-        * forcefully if we detect parallel PTE batching threads.
+        * under non-exclusive lock (e.g., mmap_sem read-side) but defer TLB
+        * flush by batching, one thread may end up seeing inconsistent PTEs
+        * and result in having stale TLB entries.  So flush TLB forcefully
+        * if we detect parallel PTE batching threads.
+        *
+        * However, some syscalls, e.g. munmap(), may free page tables, this
+        * needs force flush everything in the given range. Otherwise this
+        * may result in having stale TLB entries for some architectures,
+        * e.g. aarch64, that could specify flush what level TLB.
         */
        if (mm_tlb_flush_nested(tlb->mm)) {
+               /*
+                * The aarch64 yields better performance with fullmm by
+                * avoiding multiple CPUs spamming TLBI messages at the
+                * same time.
+                *
+                * On x86 non-fullmm doesn't yield significant difference
+                * against fullmm.
+                */
+               tlb->fullmm = 1;
                __tlb_reset_range(tlb);
-               __tlb_adjust_range(tlb, start, end - start);
+               tlb->freed_tables = 1;
        }
 
        tlb_flush_mmu(tlb);
index 7350a124524bb4b203fff489c577586661cbf03d..4c9e150e5ad3d26356666457e2d9fb97f807c0a1 100644 (file)
@@ -2123,9 +2123,9 @@ static inline void set_area_direct_map(const struct vm_struct *area,
 /* Handle removing and resetting vm mappings related to the vm_struct. */
 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 {
-       unsigned long addr = (unsigned long)area->addr;
        unsigned long start = ULONG_MAX, end = 0;
        int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
+       int flush_dmap = 0;
        int i;
 
        /*
@@ -2135,8 +2135,8 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
         * execute permissions, without leaving a RW+X window.
         */
        if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
-               set_memory_nx(addr, area->nr_pages);
-               set_memory_rw(addr, area->nr_pages);
+               set_memory_nx((unsigned long)area->addr, area->nr_pages);
+               set_memory_rw((unsigned long)area->addr, area->nr_pages);
        }
 
        remove_vm_area(area->addr);
@@ -2160,9 +2160,11 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
         * the vm_unmap_aliases() flush includes the direct map.
         */
        for (i = 0; i < area->nr_pages; i++) {
-               if (page_address(area->pages[i])) {
+               unsigned long addr = (unsigned long)page_address(area->pages[i]);
+               if (addr) {
                        start = min(addr, start);
-                       end = max(addr, end);
+                       end = max(addr + PAGE_SIZE, end);
+                       flush_dmap = 1;
                }
        }
 
@@ -2172,7 +2174,7 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
         * reset the direct map permissions to the default.
         */
        set_area_direct_map(area, set_direct_map_invalid_noflush);
-       _vm_unmap_aliases(start, end, 1);
+       _vm_unmap_aliases(start, end, flush_dmap);
        set_area_direct_map(area, set_direct_map_default_noflush);
 }
 
index 7acd0afdfc2a707843c21fa59e6a91a2f22f2a43..7889f583ced9fef1319cadf94a8e2ca7d9d4c40e 100644 (file)
@@ -1505,7 +1505,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 
        list_for_each_entry_safe(page, next, page_list, lru) {
                if (page_is_file_cache(page) && !PageDirty(page) &&
-                   !__PageMovable(page)) {
+                   !__PageMovable(page) && !PageUnevictable(page)) {
                        ClearPageActive(page);
                        list_move(&page->lru, &clean_pages);
                }
@@ -1953,8 +1953,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        if (global_reclaim(sc))
                __count_vm_events(item, nr_reclaimed);
        __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
-       reclaim_stat->recent_rotated[0] = stat.nr_activate[0];
-       reclaim_stat->recent_rotated[1] = stat.nr_activate[1];
+       reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
+       reclaim_stat->recent_rotated[1] += stat.nr_activate[1];
 
        move_pages_to_lru(lruvec, &page_list);
 
index 09fdd0aac4b907b926b3ecc70d3cdde207b73b12..b40e0bce67ead7d1dd36f435aa51bb9c53fa0e19 100644 (file)
@@ -426,9 +426,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
        }
 
        if (ax25->sk != NULL) {
+               local_bh_disable();
                bh_lock_sock(ax25->sk);
                sock_reset_flag(ax25->sk, SOCK_ZAPPED);
                bh_unlock_sock(ax25->sk);
+               local_bh_enable();
        }
 
 put:
index e8fd5dc1780aeb0a5b2f6b888052938c931913a5..80281ef2ccbd227fcdb6a2f482c70c35dd1c1def 100644 (file)
@@ -99,6 +99,7 @@ EXPORT_SYMBOL(can_ioctl);
 static void can_sock_destruct(struct sock *sk)
 {
        skb_queue_purge(&sk->sk_receive_queue);
+       skb_queue_purge(&sk->sk_error_queue);
 }
 
 static const struct can_proto *can_get_proto(int protocol)
@@ -952,6 +953,8 @@ static struct pernet_operations can_pernet_ops __read_mostly = {
 
 static __init int can_init(void)
 {
+       int err;
+
        /* check for correct padding to be able to use the structs similarly */
        BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
                     offsetof(struct canfd_frame, len) ||
@@ -965,15 +968,31 @@ static __init int can_init(void)
        if (!rcv_cache)
                return -ENOMEM;
 
-       register_pernet_subsys(&can_pernet_ops);
+       err = register_pernet_subsys(&can_pernet_ops);
+       if (err)
+               goto out_pernet;
 
        /* protocol register */
-       sock_register(&can_family_ops);
-       register_netdevice_notifier(&can_netdev_notifier);
+       err = sock_register(&can_family_ops);
+       if (err)
+               goto out_sock;
+       err = register_netdevice_notifier(&can_netdev_notifier);
+       if (err)
+               goto out_notifier;
+
        dev_add_pack(&can_packet);
        dev_add_pack(&canfd_packet);
 
        return 0;
+
+out_notifier:
+       sock_unregister(PF_CAN);
+out_sock:
+       unregister_pernet_subsys(&can_pernet_ops);
+out_pernet:
+       kmem_cache_destroy(rcv_cache);
+
+       return err;
 }
 
 static __exit void can_exit(void)
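
can_init() now checks each registration and unwinds the ones that already succeeded, in reverse order, before destroying the receive cache. A self-contained sketch of that goto-unwind shape (the step_*/undo_* names are stand-ins for illustration, not CAN subsystem functions):

    #include <stdio.h>

    static int step_a(void) { return 0; }               /* e.g. register_pernet_subsys() */
    static int step_b(void) { return 0; }               /* e.g. sock_register() */
    static int step_c(void) { return -1; }              /* fails: forces the unwind path */
    static void undo_b(void) { puts("undo b"); }        /* e.g. sock_unregister() */
    static void undo_a(void) { puts("undo a"); }        /* e.g. unregister_pernet_subsys() */
    static void undo_prealloc(void) { puts("undo prealloc"); } /* e.g. kmem_cache_destroy() */

    static int init_three(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto out_a;
            err = step_b();
            if (err)
                    goto out_b;
            err = step_c();
            if (err)
                    goto out_c;
            return 0;

    out_c:
            undo_b();
    out_b:
            undo_a();
    out_a:
            undo_prealloc();
            return err;
    }

    int main(void)
    {
            return init_three() ? 1 : 0;
    }
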
index cc9597a877707f3de6e7bbfdec3d3c7ae9f56fbc..d1c4e1f3be2c71385e5c4414b8c831e4b2e4a91d 100644 (file)
@@ -633,7 +633,8 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
                return ERR_PTR(-ENOMEM);
        bpf_map_init_from_attr(&smap->map, attr);
 
-       smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus()));
+       /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
+       smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
        nbuckets = 1U << smap->bucket_log;
        smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
                                 GFP_USER | __GFP_NOWARN);
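
With a single possible CPU, roundup_pow_of_two(1) is 1 and ilog2(1) is 0, so the old code ended up with exactly one bucket, which the comment notes is undefined behavior for select_bucket(); clamping bucket_log to at least 1 guarantees two buckets. A quick user-space check of that arithmetic (the helpers below merely mimic the kernel's roundup_pow_of_two()/ilog2() for small inputs):

    #include <stdio.h>

    static unsigned int roundup_pow_of_two_u32(unsigned int v)
    {
            unsigned int p = 1;

            while (p < v)
                    p <<= 1;
            return p;
    }

    static unsigned int ilog2_u32(unsigned int v)       /* floor(log2(v)), v > 0 */
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int cpus;

            for (cpus = 1; cpus <= 4; cpus++) {
                    unsigned int log = ilog2_u32(roundup_pow_of_two_u32(cpus));
                    unsigned int clamped = log < 1 ? 1 : log;   /* max_t(u32, 1, log) */

                    printf("cpus=%u old_buckets=%u new_buckets=%u\n",
                           cpus, 1u << log, 1u << clamped);
            }
            return 0;   /* cpus=1 is the only case that changes: 1 -> 2 buckets */
    }
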
index eb7fb6daa1efcadf1723813161c14193785fbf83..d6edd218babdde1ee35da7c0823ddf90dd5ba7cb 100644 (file)
@@ -4923,8 +4923,36 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
        }
 
        if (unlikely(skb_vlan_tag_present(skb))) {
-               if (skb_vlan_tag_get_id(skb))
+check_vlan_id:
+               if (skb_vlan_tag_get_id(skb)) {
+                       /* Vlan id is non 0 and vlan_do_receive() above couldn't
+                        * find vlan device.
+                        */
                        skb->pkt_type = PACKET_OTHERHOST;
+               } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+                          skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+                       /* Outer header is 802.1P with vlan 0, inner header is
+                        * 802.1Q or 802.1AD and vlan_do_receive() above could
+                        * not find vlan dev for vlan id 0.
+                        */
+                       __vlan_hwaccel_clear_tag(skb);
+                       skb = skb_vlan_untag(skb);
+                       if (unlikely(!skb))
+                               goto out;
+                       if (vlan_do_receive(&skb))
+                               /* After stripping off 802.1P header with vlan 0
+                                * vlan dev is found for inner header.
+                                */
+                               goto another_round;
+                       else if (unlikely(!skb))
+                               goto out;
+                       else
+                               /* We have stripped outer 802.1P vlan 0 header.
+                                * But could not find vlan dev.
+                                * check again for vlan id to set OTHERHOST.
+                                */
+                               goto check_vlan_id;
+               }
                /* Note: we might in the future use prio bits
                 * and set skb->priority like in vlan_do_receive()
                 * For the time being, just ignore Priority Code Point
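
The new branch handles frames whose outer tag is an 802.1Q/802.1AD priority tag (VLAN ID 0) wrapping a second tag: the outer tag is cleared and stripped, vlan_do_receive() is retried on the inner tag, and only if that also fails is the packet marked PACKET_OTHERHOST. A simplified decision-tree sketch (names and return values are illustrative, not the kernel's):

    enum rx_verdict { RX_OTHERHOST, RX_RETRY_INNER, RX_PASS };

    /* Called after vlan_do_receive() failed to find a vlan device. */
    enum rx_verdict classify_vlan_tag(unsigned int vid, int outer_is_8021q_or_ad)
    {
            if (vid)                        /* non-zero VID with no vlan device */
                    return RX_OTHERHOST;
            if (outer_is_8021q_or_ad)       /* VID-0 priority tag over another tag */
                    return RX_RETRY_INNER;  /* strip it, look up the inner VID again */
            return RX_PASS;                 /* plain priority tag: continue as before */
    }
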
index d08b1e19ce9c1ea5913ad1a1949ee1033f0f1236..4d1011b2e24f5ebc2695d7aa7825564756d928f8 100644 (file)
@@ -3020,6 +3020,11 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
                        match->mask.vlan.vlan_id =
                                ntohs(ext_m_spec->vlan_tci) & 0x0fff;
 
+                       match->key.vlan.vlan_dei =
+                               !!(ext_h_spec->vlan_tci & htons(0x1000));
+                       match->mask.vlan.vlan_dei =
+                               !!(ext_m_spec->vlan_tci & htons(0x1000));
+
                        match->key.vlan.vlan_priority =
                                (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13;
                        match->mask.vlan.vlan_priority =
index cd09bf5d21f4d2de2afbf88489faf84907b8749c..f615e42cf4eff2336a699cf146be801a1efb1ef7 100644 (file)
@@ -5300,7 +5300,13 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
        struct net *net;
        int sdif;
 
-       family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
+       if (len == sizeof(tuple->ipv4))
+               family = AF_INET;
+       else if (len == sizeof(tuple->ipv6))
+               family = AF_INET6;
+       else
+               return NULL;
+
        if (unlikely(family == AF_UNSPEC || flags ||
                     !((s32)netns_id < 0 || netns_id <= S32_MAX)))
                goto out;
@@ -5333,8 +5339,14 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
        struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
                                           ifindex, proto, netns_id, flags);
 
-       if (sk)
+       if (sk) {
                sk = sk_to_full_sk(sk);
+               if (!sk_fullsock(sk)) {
+                       if (!sock_flag(sk, SOCK_RCU_FREE))
+                               sock_gen_put(sk);
+                       return NULL;
+               }
+       }
 
        return sk;
 }
@@ -5365,8 +5377,14 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
        struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
                                         flags);
 
-       if (sk)
+       if (sk) {
                sk = sk_to_full_sk(sk);
+               if (!sk_fullsock(sk)) {
+                       if (!sock_flag(sk, SOCK_RCU_FREE))
+                               sock_gen_put(sk);
+                       return NULL;
+               }
+       }
 
        return sk;
 }
@@ -6726,6 +6744,7 @@ static bool sock_addr_is_valid_access(int off, int size,
                case BPF_CGROUP_INET4_BIND:
                case BPF_CGROUP_INET4_CONNECT:
                case BPF_CGROUP_UDP4_SENDMSG:
+               case BPF_CGROUP_UDP4_RECVMSG:
                        break;
                default:
                        return false;
@@ -6736,6 +6755,7 @@ static bool sock_addr_is_valid_access(int off, int size,
                case BPF_CGROUP_INET6_BIND:
                case BPF_CGROUP_INET6_CONNECT:
                case BPF_CGROUP_UDP6_SENDMSG:
+               case BPF_CGROUP_UDP6_RECVMSG:
                        break;
                default:
                        return false;
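
Three tightening changes in this file: the tuple length now selects the address family explicitly (any other size returns NULL instead of being treated as IPv6), the result of sk_to_full_sk() is verified with sk_fullsock() and its reference dropped when the socket is not a full one, and the UDP4/UDP6 RECVMSG attach types are accepted for sock_addr programs. A user-space sketch of the length-to-family mapping (the two structs are simplified stand-ins for bpf_sock_tuple's members; only their sizes matter here):

    #include <stddef.h>
    #include <sys/socket.h>

    struct tuple_v4 { unsigned int saddr, daddr; unsigned short sport, dport; };
    struct tuple_v6 { unsigned int saddr[4], daddr[4]; unsigned short sport, dport; };

    static int family_from_len(size_t len)
    {
            if (len == sizeof(struct tuple_v4))
                    return AF_INET;
            if (len == sizeof(struct tuple_v6))
                    return AF_INET6;
            return -1;      /* previously any non-IPv4 size was silently treated as IPv6 */
    }
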
index 0e2c0735546396005406d3297f39bca58a9938cb..9e7fc929bc50914698d1af09525d4e87fd32b198 100644 (file)
@@ -3203,6 +3203,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 }
 
 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+       __acquires(tbl->lock)
        __acquires(rcu_bh)
 {
        struct neigh_seq_state *state = seq->private;
@@ -3213,6 +3214,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
        rcu_read_lock_bh();
        state->nht = rcu_dereference_bh(tbl->nht);
+       read_lock(&tbl->lock);
 
        return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
@@ -3246,8 +3248,13 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 EXPORT_SYMBOL(neigh_seq_next);
 
 void neigh_seq_stop(struct seq_file *seq, void *v)
+       __releases(tbl->lock)
        __releases(rcu_bh)
 {
+       struct neigh_seq_state *state = seq->private;
+       struct neigh_table *tbl = state->tbl;
+
+       read_unlock(&tbl->lock);
        rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(neigh_seq_stop);
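
The added read_lock()/read_unlock() pair (and the matching __acquires/__releases annotations) keep the table's rwlock held for the whole /proc neighbour dump, from ->start() to ->stop(), so the table cannot be modified under the walker. A minimal user-space sketch of that pairing (pthread rwlock standing in for the kernel rwlock; illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void seq_start(void)
    {
            pthread_rwlock_rdlock(&tbl_lock);   /* taken once, before the first entry */
    }

    static void seq_stop(void)
    {
            pthread_rwlock_unlock(&tbl_lock);   /* always released in ->stop() */
    }

    int main(void)
    {
            seq_start();
            puts("...iterate entries under the read lock...");
            seq_stop();
            return 0;
    }
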
index 47c1aa9ee0454a4d6b4034e51f49fcaac8b2ac1a..c8cd99c3603f7874a9f8b5841d4117ba4ec4e5f2 100644 (file)
@@ -2337,6 +2337,7 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
                kv.iov_base = skb->data + offset;
                kv.iov_len = slen;
                memset(&msg, 0, sizeof(msg));
+               msg.msg_flags = MSG_DONTWAIT;
 
                ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
                if (ret <= 0)
index 2b3701958486219a26385c8fca1498c4e294dc1d..af09a23e48226337ebe0726c4358ee543c0e81b6 100644 (file)
@@ -1850,6 +1850,9 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                        goto out;
                }
                RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
+#ifdef CONFIG_BPF_SYSCALL
+               RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);
+#endif
 
                newsk->sk_err      = 0;
                newsk->sk_err_soft = 0;
@@ -2320,6 +2323,7 @@ static void sk_leave_memory_pressure(struct sock *sk)
 
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER    get_order(32768)
+DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
 
 /**
  * skb_page_frag_refill - check that a page_frag contains enough room
@@ -2344,7 +2348,8 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
        }
 
        pfrag->offset = 0;
-       if (SKB_FRAG_PAGE_ORDER) {
+       if (SKB_FRAG_PAGE_ORDER &&
+           !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
                /* Avoid direct reclaim but allow kswapd to wake */
                pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
                                          __GFP_COMP | __GFP_NOWARN |
index 1a2685694abd537d7ae304754b84b237928fd298..f9204719aeeeb4700582d03fda244f2f8961f8a7 100644 (file)
@@ -562,6 +562,13 @@ static struct ctl_table net_core_table[] = {
                .extra1         = &zero,
                .extra2         = &two,
        },
+       {
+               .procname       = "high_order_alloc_disable",
+               .data           = &net_high_order_alloc_disable_key.key,
+               .maxlen         = sizeof(net_high_order_alloc_disable_key),
+               .mode           = 0644,
+               .proc_handler   = proc_do_static_key,
+       },
        { }
 };
 
index b80410673915094255824d4e4b5fbdad292fd4c8..bfa49a88d03ae570261646730eed24d4cc27e36c 100644 (file)
@@ -964,7 +964,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
 {
        struct net_device *dev;
        struct fib_result res;
-       int err;
+       int err = 0;
 
        if (nh->fib_nh_flags & RTNH_F_ONLINK) {
                unsigned int addr_type;
index 8c9189a41b136c6f170e146f8a425a7041efc5f8..16f9159234a2014491fdd7f7371d6cb06b978adc 100644 (file)
@@ -918,7 +918,7 @@ static int __ip_append_data(struct sock *sk,
                uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
                if (!uarg)
                        return -ENOBUFS;
-               extra_uref = !skb;      /* only extra ref if !MSG_MORE */
+               extra_uref = !skb_zcopy(skb);   /* only ref on new uarg */
                if (rt->dst.dev->features & NETIF_F_SG &&
                    csummode == CHECKSUM_PARTIAL) {
                        paged = true;
index 4370f4246e86dfe06a9e07cace848baeaf6cc4da..073273b751f8fcda1c9c79cd1ab566f2939b2517 100644 (file)
@@ -287,6 +287,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED),
        SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP),
        SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP),
+       SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
        SNMP_MIB_SENTINEL
 };
 
index 875867b64d6a6597bf4fcd3498ed55741cbe33f7..b6f14af926faf80f1686549bee7154c584dc63e6 100644 (file)
@@ -39,6 +39,8 @@ static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
 static int tcp_adv_win_scale_min = -31;
 static int tcp_adv_win_scale_max = 31;
+static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
+static int tcp_min_snd_mss_max = 65535;
 static int ip_privileged_port_min;
 static int ip_privileged_port_max = 65535;
 static int ip_ttl_min = 1;
@@ -559,6 +561,18 @@ static struct ctl_table ipv4_table[] = {
                .extra1         = &sysctl_fib_sync_mem_min,
                .extra2         = &sysctl_fib_sync_mem_max,
        },
+       {
+               .procname       = "tcp_rx_skb_cache",
+               .data           = &tcp_rx_skb_cache_key.key,
+               .mode           = 0644,
+               .proc_handler   = proc_do_static_key,
+       },
+       {
+               .procname       = "tcp_tx_skb_cache",
+               .data           = &tcp_tx_skb_cache_key.key,
+               .mode           = 0644,
+               .proc_handler   = proc_do_static_key,
+       },
        { }
 };
 
@@ -757,6 +771,15 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "tcp_min_snd_mss",
+               .data           = &init_net.ipv4.sysctl_tcp_min_snd_mss,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &tcp_min_snd_mss_min,
+               .extra2         = &tcp_min_snd_mss_max,
+       },
        {
                .procname       = "tcp_probe_threshold",
                .data           = &init_net.ipv4.sysctl_tcp_probe_threshold,
index f12d500ec85cf770b0b94cb5e08d16f77e4c126b..7dc9ab84bb69aa90953e98f9763287fcee3a1659 100644 (file)
@@ -317,6 +317,11 @@ struct tcp_splice_state {
 unsigned long tcp_memory_pressure __read_mostly;
 EXPORT_SYMBOL_GPL(tcp_memory_pressure);
 
+DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
+EXPORT_SYMBOL(tcp_rx_skb_cache_key);
+
+DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
+
 void tcp_enter_memory_pressure(struct sock *sk)
 {
        unsigned long val;
@@ -3868,6 +3873,7 @@ void __init tcp_init(void)
        unsigned long limit;
        unsigned int i;
 
+       BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
        BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
                     FIELD_SIZEOF(struct sk_buff, cb));
 
index 08a477e74cf3267b725294c66b46fdad12bd2b72..d95ee40df6c2b020d590018bc41833b8a6aefa4a 100644 (file)
@@ -1302,7 +1302,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
        TCP_SKB_CB(skb)->seq += shifted;
 
        tcp_skb_pcount_add(prev, pcount);
-       BUG_ON(tcp_skb_pcount(skb) < pcount);
+       WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
        tcp_skb_pcount_add(skb, -pcount);
 
        /* When we're adding to gso_segs == 1, gso_size will be zero,
@@ -1368,6 +1368,21 @@ static int skb_can_shift(const struct sk_buff *skb)
        return !skb_headlen(skb) && skb_is_nonlinear(skb);
 }
 
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
+                 int pcount, int shiftlen)
+{
+       /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
+        * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
+        * to make sure not storing more than 65535 * 8 bytes per skb,
+        * even if current MSS is bigger.
+        */
+       if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
+               return 0;
+       if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
+               return 0;
+       return skb_shift(to, from, shiftlen);
+}
+
 /* Try collapsing SACK blocks spanning across multiple skbs to a single
  * skb.
  */
@@ -1473,7 +1488,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
        if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
                goto fallback;
 
-       if (!skb_shift(prev, skb, len))
+       if (!tcp_skb_shift(prev, skb, pcount, len))
                goto fallback;
        if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
                goto out;
@@ -1491,11 +1506,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
                goto out;
 
        len = skb->len;
-       if (skb_shift(prev, skb, len)) {
-               pcount += tcp_skb_pcount(skb);
-               tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb),
+       pcount = tcp_skb_pcount(skb);
+       if (tcp_skb_shift(prev, skb, pcount, len))
+               tcp_shifted_skb(sk, prev, skb, state, pcount,
                                len, mss, 0);
-       }
 
 out:
        return prev;
@@ -2648,7 +2662,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
        struct tcp_sock *tp = tcp_sk(sk);
        bool recovered = !before(tp->snd_una, tp->high_seq);
 
-       if ((flag & FLAG_SND_UNA_ADVANCED) &&
+       if ((flag & FLAG_SND_UNA_ADVANCED || tp->fastopen_rsk) &&
            tcp_try_undo_loss(sk, false))
                return;
 
index bc86f9735f4577d50d94f42b10edb6ba95bb7a05..cfa81190a1b1af30d05f4f6cd84c05b025a6afeb 100644 (file)
@@ -2628,6 +2628,7 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_ecn_fallback = 1;
 
        net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
+       net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
        net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
        net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
 
index f429e856e2631a9e6de1d2e060406742f97e538e..00c01a01b547ec67c971dc25a74c9258563cf871 100644 (file)
@@ -1296,6 +1296,11 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
        if (nsize < 0)
                nsize = 0;
 
+       if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+               return -ENOMEM;
+       }
+
        if (skb_unclone(skb, gfp))
                return -ENOMEM;
 
@@ -1454,8 +1459,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
        mss_now -= icsk->icsk_ext_hdr_len;
 
        /* Then reserve room for full set of TCP options and 8 bytes of data */
-       if (mss_now < 48)
-               mss_now = 48;
+       mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
        return mss_now;
 }
 
@@ -2747,7 +2751,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
                if (next_skb_size <= skb_availroom(skb))
                        skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
                                      next_skb_size);
-               else if (!skb_shift(skb, next_skb, next_skb_size))
+               else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
                        return false;
        }
        tcp_highest_sack_replace(sk, next_skb, skb);
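
Two related defenses here: tcp_fragment() now refuses (returning -ENOMEM and bumping the new TCPWqueueTooBig counter) to split skbs once sk_wmem_queued exceeds roughly twice sk_sndbuf, and the sender MSS floor becomes the per-netns tcp_min_snd_mss sysctl instead of the hard-coded 48 bytes (its default, TCP_MIN_SND_MSS, is set in tcp_sk_init() above and sanity-checked by the BUILD_BUG_ON() in tcp_init()). A user-space restatement of the two checks (plain C sketches, not the kernel functions):

    /* (queued >> 1) > sndbuf is roughly: queued > 2 * sndbuf */
    static int fragment_allowed(unsigned int sk_wmem_queued, unsigned int sk_sndbuf)
    {
            return (sk_wmem_queued >> 1) <= sk_sndbuf;
    }

    /* was: if (mss_now < 48) mss_now = 48; */
    static int clamp_snd_mss(int mss_now, int tcp_min_snd_mss)
    {
            return mss_now > tcp_min_snd_mss ? mss_now : tcp_min_snd_mss;
    }
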
index 5bad937ce779ef8dca42a26dcbb5f1d60a571c73..c801cd37cc2a9c11f2dd4b9681137755e501a538 100644 (file)
@@ -155,6 +155,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
                mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
                mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
                mss = max(mss, 68 - tcp_sk(sk)->tcp_header_len);
+               mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
                icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
        }
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
index 7c6228fbf5dd8852f54da1d220ba26755d4695f2..eed59c8477228a72109718a7f35ef806ba6af826 100644 (file)
@@ -498,7 +498,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
 struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
                                 __be16 sport, __be16 dport)
 {
-       return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
+       const struct iphdr *iph = ip_hdr(skb);
+
+       return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
+                                iph->daddr, dport, inet_iif(skb),
+                                inet_sdif(skb), &udp_table, NULL);
 }
 EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
 
@@ -1773,6 +1777,10 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
                *addr_len = sizeof(*sin);
+
+               if (cgroup_bpf_enabled)
+                       BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
+                                                       (struct sockaddr *)sin);
        }
 
        if (udp_sk(sk)->gro_enabled)
index bafdd04a768d16b7c862db3ac84ad1633deafc09..375b4b4f9bf5dde6fee9f4fc2a9028738498504e 100644 (file)
@@ -393,23 +393,28 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
        return ERR_PTR(err);
 }
 
-static int icmp6_iif(const struct sk_buff *skb)
+static struct net_device *icmp6_dev(const struct sk_buff *skb)
 {
-       int iif = skb->dev->ifindex;
+       struct net_device *dev = skb->dev;
 
        /* for local traffic to local address, skb dev is the loopback
         * device. Check if there is a dst attached to the skb and if so
         * get the real device index. Same is needed for replies to a link
         * local address on a device enslaved to an L3 master device
         */
-       if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
+       if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
                const struct rt6_info *rt6 = skb_rt6_info(skb);
 
                if (rt6)
-                       iif = rt6->rt6i_idev->dev->ifindex;
+                       dev = rt6->rt6i_idev->dev;
        }
 
-       return iif;
+       return dev;
+}
+
+static int icmp6_iif(const struct sk_buff *skb)
+{
+       return icmp6_dev(skb)->ifindex;
 }
 
 /*
@@ -810,7 +815,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 static int icmpv6_rcv(struct sk_buff *skb)
 {
        struct net *net = dev_net(skb->dev);
-       struct net_device *dev = skb->dev;
+       struct net_device *dev = icmp6_dev(skb);
        struct inet6_dev *idev = __in6_dev_get(dev);
        const struct in6_addr *saddr, *daddr;
        struct icmp6hdr *hdr;
index 2f3eb7dc45daa5012b4b29f34b0542e00c60d135..545e339b8c4fb5a372d4193361b4f5c9f9dd3b0b 100644 (file)
@@ -250,9 +250,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
        rcu_read_lock_bh();
        for_each_sk_fl_rcu(np, sfl) {
                struct ip6_flowlabel *fl = sfl->fl;
-               if (fl->label == label) {
+
+               if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
                        fl->lastuse = jiffies;
-                       atomic_inc(&fl->users);
                        rcu_read_unlock_bh();
                        return fl;
                }
@@ -618,7 +618,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                                                goto done;
                                        }
                                        fl1 = sfl->fl;
-                                       atomic_inc(&fl1->users);
+                                       if (!atomic_inc_not_zero(&fl1->users))
+                                               fl1 = NULL;
                                        break;
                                }
                        }
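
Both lookup paths now take their reference with atomic_inc_not_zero(), so a flow label whose refcount has already dropped to zero (and is about to be freed) can no longer be resurrected; callers simply see no match, or a NULL fl1, instead. A user-space sketch of the inc-if-not-zero semantics (C11 atomics standing in for the kernel's atomic_t):

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool inc_not_zero(atomic_int *refs)
    {
            int old = atomic_load(refs);

            while (old != 0) {
                    /* on failure, 'old' is reloaded with the current value */
                    if (atomic_compare_exchange_weak(refs, &old, old + 1))
                            return true;    /* reference taken */
            }
            return false;                   /* object is already on its way out */
    }
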
index 934c88f128abbd22bb994d56a018da1748ff3a16..834475717110ecc53aa5bd2e3f275f2e8980fb05 100644 (file)
@@ -1340,7 +1340,7 @@ static int __ip6_append_data(struct sock *sk,
                uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
                if (!uarg)
                        return -ENOBUFS;
-               extra_uref = !skb;      /* only extra ref if !MSG_MORE */
+               extra_uref = !skb_zcopy(skb);   /* only ref on new uarg */
                if (rt->dst.dev->features & NETIF_F_SG &&
                    csummode == CHECKSUM_PARTIAL) {
                        paged = true;
index 22369694b2fbfff807211a22d1c06e02eb5093c2..b2b2c0c38b870515085b23e39e7c5c51707bf472 100644 (file)
@@ -298,7 +298,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
                           skb_network_header_len(skb));
 
        rcu_read_lock();
-       __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+       __IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.rb_fragments = RB_ROOT;
        fq->q.fragments_tail = NULL;
@@ -312,7 +312,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
        net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
 out_fail:
        rcu_read_lock();
-       __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+       __IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        inet_frag_kill(&fq->q);
        return -1;
index b3418a7c5c748275047b9a7bf53b78d80322568a..70b01bd950227897d69a397c63f53a937efeaccc 100644 (file)
@@ -239,7 +239,7 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 
        return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
                                 &iph->daddr, dport, inet6_iif(skb),
-                                inet6_sdif(skb), &udp_table, skb);
+                                inet6_sdif(skb), &udp_table, NULL);
 }
 EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
 
@@ -365,6 +365,10 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                                                    inet6_iif(skb));
                }
                *addr_len = sizeof(*sin6);
+
+               if (cgroup_bpf_enabled)
+                       BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
+                                               (struct sockaddr *)sin6);
        }
 
        if (udp_sk(sk)->gro_enabled)
@@ -511,7 +515,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct net *net = dev_net(skb->dev);
 
        sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
-                              inet6_iif(skb), inet6_sdif(skb), udptable, skb);
+                              inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
        if (!sk) {
                /* No socket for error: try tunnels before discarding */
                sk = ERR_PTR(-ENOENT);
index 03f0cd872dcec1596b413e41df89f3ee256bf153..5d2d1f746b9176b452487657a2dc164911411cf9 100644 (file)
@@ -177,6 +177,7 @@ int lapb_unregister(struct net_device *dev)
        lapb = __lapb_devtostruct(dev);
        if (!lapb)
                goto out;
+       lapb_put(lapb);
 
        lapb_stop_t1timer(lapb);
        lapb_stop_t2timer(lapb);
index 073a8235ae1bd8cfcd28640a31bbb66bb4d22f96..a86fcae279a64911ae22d8d3075e282c8e1e4afe 100644 (file)
@@ -1435,7 +1435,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
 
-       if (WARN_ON(!chanctx_conf)) {
+       if (WARN_ON_ONCE(!chanctx_conf)) {
                rcu_read_unlock();
                return NULL;
        }
@@ -2037,6 +2037,13 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
 
 static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
 {
+       /*
+        * It's unsafe to try to do any work during reconfigure flow.
+        * When the flow ends the work will be requeued.
+        */
+       if (local->in_reconfig)
+               return false;
+
        /*
         * If quiescing is set, we are racing with __ieee80211_suspend.
         * __ieee80211_suspend flushes the workers after setting quiescing,
@@ -2225,6 +2232,9 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
                                          const u8 *addr);
 void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
 void ieee80211_tdls_chsw_work(struct work_struct *wk);
+void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
+                                     const u8 *peer, u16 reason);
+const char *ieee80211_get_reason_code_string(u16 reason_code);
 
 extern const struct ethtool_ops ieee80211_ethtool_ops;
 
index 20bf9db7a3886de7601fabd0e0ff2c721e54e30e..89f09a09efdb7c6a3bd13bf91f1e82c9186e766e 100644 (file)
@@ -268,11 +268,9 @@ int ieee80211_set_tx_key(struct ieee80211_key *key)
 {
        struct sta_info *sta = key->sta;
        struct ieee80211_local *local = key->local;
-       struct ieee80211_key *old;
 
        assert_key_lock(local);
 
-       old = key_mtx_dereference(local, sta->ptk[sta->ptk_idx]);
        sta->ptk_idx = key->conf.keyidx;
        ieee80211_check_fast_xmit(sta);
 
index 766e5e5bab8a51958a60cf909ffcca26e09b1006..fe44f0d98de02c3a22f03bab725e3905f95dd4f5 100644 (file)
@@ -929,6 +929,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 
        /* flush STAs and mpaths on this iface */
        sta_info_flush(sdata);
+       ieee80211_free_keys(sdata, true);
        mesh_path_flush_by_iface(sdata);
 
        /* stop the beacon */
@@ -1220,7 +1221,8 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
        ifmsh->chsw_ttl = 0;
 
        /* Remove the CSA and MCSP elements from the beacon */
-       tmp_csa_settings = rcu_dereference(ifmsh->csa);
+       tmp_csa_settings = rcu_dereference_protected(ifmsh->csa,
+                                           lockdep_is_held(&sdata->wdev.mtx));
        RCU_INIT_POINTER(ifmsh->csa, NULL);
        if (tmp_csa_settings)
                kfree_rcu(tmp_csa_settings, rcu_head);
@@ -1242,6 +1244,8 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
        struct mesh_csa_settings *tmp_csa_settings;
        int ret = 0;
 
+       lockdep_assert_held(&sdata->wdev.mtx);
+
        tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings),
                                   GFP_ATOMIC);
        if (!tmp_csa_settings)
index b7a9fe3d5fcb75dfb15e27c536d19066993b0c6d..383b0df100e4b974f7a888b509f1973704a7816a 100644 (file)
@@ -2963,7 +2963,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
 #define case_WLAN(type) \
        case WLAN_REASON_##type: return #type
 
-static const char *ieee80211_get_reason_code_string(u16 reason_code)
+const char *ieee80211_get_reason_code_string(u16 reason_code)
 {
        switch (reason_code) {
        case_WLAN(UNSPECIFIED);
@@ -3028,6 +3028,11 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
        if (len < 24 + 2)
                return;
 
+       if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
+               ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
+               return;
+       }
+
        if (ifmgd->associated &&
            ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
                const u8 *bssid = ifmgd->associated->bssid;
@@ -3077,6 +3082,11 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
 
        reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
 
+       if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
+               ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
+               return;
+       }
+
        sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n",
                   mgmt->sa, reason_code,
                   ieee80211_get_reason_code_string(reason_code));
index 25577ede2986ec5616a94501145a9734ec402521..fd3740000e877064360e9d3de7bcf8d8a76ca4cb 100644 (file)
@@ -3831,6 +3831,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
        case NL80211_IFTYPE_STATION:
                if (!bssid && !sdata->u.mgd.use_4addr)
                        return false;
+               if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
+                       return false;
                if (multicast)
                        return true;
                return ether_addr_equal(sdata->vif.addr, hdr->addr1);
index ca97e1598c28fe70f99f04e59e87ff2e28b66484..fca1f54773965ad36862ed148e667ef6856957a0 100644 (file)
@@ -1993,3 +1993,26 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk)
        }
        rtnl_unlock();
 }
+
+void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
+                                     const u8 *peer, u16 reason)
+{
+       struct ieee80211_sta *sta;
+
+       rcu_read_lock();
+       sta = ieee80211_find_sta(&sdata->vif, peer);
+       if (!sta || !sta->tdls) {
+               rcu_read_unlock();
+               return;
+       }
+       rcu_read_unlock();
+
+       tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n",
+                peer, reason,
+                ieee80211_get_reason_code_string(reason));
+
+       ieee80211_tdls_oper_request(&sdata->vif, peer,
+                                   NL80211_TDLS_TEARDOWN,
+                                   WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
+                                   GFP_ATOMIC);
+}
index cba4633cd6cfd02a838450bad524465579b18f8d..e2edc2a3cc8bae2fff296ab3190a58f218391386 100644 (file)
@@ -2480,6 +2480,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                mutex_lock(&local->mtx);
                ieee80211_start_next_roc(local);
                mutex_unlock(&local->mtx);
+
+               /* Requeue all works */
+               list_for_each_entry(sdata, &local->interfaces, list)
+                       ieee80211_queue_work(&local->hw, &sdata->work);
        }
 
        ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
@@ -3795,7 +3799,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
        }
 
        /* Always allow software iftypes */
-       if (local->hw.wiphy->software_iftypes & BIT(iftype)) {
+       if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
+           (iftype == NL80211_IFTYPE_AP_VLAN &&
+            local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
                if (radar_detect)
                        return -EINVAL;
                return 0;
index 58d0b258b684cdd8124266e1b392c4cd77fec5a7..5dd48f0a4b1bd1d5eb24489f72a126a146639f4d 100644 (file)
@@ -1175,7 +1175,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_key *key = rx->key;
        struct ieee80211_mmie_16 *mmie;
-       u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
+       u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
        if (!ieee80211_is_mgmt(hdr->frame_control))
@@ -1206,13 +1206,18 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
                memcpy(nonce, hdr->addr2, ETH_ALEN);
                memcpy(nonce + ETH_ALEN, ipn, 6);
 
+               mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC);
+               if (!mic)
+                       return RX_DROP_UNUSABLE;
                if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
                                       skb->data + 24, skb->len - 24,
                                       mic) < 0 ||
                    crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
                        key->u.aes_gmac.icverrors++;
+                       kfree(mic);
                        return RX_DROP_UNUSABLE;
                }
+               kfree(mic);
        }
 
        memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
index d9391beea980d0d262f7917d5db3acfac537b7f7..d1ad69b7942ac2b369efd7c363f8034d3ed0e985 100644 (file)
@@ -26,6 +26,7 @@ config NET_MPLS_GSO
 config MPLS_ROUTING
        tristate "MPLS: routing support"
        depends on NET_IP_TUNNEL || NET_IP_TUNNEL=n
+       depends on PROC_SYSCTL
        ---help---
         Add support for forwarding of mpls packets.
 
index 50059613076066e4cf3d901be05506b6370de7c3..d25e91d7bdc18526301c6228f5943aec7f9becc8 100644 (file)
@@ -23,7 +23,7 @@
 #include "internal.h"
 
 static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
-       [MPLS_IPTUNNEL_DST]     = { .type = NLA_U32 },
+       [MPLS_IPTUNNEL_DST]     = { .len = sizeof(u32) },
        [MPLS_IPTUNNEL_TTL]     = { .type = NLA_U8 },
 };
 
index 1180b3e58a0abe940e6fdba4fc887ed4b4b80a7b..ea64c90b14e8c8c5385c8357b69efe1a1eac9d4c 100644 (file)
@@ -911,7 +911,8 @@ static int nfc_genl_deactivate_target(struct sk_buff *skb,
        u32 device_idx, target_idx;
        int rc;
 
-       if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+       if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+           !info->attrs[NFC_ATTR_TARGET_INDEX])
                return -EINVAL;
 
        device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
index 758bb1da31dfeb44a1f4151ba148c30cf9839e21..d2437b5b2f6ad093c0738e48ebe9261ead757e13 100644 (file)
@@ -157,7 +157,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 {
        struct vport *vport;
        struct internal_dev *internal_dev;
+       struct net_device *dev;
        int err;
+       bool free_vport = true;
 
        vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
        if (IS_ERR(vport)) {
@@ -165,8 +167,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
                goto error;
        }
 
-       vport->dev = alloc_netdev(sizeof(struct internal_dev),
-                                 parms->name, NET_NAME_USER, do_setup);
+       dev = alloc_netdev(sizeof(struct internal_dev),
+                          parms->name, NET_NAME_USER, do_setup);
+       vport->dev = dev;
        if (!vport->dev) {
                err = -ENOMEM;
                goto error_free_vport;
@@ -187,8 +190,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
        rtnl_lock();
        err = register_netdevice(vport->dev);
-       if (err)
+       if (err) {
+               free_vport = false;
                goto error_unlock;
+       }
 
        dev_set_promiscuity(vport->dev, 1);
        rtnl_unlock();
@@ -198,11 +203,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
 error_unlock:
        rtnl_unlock();
-       free_percpu(vport->dev->tstats);
+       free_percpu(dev->tstats);
 error_free_netdev:
-       free_netdev(vport->dev);
+       free_netdev(dev);
 error_free_vport:
-       ovs_vport_free(vport);
+       if (free_vport)
+               ovs_vport_free(vport);
 error:
        return ERR_PTR(err);
 }
index c388372df0e2f9f77b1e51dd2eb3c75138186a29..eedd5786c0844a363c081391b5afc679c89b0843 100644 (file)
@@ -320,10 +320,13 @@ static int fl_init(struct tcf_proto *tp)
        return rhashtable_init(&head->ht, &mask_ht_params);
 }
 
-static void fl_mask_free(struct fl_flow_mask *mask)
+static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
 {
-       WARN_ON(!list_empty(&mask->filters));
-       rhashtable_destroy(&mask->ht);
+       /* temporary masks don't have their filters list and ht initialized */
+       if (mask_init_done) {
+               WARN_ON(!list_empty(&mask->filters));
+               rhashtable_destroy(&mask->ht);
+       }
        kfree(mask);
 }
 
@@ -332,7 +335,15 @@ static void fl_mask_free_work(struct work_struct *work)
        struct fl_flow_mask *mask = container_of(to_rcu_work(work),
                                                 struct fl_flow_mask, rwork);
 
-       fl_mask_free(mask);
+       fl_mask_free(mask, true);
+}
+
+static void fl_uninit_mask_free_work(struct work_struct *work)
+{
+       struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+                                                struct fl_flow_mask, rwork);
+
+       fl_mask_free(mask, false);
 }
 
 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
@@ -1346,9 +1357,6 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
        if (err)
                goto errout_destroy;
 
-       /* Wait until any potential concurrent users of mask are finished */
-       synchronize_rcu();
-
        spin_lock(&head->masks_lock);
        list_add_tail_rcu(&newmask->list, &head->masks);
        spin_unlock(&head->masks_lock);
@@ -1375,11 +1383,7 @@ static int fl_check_assign_mask(struct cls_fl_head *head,
 
        /* Insert mask as temporary node to prevent concurrent creation of mask
         * with same key. Any concurrent lookups with same key will return
-        * -EAGAIN because mask's refcnt is zero. It is safe to insert
-        * stack-allocated 'mask' to masks hash table because we call
-        * synchronize_rcu() before returning from this function (either in case
-        * of error or after replacing it with heap-allocated mask in
-        * fl_create_new_mask()).
+        * -EAGAIN because mask's refcnt is zero.
         */
        fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
                                                       &mask->ht_node,
@@ -1414,8 +1418,6 @@ static int fl_check_assign_mask(struct cls_fl_head *head,
 errout_cleanup:
        rhashtable_remove_fast(&head->ht, &mask->ht_node,
                               mask_ht_params);
-       /* Wait until any potential concurrent users of mask are finished */
-       synchronize_rcu();
        return ret;
 }
 
@@ -1644,7 +1646,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        *arg = fnew;
 
        kfree(tb);
-       kfree(mask);
+       tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
        return 0;
 
 errout_ht:
@@ -1664,7 +1666,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 errout_tb:
        kfree(tb);
 errout_mask_alloc:
-       kfree(mask);
+       tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
 errout_fold:
        if (fold)
                __fl_put(fold);
index f17908f5c4f3c52c21614e24316f2e17c6fa1742..9b0e5b0d701ad9255327897b119634e1939276c7 100644 (file)
@@ -2583,6 +2583,8 @@ static int sctp_process_param(struct sctp_association *asoc,
        case SCTP_PARAM_STATE_COOKIE:
                asoc->peer.cookie_len =
                        ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
+               if (asoc->peer.cookie)
+                       kfree(asoc->peer.cookie);
                asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
                if (!asoc->peer.cookie)
                        retval = 0;
@@ -2647,6 +2649,8 @@ static int sctp_process_param(struct sctp_association *asoc,
                        goto fall_through;
 
                /* Save peer's random parameter */
+               if (asoc->peer.peer_random)
+                       kfree(asoc->peer.peer_random);
                asoc->peer.peer_random = kmemdup(param.p,
                                            ntohs(param.p->length), gfp);
                if (!asoc->peer.peer_random) {
@@ -2660,6 +2664,8 @@ static int sctp_process_param(struct sctp_association *asoc,
                        goto fall_through;
 
                /* Save peer's HMAC list */
+               if (asoc->peer.peer_hmacs)
+                       kfree(asoc->peer.peer_hmacs);
                asoc->peer.peer_hmacs = kmemdup(param.p,
                                            ntohs(param.p->length), gfp);
                if (!asoc->peer.peer_hmacs) {
@@ -2675,6 +2681,8 @@ static int sctp_process_param(struct sctp_association *asoc,
                if (!ep->auth_enable)
                        goto fall_through;
 
+               if (asoc->peer.peer_chunks)
+                       kfree(asoc->peer.peer_chunks);
                asoc->peer.peer_chunks = kmemdup(param.p,
                                            ntohs(param.p->length), gfp);
                if (!asoc->peer.peer_chunks)
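
A peer that repeats the same INIT parameter used to make each later kmemdup() overwrite the previous pointer, leaking the earlier copy; freeing the old buffer first plugs that (kfree(NULL) is a no-op, so the NULL checks are purely defensive). A user-space sketch of the replace-without-leak pattern (names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    /* Replace an owned buffer with a fresh copy of 'src', never leaking the old one. */
    static char *replace_copy(char *old, const char *src, size_t len)
    {
            char *copy;

            free(old);              /* free(NULL) is a no-op, like kfree(NULL) */
            copy = malloc(len);
            if (copy)
                    memcpy(copy, src, len);
            return copy;            /* NULL on allocation failure, as with kmemdup() */
    }
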
index 992be6113676988291157fb81c65b5ad605e522b..5f98d38bcf082742ef98b22da4b9a6074df4050f 100644 (file)
@@ -218,6 +218,7 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp)
 
        rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
                tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
+               __skb_queue_purge(&m->deferredq);
                list_del(&m->list);
                kfree(m);
        }
index 960494f437ac34e27b2c8a55575003e273f9e1fc..455a782c7658f6de2d23a7f8978c63eda819d552 100644 (file)
@@ -1143,7 +1143,6 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
 
                full_record = false;
                record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
-               copied = 0;
                copy = size;
                if (copy >= record_room) {
                        copy = record_room;
index 0a6fe064994529e1f8e5be4d48db494f8f0bd89b..fb2df6e068fa6accf7e678f42ab10620578a2c53 100644 (file)
@@ -335,8 +335,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 
        struct sockaddr_vm addr;
        struct sock *sk, *new = NULL;
-       struct vsock_sock *vnew;
-       struct hvsock *hvs, *hvs_new;
+       struct vsock_sock *vnew = NULL;
+       struct hvsock *hvs, *hvs_new = NULL;
        int ret;
 
        if_type = &chan->offermsg.offer.if_type;
index f3f3d06cb6d8f69f9d4b082c07c7ddd488c30a1f..e30f53728725d1c5e80d22cfccaea4ffb17474a9 100644 (file)
@@ -871,8 +871,10 @@ virtio_transport_recv_connected(struct sock *sk,
                if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
                        vsk->peer_shutdown |= SEND_SHUTDOWN;
                if (vsk->peer_shutdown == SHUTDOWN_MASK &&
-                   vsock_stream_has_data(vsk) <= 0)
+                   vsock_stream_has_data(vsk) <= 0) {
+                       sock_set_flag(sk, SOCK_DONE);
                        sk->sk_state = TCP_CLOSING;
+               }
                if (le32_to_cpu(pkt->hdr.flags))
                        sk->sk_state_change(sk);
                break;
index 72a224ce8e0a834d04d99c276de80a6956be9797..2eee93985ab0df8f9bbdcd2d4e146c34ac3899f5 100644 (file)
@@ -39,6 +39,7 @@ $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
        @(set -e; \
          allf=""; \
          for f in $^ ; do \
+             test -f $$f || continue;\
              # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
              thisf=$$(od -An -v -tx1 < $$f | \
                           sed -e 's/ /\n/g' | \
index 037816163e70d3a9d1e06498ed0f5bc6bcb3cb22..53ad3dbb76fe554089d6d3bf38b398ad64bafb79 100644 (file)
@@ -514,7 +514,7 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
                                   &rdev->rfkill_ops, rdev);
 
        if (!rdev->rfkill) {
-               kfree(rdev);
+               wiphy_free(&rdev->wiphy);
                return NULL;
        }
 
@@ -1397,8 +1397,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                }
                break;
        case NETDEV_PRE_UP:
-               if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
+               if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
+                   !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
+                     rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
+                     wdev->use_4addr))
                        return notifier_from_errno(-EOPNOTSUPP);
+
                if (rfkill_blocked(rdev->rfkill))
                        return notifier_from_errno(-ERFKILL);
                break;
index c391b560d98631c222f3a03563f84fad5e8ac8f5..520d437aa8d15539f081f3964c31c3c73dec6519 100644 (file)
@@ -304,8 +304,11 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
        [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
 
-       [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
-       [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
+       [NL80211_ATTR_MAC] = { .type = NLA_EXACT_LEN_WARN, .len = ETH_ALEN },
+       [NL80211_ATTR_PREV_BSSID] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = ETH_ALEN
+       },
 
        [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
        [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
@@ -356,7 +359,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
        [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG },
 
-       [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN },
+       [NL80211_ATTR_HT_CAPABILITY] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = NL80211_HT_CAPABILITY_LEN
+       },
 
        [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 },
        [NL80211_ATTR_IE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
@@ -386,7 +392,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
        [NL80211_ATTR_PID] = { .type = NLA_U32 },
        [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
-       [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
+       [NL80211_ATTR_PMKID] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = WLAN_PMKID_LEN
+       },
        [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
        [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
        [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -448,7 +457,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WDEV] = { .type = NLA_U64 },
        [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
        [NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, },
-       [NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
+       [NL80211_ATTR_VHT_CAPABILITY] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = NL80211_VHT_CAPABILITY_LEN
+       },
        [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
        [NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127),
        [NL80211_ATTR_P2P_OPPPS] = NLA_POLICY_MAX(NLA_U8, 1),
@@ -484,7 +496,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
        [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY,
                                   .len = IEEE80211_QOS_MAP_LEN_MAX },
-       [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
+       [NL80211_ATTR_MAC_HINT] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = ETH_ALEN
+       },
        [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
        [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
        [NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG },
@@ -495,7 +510,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
                NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1),
        [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
        [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
-       [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
+       [NL80211_ATTR_MAC_MASK] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = ETH_ALEN
+       },
        [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
        [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
        [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
@@ -507,15 +525,21 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MU_MIMO_GROUP_DATA] = {
                .len = VHT_MUMIMO_GROUPS_DATA_LEN
        },
-       [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN },
+       [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = ETH_ALEN
+       },
        [NL80211_ATTR_NAN_MASTER_PREF] = NLA_POLICY_MIN(NLA_U8, 1),
        [NL80211_ATTR_BANDS] = { .type = NLA_U32 },
        [NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED },
        [NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
                                    .len = FILS_MAX_KEK_LEN },
-       [NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN },
+       [NL80211_ATTR_FILS_NONCES] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = 2 * FILS_NONCE_LEN
+       },
        [NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, },
-       [NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
+       [NL80211_ATTR_BSSID] = { .type = NLA_EXACT_LEN_WARN, .len = ETH_ALEN },
        [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
        [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
                .len = sizeof(struct nl80211_bss_select_rssi_adjust)
@@ -528,7 +552,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 },
        [NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY,
                                        .len = FILS_ERP_MAX_RRK_LEN },
-       [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 },
+       [NL80211_ATTR_FILS_CACHE_ID] = { .type = NLA_EXACT_LEN_WARN, .len = 2 },
        [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
        [NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG },
        [NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG },
@@ -589,10 +613,13 @@ static const struct nla_policy
 nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
        [NL80211_WOWLAN_TCP_SRC_IPV4] = { .type = NLA_U32 },
        [NL80211_WOWLAN_TCP_DST_IPV4] = { .type = NLA_U32 },
-       [NL80211_WOWLAN_TCP_DST_MAC] = { .len = ETH_ALEN },
+       [NL80211_WOWLAN_TCP_DST_MAC] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = ETH_ALEN
+       },
        [NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 },
        [NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 },
-       [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .len = 1 },
+       [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
        [NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = {
                .len = sizeof(struct nl80211_wowlan_tcp_data_seq)
        },
@@ -600,8 +627,8 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
                .len = sizeof(struct nl80211_wowlan_tcp_data_token)
        },
        [NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 },
-       [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .len = 1 },
-       [NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 },
+       [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
+       [NL80211_WOWLAN_TCP_WAKE_MASK] = { .type = NLA_MIN_LEN, .len = 1 },
 };
 #endif /* CONFIG_PM */
 
@@ -619,9 +646,18 @@ nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = {
 /* policy for GTK rekey offload attributes */
 static const struct nla_policy
 nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
-       [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN },
-       [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN },
-       [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
+       [NL80211_REKEY_DATA_KEK] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = NL80211_KEK_LEN,
+       },
+       [NL80211_REKEY_DATA_KCK] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = NL80211_KCK_LEN,
+       },
+       [NL80211_REKEY_DATA_REPLAY_CTR] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = NL80211_REPLAY_CTR_LEN
+       },
 };
 
 static const struct nla_policy
@@ -635,7 +671,10 @@ static const struct nla_policy
 nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
        [NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
                                                 .len = IEEE80211_MAX_SSID_LEN },
-       [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = { .len = ETH_ALEN },
+       [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = ETH_ALEN
+       },
        [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
        [NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI] =
                NLA_POLICY_NESTED(nl80211_match_band_rssi_policy),
@@ -667,7 +706,10 @@ nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
        [NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE] = { .type = NLA_FLAG },
        [NL80211_NAN_FUNC_FOLLOW_UP_ID] = { .type = NLA_U8 },
        [NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] = { .type = NLA_U8 },
-       [NL80211_NAN_FUNC_FOLLOW_UP_DEST] = { .len = ETH_ALEN },
+       [NL80211_NAN_FUNC_FOLLOW_UP_DEST] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = ETH_ALEN
+       },
        [NL80211_NAN_FUNC_CLOSE_RANGE] = { .type = NLA_FLAG },
        [NL80211_NAN_FUNC_TTL] = { .type = NLA_U32 },
        [NL80211_NAN_FUNC_SERVICE_INFO] = { .type = NLA_BINARY,
@@ -3420,8 +3462,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
        if (info->attrs[NL80211_ATTR_IFTYPE])
                type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
 
-       if (!rdev->ops->add_virtual_intf ||
-           !(rdev->wiphy.interface_modes & (1 << type)))
+       if (!rdev->ops->add_virtual_intf)
                return -EOPNOTSUPP;
 
        if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN ||
@@ -3440,6 +3481,11 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                        return err;
        }
 
+       if (!(rdev->wiphy.interface_modes & (1 << type)) &&
+           !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
+             rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
+               return -EOPNOTSUPP;
+
        err = nl80211_parse_mon_options(rdev, type, info, &params);
        if (err < 0)
                return err;
@@ -4057,7 +4103,10 @@ static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
                                    .len = NL80211_MAX_SUPP_RATES },
        [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
                                .len = NL80211_MAX_SUPP_HT_RATES },
-       [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
+       [NL80211_TXRATE_VHT] = {
+               .type = NLA_EXACT_LEN_WARN,
+               .len = sizeof(struct nl80211_txrate_vht),
+       },
        [NL80211_TXRATE_GI] = { .type = NLA_U8 },
 };
 
@@ -4856,8 +4905,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
        struct nlattr *sinfoattr, *bss_param;
 
        hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
-       if (!hdr)
+       if (!hdr) {
+               cfg80211_sinfo_release_content(sinfo);
                return -1;
+       }
 
        if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
index 1b190475359a794754b922acb485d07524cec777..c09fbf09549dfa6e9baa436c88e5adb7b9817dfb 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
  */
 #ifndef __PMSR_H
 #define __PMSR_H
@@ -448,7 +448,7 @@ static int nl80211_pmsr_send_result(struct sk_buff *msg,
 
        if (res->ap_tsf_valid &&
            nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF,
-                             res->host_time, NL80211_PMSR_RESP_ATTR_PAD))
+                             res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD))
                goto error;
 
        if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL))
index c04f5451f89b114cb98c43b1b3ce6724858f2a4f..aa571d727903f2481ce2ce1215d5a444ffd83927 100644 (file)
@@ -1601,12 +1601,12 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
                                continue;
                        }
 
-                       if (seen_indices & BIT(mbssid_index_ie[2]))
+                       if (seen_indices & BIT_ULL(mbssid_index_ie[2]))
                                /* We don't support legacy split of a profile */
                                net_dbg_ratelimited("Partial info for BSSID index %d\n",
                                                    mbssid_index_ie[2]);
 
-                       seen_indices |= BIT(mbssid_index_ie[2]);
+                       seen_indices |= BIT_ULL(mbssid_index_ie[2]);
 
                        non_tx_data->bssid_index = mbssid_index_ie[2];
                        non_tx_data->max_bssid_indicator = elem->data[0];
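
Aside (not from the patch): a small standalone C sketch of why the switch to BIT_ULL() matters here. seen_indices is a 64-bit bitmap, but BIT() shifts an unsigned long, which is 32 bits on 32-bit builds, so an MBSSID index above 31 would be undefined behaviour. MY_BIT()/MY_BIT_ULL() below are hypothetical stand-ins for the kernel macros.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's BIT()/BIT_ULL() macros. */
#define MY_BIT(n)      (1UL << (n))   /* unsigned long: 32 bits on 32-bit builds */
#define MY_BIT_ULL(n)  (1ULL << (n))  /* always 64 bits wide */

int main(void)
{
        uint64_t seen = 0;
        unsigned int idx = 40;            /* an index above 31 */

        /* MY_BIT(idx) would be undefined behaviour where long is 32 bits;
         * MY_BIT_ULL(idx) sets the intended bit in the 64-bit bitmap. */
        seen |= MY_BIT_ULL(idx);
        printf("bit %u set: %d\n", idx, (int)((seen >> idx) & 1));
        return 0;
}
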
index cf63b635afc098eeafe7462bd41b86a7e433b85c..1c39d6a2e85011aff8a4991473fdec387c93bdac 100644 (file)
@@ -1246,7 +1246,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
        if (rate->he_dcm)
                result /= 2;
 
-       return result;
+       return result / 10000;
 }
 
 u32 cfg80211_calculate_bitrate(struct rate_info *rate)
@@ -1998,7 +1998,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
                        continue;
 
                if (supp >= mcs_encoding) {
-                       max_vht_nss = i;
+                       max_vht_nss = i + 1;
                        break;
                }
        }
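
Aside (not from the patch): the NSS count is 1-based while the loop index is 0-based, so returning i under-reported the stream count by one. A simplified standalone sketch follows; the real function also compares against the requested MCS encoding, whereas here any value other than 3 ("not supported") counts.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical parser of a VHT MCS map: 2 bits per spatial stream,
 * streams 1..8, value 3 meaning "not supported". */
static int max_nss(uint16_t mcs_map)
{
        for (int i = 7; i >= 0; i--) {
                unsigned int supp = (mcs_map >> (2 * i)) & 0x3;

                if (supp != 3)
                        return i + 1;   /* i alone would under-count by one */
        }
        return 0;
}

int main(void)
{
        /* streams 1 and 2 support MCS 0-9, streams 3..8 unsupported */
        printf("max NSS: %d\n", max_nss(0xfffa));       /* prints 2 */
        return 0;
}
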
index 2b18223e7eb8cd3691290b4d0e7bdf17843bd683..9c6de4f114f84cc17b442294a2049d8a8cd87002 100644 (file)
@@ -143,6 +143,9 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem)
        struct netdev_bpf bpf;
        int err;
 
+       if (!umem->dev)
+               return;
+
        if (umem->zc) {
                bpf.command = XDP_SETUP_XSK_UMEM;
                bpf.xsk.umem = NULL;
@@ -156,11 +159,9 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem)
                        WARN(1, "failed to disable umem!\n");
        }
 
-       if (umem->dev) {
-               rtnl_lock();
-               xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
-               rtnl_unlock();
-       }
+       rtnl_lock();
+       xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
+       rtnl_unlock();
 
        if (umem->zc) {
                dev_put(umem->dev);
index eae7b635343d13182a4d458d3280464f6ffee4ca..6e87cc831e84287170f4ec77d8eb890e587cad6c 100644 (file)
@@ -678,7 +678,7 @@ void read_trace_pipe(void)
                static char buf[4096];
                ssize_t sz;
 
-               sz = read(trace_fd, buf, sizeof(buf));
+               sz = read(trace_fd, buf, sizeof(buf) - 1);
                if (sz > 0) {
                        buf[sz] = 0;
                        puts(buf);
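
Aside (not from the patch): the fix reserves one byte so the buf[sz] = 0 terminator stays in bounds when a read fills the whole buffer. A standalone sketch of the same pattern, with a hypothetical input path:

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
        static char buf[4096];
        int fd = open("/etc/hostname", O_RDONLY);  /* hypothetical input file */
        ssize_t sz;

        if (fd < 0)
                return 1;

        /* Leave room for the terminator: a full read returns at most
         * sizeof(buf) - 1 bytes, so buf[sz] is always a valid index. */
        sz = read(fd, buf, sizeof(buf) - 1);
        if (sz > 0) {
                buf[sz] = '\0';
                fputs(buf, stdout);
        }
        close(fd);
        return 0;
}
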
index aff2b4ae914ee00acda73ef0c025a7500de42923..e3993805822355a544aa5af4b7f44804e27a965b 100644 (file)
@@ -216,7 +216,7 @@ static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return)
 {
        const char *event_type = "uprobe";
        struct perf_event_attr attr = {};
-       char buf[256], event_alias[256];
+       char buf[256], event_alias[sizeof("test_1234567890")];
        __u64 probe_offset, probe_addr;
        __u32 len, prog_id, fd_type;
        int err, res, kfd, efd;
index bcdd45df3f5127a160b3c79f835502a47e1a169b..a7a36209a193384d2e4dd932fbf2308c83b78efa 100755 (executable)
@@ -73,7 +73,7 @@ parse_symbol() {
        if [[ "${cache[$module,$address]+isset}" == "isset" ]]; then
                local code=${cache[$module,$address]}
        else
-               local code=$(addr2line -i -e "$objfile" "$address")
+               local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address")
                cache[$module,$address]=$code
        fi
 
index 1ce4e9bdac48191ac859a31919ec4eac12a29b98..b5b4b8190e654ef77c88d0e752944c08595061db 100644 (file)
@@ -213,7 +213,16 @@ static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
        return labels_profile(aa_get_newest_label(&p->label));
 }
 
-#define PROFILE_MEDIATES(P, T)  ((P)->policy.start[(unsigned char) (T)])
+static inline unsigned int PROFILE_MEDIATES(struct aa_profile *profile,
+                                           unsigned char class)
+{
+       if (class <= AA_CLASS_LAST)
+               return profile->policy.start[class];
+       else
+               return aa_dfa_match_len(profile->policy.dfa,
+                                       profile->policy.start[0], &class, 1);
+}
+
 static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
                                               u16 AF) {
        unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
index 01957ce9252b52d436da3f057ec5c97687106431..8cfc9493eefc77b886eecb06ce7037313c39975e 100644 (file)
@@ -219,16 +219,21 @@ static void *kvmemdup(const void *src, size_t len)
 static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
 {
        size_t size = 0;
+       void *pos = e->pos;
 
        if (!inbounds(e, sizeof(u16)))
-               return 0;
+               goto fail;
        size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
        e->pos += sizeof(__le16);
        if (!inbounds(e, size))
-               return 0;
+               goto fail;
        *chunk = e->pos;
        e->pos += size;
        return size;
+
+fail:
+       e->pos = pos;
+       return 0;
 }
 
 /* unpack control byte */
@@ -272,7 +277,7 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
                char *tag = NULL;
                size_t size = unpack_u16_chunk(e, &tag);
                /* if a name is specified it must match. otherwise skip tag */
-               if (name && (!size || strcmp(name, tag)))
+               if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
                        goto fail;
        } else if (name) {
                /* if a name is specified and there is no name tag fail */
@@ -290,62 +295,84 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
 
 static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
 {
+       void *pos = e->pos;
+
        if (unpack_nameX(e, AA_U8, name)) {
                if (!inbounds(e, sizeof(u8)))
-                       return 0;
+                       goto fail;
                if (data)
                        *data = get_unaligned((u8 *)e->pos);
                e->pos += sizeof(u8);
                return 1;
        }
+
+fail:
+       e->pos = pos;
        return 0;
 }
 
 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
 {
+       void *pos = e->pos;
+
        if (unpack_nameX(e, AA_U32, name)) {
                if (!inbounds(e, sizeof(u32)))
-                       return 0;
+                       goto fail;
                if (data)
                        *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
                e->pos += sizeof(u32);
                return 1;
        }
+
+fail:
+       e->pos = pos;
        return 0;
 }
 
 static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
 {
+       void *pos = e->pos;
+
        if (unpack_nameX(e, AA_U64, name)) {
                if (!inbounds(e, sizeof(u64)))
-                       return 0;
+                       goto fail;
                if (data)
                        *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
                e->pos += sizeof(u64);
                return 1;
        }
+
+fail:
+       e->pos = pos;
        return 0;
 }
 
 static size_t unpack_array(struct aa_ext *e, const char *name)
 {
+       void *pos = e->pos;
+
        if (unpack_nameX(e, AA_ARRAY, name)) {
                int size;
                if (!inbounds(e, sizeof(u16)))
-                       return 0;
+                       goto fail;
                size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
                e->pos += sizeof(u16);
                return size;
        }
+
+fail:
+       e->pos = pos;
        return 0;
 }
 
 static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
 {
+       void *pos = e->pos;
+
        if (unpack_nameX(e, AA_BLOB, name)) {
                u32 size;
                if (!inbounds(e, sizeof(u32)))
-                       return 0;
+                       goto fail;
                size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
                e->pos += sizeof(u32);
                if (inbounds(e, (size_t) size)) {
@@ -354,6 +381,9 @@ static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
                        return size;
                }
        }
+
+fail:
+       e->pos = pos;
        return 0;
 }
 
@@ -370,9 +400,10 @@ static int unpack_str(struct aa_ext *e, const char **string, const char *name)
                        if (src_str[size - 1] != 0)
                                goto fail;
                        *string = src_str;
+
+                       return size;
                }
        }
-       return size;
 
 fail:
        e->pos = pos;
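
Aside (not from the patch set): the common thread in these unpack_* changes is that a failed unpack now rewinds e->pos, so a partial read does not desynchronise later unpackers. A minimal standalone sketch of that rollback pattern, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical cursor over a byte buffer. */
struct cursor {
        const uint8_t *pos;
        const uint8_t *end;
};

static int unpack_le16(struct cursor *c, uint16_t *out)
{
        const uint8_t *saved = c->pos;

        if (c->end - c->pos < 2)
                goto fail;
        *out = (uint16_t)(c->pos[0] | (c->pos[1] << 8));
        c->pos += 2;
        return 1;

fail:
        c->pos = saved;         /* leave the cursor untouched on error */
        return 0;
}

int main(void)
{
        uint8_t buf[] = { 0x34, 0x12 };
        struct cursor c = { buf, buf + sizeof(buf) };
        uint16_t v = 0;

        printf("%d 0x%04x\n", unpack_le16(&c, &v), (unsigned int)v);
        return 0;
}
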
index 8346a4f7c5d7802289371f2dc786d02133be863b..a99be508f93d0dbfa9e939a82c60d06bb6b15e0c 100644 (file)
@@ -739,14 +739,20 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
        rc = security_sid_to_context_inval(sad->state, sad->ssid, &scontext,
                                           &scontext_len);
        if (!rc && scontext) {
-               audit_log_format(ab, " srawcon=%s", scontext);
+               if (scontext_len && scontext[scontext_len - 1] == '\0')
+                       scontext_len--;
+               audit_log_format(ab, " srawcon=");
+               audit_log_n_untrustedstring(ab, scontext, scontext_len);
                kfree(scontext);
        }
 
        rc = security_sid_to_context_inval(sad->state, sad->tsid, &scontext,
                                           &scontext_len);
        if (!rc && scontext) {
-               audit_log_format(ab, " trawcon=%s", scontext);
+               if (scontext_len && scontext[scontext_len - 1] == '\0')
+                       scontext_len--;
+               audit_log_format(ab, " trawcon=");
+               audit_log_n_untrustedstring(ab, scontext, scontext_len);
                kfree(scontext);
        }
 }
index 3ec702cf46ca6d04492e0b05ddb84cbad17542e7..fea66f6b31bf50435519fcd324ed54e3a785bb2f 100644 (file)
@@ -1052,15 +1052,24 @@ static int selinux_add_mnt_opt(const char *option, const char *val, int len,
        if (token == Opt_error)
                return -EINVAL;
 
-       if (token != Opt_seclabel)
+       if (token != Opt_seclabel) {
                val = kmemdup_nul(val, len, GFP_KERNEL);
+               if (!val) {
+                       rc = -ENOMEM;
+                       goto free_opt;
+               }
+       }
        rc = selinux_add_opt(token, val, mnt_opts);
        if (unlikely(rc)) {
                kfree(val);
-               if (*mnt_opts) {
-                       selinux_free_mnt_opts(*mnt_opts);
-                       *mnt_opts = NULL;
-               }
+               goto free_opt;
+       }
+       return rc;
+
+free_opt:
+       if (*mnt_opts) {
+               selinux_free_mnt_opts(*mnt_opts);
+               *mnt_opts = NULL;
        }
        return rc;
 }
@@ -2616,10 +2625,11 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
        char *from = options;
        char *to = options;
        bool first = true;
+       int rc;
 
        while (1) {
                int len = opt_len(from);
-               int token, rc;
+               int token;
                char *arg = NULL;
 
                token = match_opt_prefix(from, len, &arg);
@@ -2635,15 +2645,15 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
                                                *q++ = c;
                                }
                                arg = kmemdup_nul(arg, q - arg, GFP_KERNEL);
+                               if (!arg) {
+                                       rc = -ENOMEM;
+                                       goto free_opt;
+                               }
                        }
                        rc = selinux_add_opt(token, arg, mnt_opts);
                        if (unlikely(rc)) {
                                kfree(arg);
-                               if (*mnt_opts) {
-                                       selinux_free_mnt_opts(*mnt_opts);
-                                       *mnt_opts = NULL;
-                               }
-                               return rc;
+                               goto free_opt;
                        }
                } else {
                        if (!first) {   // copy with preceding comma
@@ -2661,6 +2671,13 @@ static int selinux_sb_eat_lsm_opts(char *options, void **mnt_opts)
        }
        *to = '\0';
        return 0;
+
+free_opt:
+       if (*mnt_opts) {
+               selinux_free_mnt_opts(*mnt_opts);
+               *mnt_opts = NULL;
+       }
+       return rc;
 }
 
 static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
index 0de725f88bedb24a0c0207b9953074316ee158f5..d99450b4f51124f255932023f3b6c6e91efcb25e 100644 (file)
@@ -68,6 +68,7 @@ static struct {
        int len;
        int opt;
 } smk_mount_opts[] = {
+       {"smackfsdef", sizeof("smackfsdef") - 1, Opt_fsdefault},
        A(fsdefault), A(fsfloor), A(fshat), A(fsroot), A(fstransmute)
 };
 #undef A
@@ -682,11 +683,12 @@ static int smack_fs_context_dup(struct fs_context *fc,
 }
 
 static const struct fs_parameter_spec smack_param_specs[] = {
-       fsparam_string("fsdefault",     Opt_fsdefault),
-       fsparam_string("fsfloor",       Opt_fsfloor),
-       fsparam_string("fshat",         Opt_fshat),
-       fsparam_string("fsroot",        Opt_fsroot),
-       fsparam_string("fstransmute",   Opt_fstransmute),
+       fsparam_string("smackfsdef",            Opt_fsdefault),
+       fsparam_string("smackfsdefault",        Opt_fsdefault),
+       fsparam_string("smackfsfloor",          Opt_fsfloor),
+       fsparam_string("smackfshat",            Opt_fshat),
+       fsparam_string("smackfsroot",           Opt_fsroot),
+       fsparam_string("smackfstransmute",      Opt_fstransmute),
        {}
 };
 
index 37e47fa7b0e386c1727446051d2d5fa27dda7ba1..81f7edc560d0490a5d9ce93d632c0757b535d7bf 100644 (file)
@@ -344,7 +344,7 @@ static void destroy_stream(struct snd_motu *motu,
        }
 
        amdtp_stream_destroy(stream);
-       fw_iso_resources_free(resources);
+       fw_iso_resources_destroy(resources);
 }
 
 int snd_motu_stream_init_duplex(struct snd_motu *motu)
index 5e31c460a90f113f59c84b0cce97640bd1fc04b5..9fd145cc4b07f00d206197ef11470f0ac48eb64d 100644 (file)
@@ -148,9 +148,6 @@ static int detect_quirks(struct snd_oxfw *oxfw)
                oxfw->midi_input_ports = 0;
                oxfw->midi_output_ports = 0;
 
-               /* Output stream exists but no data channels are useful. */
-               oxfw->has_output = false;
-
                return snd_oxfw_scs1x_add(oxfw);
        }
 
index ad1b55a1f045f8ed2cf295989afd2cbabba86bfa..a3a113ef5d56ccee46af49996b28ff5625f66397 100644 (file)
@@ -162,7 +162,6 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_init);
 void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev)
 {
        snd_hdac_device_exit(hdev);
-       kfree(hdev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_exit);
 
index c5e46df9c5488621bd2750ccbf5fb563887ee15c..6c51b8363f8b73ccea5f8ff65558a298ddde8524 100644 (file)
@@ -826,7 +826,14 @@ static int snd_hda_codec_dev_free(struct snd_device *device)
        if (codec->core.type == HDA_DEV_LEGACY)
                snd_hdac_device_unregister(&codec->core);
        codec_display_power(codec, false);
-       put_device(hda_codec_dev(codec));
+
+       /*
+        * In the case of ASoC HD-audio bus, the device refcount is released in
+        * snd_hdac_ext_bus_device_remove() explicitly.
+        */
+       if (codec->core.type == HDA_DEV_LEGACY)
+               put_device(hda_codec_dev(codec));
+
        return 0;
 }
 
index 9742449785094c994f7174a4bad976c2656e0c47..5b3c26991f26b07474a8481b588982432fd7fdbe 100644 (file)
@@ -4120,18 +4120,19 @@ static struct coef_fw alc225_pre_hsmode[] = {
 static void alc_headset_mode_unplugged(struct hda_codec *codec)
 {
        static struct coef_fw coef0255[] = {
+               WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
                WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
                UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
                WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
                WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
                {}
        };
-       static struct coef_fw coef0255_1[] = {
-               WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
-               {}
-       };
        static struct coef_fw coef0256[] = {
                WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+               WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
+               WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
+               WRITE_COEFEX(0x57, 0x03, 0x09a3), /* Direct Drive HP Amp control */
+               UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
                {}
        };
        static struct coef_fw coef0233[] = {
@@ -4194,13 +4195,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
-               alc_process_coef_fw(codec, coef0255_1);
                alc_process_coef_fw(codec, coef0255);
                break;
        case 0x10ec0236:
        case 0x10ec0256:
                alc_process_coef_fw(codec, coef0256);
-               alc_process_coef_fw(codec, coef0255);
                break;
        case 0x10ec0234:
        case 0x10ec0274:
@@ -4253,6 +4252,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
                WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
                {}
        };
+       static struct coef_fw coef0256[] = {
+               UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set to verb control)*/
+               WRITE_COEFEX(0x57, 0x03, 0x09a3),
+               WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
+               {}
+       };
        static struct coef_fw coef0233[] = {
                UPDATE_COEF(0x35, 0, 1<<14),
                WRITE_COEF(0x06, 0x2100),
@@ -4300,14 +4305,19 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
        };
 
        switch (codec->core.vendor_id) {
-       case 0x10ec0236:
        case 0x10ec0255:
-       case 0x10ec0256:
                alc_write_coef_idx(codec, 0x45, 0xc489);
                snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
                alc_process_coef_fw(codec, coef0255);
                snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
                break;
+       case 0x10ec0236:
+       case 0x10ec0256:
+               alc_write_coef_idx(codec, 0x45, 0xc489);
+               snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+               alc_process_coef_fw(codec, coef0256);
+               snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+               break;
        case 0x10ec0234:
        case 0x10ec0274:
        case 0x10ec0294:
@@ -4389,6 +4399,14 @@ static void alc_headset_mode_default(struct hda_codec *codec)
                WRITE_COEF(0x49, 0x0049),
                {}
        };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x45, 0xc489),
+               WRITE_COEFEX(0x57, 0x03, 0x0da3),
+               WRITE_COEF(0x49, 0x0049),
+               UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
+               WRITE_COEF(0x06, 0x6100),
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x06, 0x2100),
                WRITE_COEF(0x32, 0x4ea3),
@@ -4439,11 +4457,16 @@ static void alc_headset_mode_default(struct hda_codec *codec)
                alc_process_coef_fw(codec, alc225_pre_hsmode);
                alc_process_coef_fw(codec, coef0225);
                break;
-       case 0x10ec0236:
        case 0x10ec0255:
-       case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0236:
+       case 0x10ec0256:
+               alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+               alc_write_coef_idx(codec, 0x45, 0xc089);
+               msleep(50);
+               alc_process_coef_fw(codec, coef0256);
+               break;
        case 0x10ec0234:
        case 0x10ec0274:
        case 0x10ec0294:
@@ -4487,8 +4510,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
        };
        static struct coef_fw coef0256[] = {
                WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
-               WRITE_COEF(0x1b, 0x0c6b),
-               WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+               WRITE_COEF(0x1b, 0x0e6b),
                {}
        };
        static struct coef_fw coef0233[] = {
@@ -4606,8 +4628,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
        };
        static struct coef_fw coef0256[] = {
                WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
-               WRITE_COEF(0x1b, 0x0c6b),
-               WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+               WRITE_COEF(0x1b, 0x0e6b),
                {}
        };
        static struct coef_fw coef0233[] = {
@@ -4739,13 +4760,37 @@ static void alc_determine_headset_type(struct hda_codec *codec)
        };
 
        switch (codec->core.vendor_id) {
-       case 0x10ec0236:
        case 0x10ec0255:
+               alc_process_coef_fw(codec, coef0255);
+               msleep(300);
+               val = alc_read_coef_idx(codec, 0x46);
+               is_ctia = (val & 0x0070) == 0x0070;
+               break;
+       case 0x10ec0236:
        case 0x10ec0256:
+               alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+               alc_write_coef_idx(codec, 0x06, 0x6104);
+               alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
+
+               snd_hda_codec_write(codec, 0x21, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+               msleep(80);
+               snd_hda_codec_write(codec, 0x21, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
                alc_process_coef_fw(codec, coef0255);
                msleep(300);
                val = alc_read_coef_idx(codec, 0x46);
                is_ctia = (val & 0x0070) == 0x0070;
+
+               alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);
+               alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+
+               snd_hda_codec_write(codec, 0x21, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+               msleep(80);
+               snd_hda_codec_write(codec, 0x21, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
                break;
        case 0x10ec0234:
        case 0x10ec0274:
@@ -6210,15 +6255,13 @@ static const struct hda_fixup alc269_fixups[] = {
                .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
        },
        [ALC255_FIXUP_ACER_MIC_NO_PRESENCE] = {
-               .type = HDA_FIXUP_VERBS,
-               .v.verbs = (const struct hda_verb[]) {
-                       /* Enable the Mic */
-                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
-                       { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
-                       {}
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
                },
                .chained = true,
-               .chain_id = ALC269_FIXUP_LIFEBOOK_EXTMIC
+               .chain_id = ALC255_FIXUP_HEADSET_MODE
        },
        [ALC255_FIXUP_ASUS_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
@@ -7262,10 +7305,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x18, 0x02a11030},
                {0x19, 0x0181303F},
                {0x21, 0x0221102f}),
-       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
-               {0x12, 0x90a60140},
-               {0x14, 0x90170120},
-               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
                {0x12, 0x90a601c0},
                {0x14, 0x90171120},
index 3729b132ad85d9050ddc9dceda1550ccd4ef3a10..fe08dd9f15645429d11acdb2b84f1bedf67ffe2c 100644 (file)
@@ -812,7 +812,7 @@ static int snd_ice1712_6fire_read_pca(struct snd_ice1712 *ice, unsigned char reg
 
        snd_i2c_lock(ice->i2c);
        byte = reg;
-       if (snd_i2c_sendbytes(spec->i2cdevs[EWS_I2C_6FIRE], &byte, 1)) {
+       if (snd_i2c_sendbytes(spec->i2cdevs[EWS_I2C_6FIRE], &byte, 1) != 1) {
                snd_i2c_unlock(ice->i2c);
                dev_err(ice->card->dev, "cannot send pca\n");
                return -EIO;
index eab7c76cfcd931bd06dd050da94bc23e67d567c4..71562154c0b1e6e38873cfa76d8ab9e03721767d 100644 (file)
@@ -304,7 +304,10 @@ static int ak4458_rstn_control(struct snd_soc_component *component, int bit)
                                          AK4458_00_CONTROL1,
                                          AK4458_RSTN_MASK,
                                          0x0);
-       return ret;
+       if (ret < 0)
+               return ret;
+
+       return 0;
 }
 
 static int ak4458_hw_params(struct snd_pcm_substream *substream,
@@ -536,9 +539,10 @@ static void ak4458_power_on(struct ak4458_priv *ak4458)
        }
 }
 
-static void ak4458_init(struct snd_soc_component *component)
+static int ak4458_init(struct snd_soc_component *component)
 {
        struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component);
+       int ret;
 
        /* External Mute ON */
        if (ak4458->mute_gpiod)
@@ -546,21 +550,21 @@ static void ak4458_init(struct snd_soc_component *component)
 
        ak4458_power_on(ak4458);
 
-       snd_soc_component_update_bits(component, AK4458_00_CONTROL1,
+       ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1,
                            0x80, 0x80);   /* ACKS bit = 1; 10000000 */
+       if (ret < 0)
+               return ret;
 
-       ak4458_rstn_control(component, 1);
+       return ak4458_rstn_control(component, 1);
 }
 
 static int ak4458_probe(struct snd_soc_component *component)
 {
        struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component);
 
-       ak4458_init(component);
-
        ak4458->fs = 48000;
 
-       return 0;
+       return ak4458_init(component);
 }
 
 static void ak4458_remove(struct snd_soc_component *component)
index ab27d2b94d02db70e137946b0cd4bd8953ab46cc..c0190ec59e7492e57d98cd2bf7d6e592a383cb94 100644 (file)
@@ -60,7 +60,7 @@ static const struct reg_default cs4265_reg_defaults[] = {
 static bool cs4265_readable_register(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2:
+       case CS4265_CHIP_ID ... CS4265_MAX_REGISTER:
                return true;
        default:
                return false;
index ebb9e0cf83647b80fae1b9949fcd04e556ba9eed..28a4ac36c4f856b3ec3b9242fdf60a7fa0100cda 100644 (file)
@@ -558,6 +558,7 @@ static int cs42xx8_runtime_resume(struct device *dev)
        msleep(5);
 
        regcache_cache_only(cs42xx8->regmap, false);
+       regcache_mark_dirty(cs42xx8->regmap);
 
        ret = regcache_sync(cs42xx8->regmap);
        if (ret) {
index 7619ea31ab50e3900e73fb9015db6df564e0a69a..ada8c25e643da93a96c5fb07b5d43e71f642c554 100644 (file)
@@ -1909,6 +1909,21 @@ static int max98090_configure_dmic(struct max98090_priv *max98090,
        return 0;
 }
 
+static int max98090_dai_startup(struct snd_pcm_substream *substream,
+                               struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct max98090_priv *max98090 = snd_soc_component_get_drvdata(component);
+       unsigned int fmt = max98090->dai_fmt;
+
+       /* Remove 24-bit format support if it is not in right justified mode. */
+       if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_RIGHT_J) {
+               substream->runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+               snd_pcm_hw_constraint_msbits(substream->runtime, 0, 16, 16);
+       }
+       return 0;
+}
+
 static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
                                   struct snd_pcm_hw_params *params,
                                   struct snd_soc_dai *dai)
@@ -2316,6 +2331,7 @@ EXPORT_SYMBOL_GPL(max98090_mic_detect);
 #define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
 
 static const struct snd_soc_dai_ops max98090_dai_ops = {
+       .startup = max98090_dai_startup,
        .set_sysclk = max98090_dai_set_sysclk,
        .set_fmt = max98090_dai_set_fmt,
        .set_tdm_slot = max98090_set_tdm_slot,
index adf59039a3b6ba1d277c0bbfa8dc04af86ca13e5..cdd312db3e781ae4716dff0d922a721a0980faa0 100644 (file)
@@ -405,6 +405,8 @@ static int rt274_mic_detect(struct snd_soc_component *component,
 {
        struct rt274_priv *rt274 = snd_soc_component_get_drvdata(component);
 
+       rt274->jack = jack;
+
        if (jack == NULL) {
                /* Disable jack detection */
                regmap_update_bits(rt274->regmap, RT274_EAPD_GPIO_IRQ_CTRL,
@@ -412,7 +414,6 @@ static int rt274_mic_detect(struct snd_soc_component *component,
 
                return 0;
        }
-       rt274->jack = jack;
 
        regmap_update_bits(rt274->regmap, RT274_EAPD_GPIO_IRQ_CTRL,
                                RT274_IRQ_EN, RT274_IRQ_EN);
index 9a037108b1aea56a6f37831cf769597bf5b092a7..a746e11ccfe3a5b3e4cfcfc2fa2824b54d928ca1 100644 (file)
@@ -2882,6 +2882,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
                                                 RT5670_DEV_GPIO |
                                                 RT5670_JD_MODE3),
        },
+       {
+               .callback = rt5670_quirk_cb,
+               .ident = "Aegex 10 tablet (RU2)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "AEGEX"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "RU2"),
+               },
+               .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+                                                RT5670_DMIC2_INR |
+                                                RT5670_DEV_GPIO |
+                                                RT5670_JD_MODE3),
+       },
        {}
 };
 
index 84b6bd8b50e18a8dfeee74b16820b34aacbb0829..a4dfa0345c6e9d4956e0883041af7708930da38a 100644 (file)
@@ -101,7 +101,7 @@ static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
        u32 word_size = min_t(u32, dstlen, 8);
 
        for (w = 0; w < dstlen; w += word_size) {
-               for (i = 0; i < word_size; i++) {
+               for (i = 0; i < word_size && i + w < dstlen; i++) {
                        si = w + word_size - i - 1;
                        dst[w + i] = si < srclen ? src[si] : 0;
                }
@@ -152,8 +152,9 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
                status |= spi_sync(g_spi, &m);
                mutex_unlock(&spi_mutex);
 
+
                /* Copy data back to caller buffer */
-               rt5677_spi_reverse(cb + offset, t[1].len, body, t[1].len);
+               rt5677_spi_reverse(cb + offset, len - offset, body, t[1].len);
        }
        return status;
 }
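
Aside (not from the patch): without the added i + w < dstlen bound, the final partial word of the reversed copy writes past dst whenever dstlen is not a multiple of the word size. A standalone sketch of the bounded copy; the names are hypothetical.

#include <stdint.h>
#include <stdio.h>

static void reverse_copy(uint8_t *dst, uint32_t dstlen,
                         const uint8_t *src, uint32_t srclen)
{
        uint32_t word = dstlen < 8 ? dstlen : 8;

        for (uint32_t w = 0; w < dstlen; w += word)
                for (uint32_t i = 0; i < word && w + i < dstlen; i++) {
                        /* the extra bound keeps the last partial word in range */
                        uint32_t si = w + word - i - 1;

                        dst[w + i] = si < srclen ? src[si] : 0;
                }
}

int main(void)
{
        uint8_t src[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
        uint8_t dst[10] = { 0 };

        reverse_copy(dst, sizeof(dst), src, sizeof(src));
        for (int i = 0; i < 10; i++)
                printf("%d ", dst[i]);          /* 7 6 5 4 3 2 1 0 0 0 */
        printf("\n");
        return 0;
}
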
index 0b937924d2e47961d697d068edf3944772058baa..ea035c12a325ee1e47a0514e76dba92ed15be7f3 100644 (file)
@@ -282,8 +282,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
                return -EINVAL;
        }
 
-       if ((outrate > 8000 && outrate < 30000) &&
-           (outrate/inrate > 24 || inrate/outrate > 8)) {
+       if ((outrate >= 8000 && outrate <= 30000) &&
+           (outrate > 24 * inrate || inrate > 8 * outrate)) {
                pair_err("exceed supported ratio range [1/24, 8] for \
                                inrate/outrate: %d/%d\n", inrate, outrate);
                return -EINVAL;
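
Aside (not from the patch): rewriting outrate/inrate > 24 as outrate > 24 * inrate avoids integer-division truncation, which let ratios between 24 and 25 slip through. A standalone sketch of just the ratio bound; the 8-30 kHz output-rate gating is omitted and the helper name is hypothetical.

#include <stdio.h>
#include <stdbool.h>

/* The ratio must stay within [1/24, 8], tested without division. */
static bool ratio_ok(int inrate, int outrate)
{
        return outrate <= 24 * inrate && inrate <= 8 * outrate;
}

int main(void)
{
        int inrate = 1000, outrate = 24500;     /* a 24.5x ratio */

        /* The truncated quotient is 24, so "outrate / inrate > 24" misses it. */
        printf("truncated quotient: %d\n", outrate / inrate);
        printf("ratio_ok: %d\n", ratio_ok(inrate, outrate));    /* 0: rejected */
        return 0;
}
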
index 1ec298dd0e4f065bff14658a0922d4afe1fa2f78..13db2854db3e74fa97b566d19ab93949b0152c1b 100644 (file)
@@ -158,11 +158,11 @@ int sst_create_ipc_msg(struct ipc_post **arg, bool large)
 {
        struct ipc_post *msg;
 
-       msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+       msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
        if (!msg)
                return -ENOMEM;
        if (large) {
-               msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
+               msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
                if (!msg->mailbox_data) {
                        kfree(msg);
                        return -ENOMEM;
index 1f00d473793fc99c513cb7dee0b771db95dfcfca..2fe1ce8791235c51bcd39501dce428d71f195708 100644 (file)
@@ -487,6 +487,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
        }
 
        /* override plaform name, if required */
+       byt_cht_es8316_card.dev = dev;
        platform_name = mach->mach_params.platform;
 
        ret = snd_soc_fixup_dai_links_platform_name(&byt_cht_es8316_card,
@@ -567,7 +568,6 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
                 (quirk & BYT_CHT_ES8316_MONO_SPEAKER) ? "mono" : "stereo",
                 mic_name[BYT_CHT_ES8316_MAP(quirk)]);
        byt_cht_es8316_card.long_name = long_name;
-       byt_cht_es8316_card.dev = dev;
        snd_soc_card_set_drvdata(&byt_cht_es8316_card, priv);
 
        ret = devm_snd_soc_register_card(dev, &byt_cht_es8316_card);
index d72623f508b7f6a165290b1be8105bf1a9acb92a..613b37172441f686bf4a0ddb7232b03f484a3a2c 100644 (file)
@@ -446,6 +446,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        }
 
        /* override plaform name, if required */
+       snd_soc_card_cht.dev = &pdev->dev;
        mach = (&pdev->dev)->platform_data;
        platform_name = mach->mach_params.platform;
 
@@ -455,7 +456,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
                return ret_val;
 
        /* register the soc card */
-       snd_soc_card_cht.dev = &pdev->dev;
        snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
 
        if (drv->quirks & QUIRK_PMC_PLT_CLK_0)
index 1c17f82fc9225ae1b3c5ed0c1fd8732fc223ba0e..b0d658e3d3f7e56d456db730b47a02869075b38b 100644 (file)
@@ -249,6 +249,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
 
        /* override plaform name, if required */
+       snd_soc_card_cht.dev = &pdev->dev;
        mach = (&pdev->dev)->platform_data;
        platform_name = mach->mach_params.platform;
 
@@ -258,7 +259,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
                return ret_val;
 
        /* register the soc card */
-       snd_soc_card_cht.dev = &pdev->dev;
        ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
        if (ret_val) {
                dev_err(&pdev->dev,
index 1731cc0fac259d449374e5371bdaefab1a079928..028e571f6a7763a15b480fff9914c22355dcb93a 100644 (file)
@@ -418,6 +418,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        }
 
        /* override plaform name, if required */
+       snd_soc_card_cht.dev = &pdev->dev;
        platform_name = mach->mach_params.platform;
 
        ret_val = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cht,
@@ -435,7 +436,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
 
        /* register the soc card */
-       snd_soc_card_cht.dev = &pdev->dev;
        ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
        if (ret_val) {
                dev_err(&pdev->dev,
index f28fb98cc30632393386a15a96f7c98d317e2985..3343dbcd506fdc8de46faaf17ec42f0f392dcf21 100644 (file)
 #define SOF_RT5682_MCLK_EN                     BIT(3)
 #define SOF_RT5682_MCLK_24MHZ                  BIT(4)
 #define SOF_SPEAKER_AMP_PRESENT                BIT(5)
-#define SOF_RT5682_SSP_AMP(quirk)              ((quirk) & GENMASK(8, 6))
-#define SOF_RT5682_SSP_AMP_MASK                        (GENMASK(8, 6))
 #define SOF_RT5682_SSP_AMP_SHIFT               6
+#define SOF_RT5682_SSP_AMP_MASK                 (GENMASK(8, 6))
+#define SOF_RT5682_SSP_AMP(quirk)      \
+       (((quirk) << SOF_RT5682_SSP_AMP_SHIFT) & SOF_RT5682_SSP_AMP_MASK)
 
 /* Default: MCLK on, MCLK 19.2M, SSP0  */
 static unsigned long sof_rt5682_quirk = SOF_RT5682_MCLK_EN |
@@ -144,9 +145,9 @@ static int sof_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
        jack = &ctx->sof_headset;
 
        snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
-       snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
-       snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
-       snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+       snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+       snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+       snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
        ret = snd_soc_component_set_jack(component, jack, NULL);
 
        if (ret) {
index abd34fa277498215155e0b075669cfd7b0d3668e..55e80c3d2af0028a570d46e2fd33651e5e796567 100644 (file)
@@ -13,6 +13,7 @@ static unsigned long byt_machine_id;
 
 #define BYT_THINKPAD_10  1
 #define BYT_POV_P1006W   2
+#define BYT_AEGEX_10     3
 
 static int byt_thinkpad10_quirk_cb(const struct dmi_system_id *id)
 {
@@ -26,6 +27,12 @@ static int byt_pov_p1006w_quirk_cb(const struct dmi_system_id *id)
        return 1;
 }
 
+static int byt_aegex10_quirk_cb(const struct dmi_system_id *id)
+{
+       byt_machine_id = BYT_AEGEX_10;
+       return 1;
+}
+
 static const struct dmi_system_id byt_table[] = {
        {
                .callback = byt_thinkpad10_quirk_cb,
@@ -66,9 +73,18 @@ static const struct dmi_system_id byt_table[] = {
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
                },
        },
+       {
+               /* Aegex 10 tablet (RU2) */
+               .callback = byt_aegex10_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "AEGEX"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "RU2"),
+               },
+       },
        { }
 };
 
+/* The Thinkpad 10 and Aegex 10 tablets have the same ID problem */
 static struct snd_soc_acpi_mach byt_thinkpad_10 = {
        .id = "10EC5640",
        .drv_name = "cht-bsw-rt5672",
@@ -95,6 +111,7 @@ static struct snd_soc_acpi_mach *byt_quirk(void *arg)
 
        switch (byt_machine_id) {
        case BYT_THINKPAD_10:
+       case BYT_AEGEX_10:
                return &byt_thinkpad_10;
        case BYT_POV_P1006W:
                return &byt_pov_p1006w;
index df7c52cad5c3f660ff0da320bce076b892fdf6ff..c36c0aa4f683250a18085d8adeb28779ec86099d 100644 (file)
@@ -29,17 +29,17 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = {
                .sof_tplg_filename = "sof-cnl-rt274.tplg",
        },
        {
-               .id = "10EC5682",
+               .id = "MX98357A",
                .drv_name = "sof_rt5682",
+               .quirk_data = &cml_codecs,
                .sof_fw_filename = "sof-cnl.ri",
-               .sof_tplg_filename = "sof-cml-rt5682.tplg",
+               .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg",
        },
        {
-               .id = "MX98357A",
+               .id = "10EC5682",
                .drv_name = "sof_rt5682",
-               .quirk_data = &cml_codecs,
                .sof_fw_filename = "sof-cnl.ri",
-               .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg",
+               .sof_tplg_filename = "sof-cml-rt5682.tplg",
        },
 
        {},
index 933ab51af05b1fdc5c8cbfcd9fa9c07a74289721..111e44b64b38c4004a8cd53f0760ec9676f633a5 100644 (file)
@@ -133,7 +133,7 @@ config SND_SOC_MT8183_MT6358_TS3A227E_MAX98357A
 
 config SND_SOC_MT8183_DA7219_MAX98357A
        tristate "ASoC Audio driver for MT8183 with DA7219 MAX98357A codec"
-       depends on SND_SOC_MT8183
+       depends on SND_SOC_MT8183 && I2C
        select SND_SOC_MT6358
        select SND_SOC_MAX98357A
        select SND_SOC_DA7219
index 2403bec2fccf35ad4b1752bba204454c4bd2b6cf..41c0cfaf2db5d423ad3cad7675ee9ad9367018a2 100644 (file)
@@ -228,7 +228,10 @@ static void soc_init_card_debugfs(struct snd_soc_card *card)
 
 static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
 {
+       if (!card->debugfs_card_root)
+               return;
        debugfs_remove_recursive(card->debugfs_card_root);
+       card->debugfs_card_root = NULL;
 }
 
 static void snd_soc_debugfs_init(void)
@@ -2037,8 +2040,10 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
 static int soc_cleanup_card_resources(struct snd_soc_card *card)
 {
        /* free the ALSA card at first; this syncs with pending operations */
-       if (card->snd_card)
+       if (card->snd_card) {
                snd_card_free(card->snd_card);
+               card->snd_card = NULL;
+       }
 
        /* remove and free each DAI */
        soc_remove_dai_links(card);
@@ -2065,6 +2070,16 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
        int ret, i, order;
 
        mutex_lock(&client_mutex);
+       for_each_card_prelinks(card, i, dai_link) {
+               ret = soc_init_dai_link(card, dai_link);
+               if (ret) {
+                       soc_cleanup_platform(card);
+                       dev_err(card->dev, "ASoC: failed to init link %s: %d\n",
+                               dai_link->name, ret);
+                       mutex_unlock(&client_mutex);
+                       return ret;
+               }
+       }
        mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
 
        card->dapm.bias_level = SND_SOC_BIAS_OFF;
@@ -2789,26 +2804,9 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
  */
 int snd_soc_register_card(struct snd_soc_card *card)
 {
-       int i, ret;
-       struct snd_soc_dai_link *link;
-
        if (!card->name || !card->dev)
                return -EINVAL;
 
-       mutex_lock(&client_mutex);
-       for_each_card_prelinks(card, i, link) {
-
-               ret = soc_init_dai_link(card, link);
-               if (ret) {
-                       soc_cleanup_platform(card);
-                       dev_err(card->dev, "ASoC: failed to init link %s\n",
-                               link->name);
-                       mutex_unlock(&client_mutex);
-                       return ret;
-               }
-       }
-       mutex_unlock(&client_mutex);
-
        dev_set_drvdata(card->dev, card);
 
        snd_soc_initialize_card_lists(card);
@@ -2839,12 +2837,14 @@ static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
                snd_soc_dapm_shutdown(card);
                snd_soc_flush_all_delayed_work(card);
 
+               mutex_lock(&client_mutex);
                /* remove all components used by DAI links on this card */
                for_each_comp_order(order) {
                        for_each_card_rtds(card, rtd) {
                                soc_remove_link_components(card, rtd, order);
                        }
                }
+               mutex_unlock(&client_mutex);
 
                soc_cleanup_card_resources(card);
                if (!unregister)
index 81a7a12196ffed084305126b14b31deca9f4be8e..55f8278077f493d7df6694429dc0201ff1c15838 100644 (file)
@@ -2193,7 +2193,10 @@ static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
 
 static void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
 {
+       if (!dapm->debugfs_dapm)
+               return;
        debugfs_remove_recursive(dapm->debugfs_dapm);
+       dapm->debugfs_dapm = NULL;
 }
 
 #else
@@ -3831,8 +3834,8 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
                                                ret);
                                        goto out;
                                }
-                               source->active++;
                        }
+                       source->active++;
                        ret = soc_dai_hw_params(&substream, params, source);
                        if (ret < 0)
                                goto out;
@@ -3853,8 +3856,8 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
                                                ret);
                                        goto out;
                                }
-                               sink->active++;
                        }
+                       sink->active++;
                        ret = soc_dai_hw_params(&substream, params, sink);
                        if (ret < 0)
                                goto out;
index 0a4f60c7a188cd6c81122f5dbe5d413921df897e..c46ad0f662921210425ec78295a166301efb769b 100644 (file)
@@ -2479,7 +2479,8 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
 
                if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
-                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
+                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
+                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
                        continue;
 
                dev_dbg(be->dev, "ASoC: prepare BE %s\n",
index 048c7b034d70841b5cd981f4fb22e2d193f4ec57..71d87a86f060beb220309733692b5e1b81f995ed 100644 (file)
@@ -45,7 +45,10 @@ config SND_SOC_SOF_OPTIONS
 if SND_SOC_SOF_OPTIONS
 
 config SND_SOC_SOF_NOCODEC
-       tristate "SOF nocodec mode Support"
+       tristate
+
+config SND_SOC_SOF_NOCODEC_SUPPORT
+       bool "SOF nocodec mode support"
        help
          This adds support for a dummy/nocodec machine driver fallback
          option if no known codec is detected. This is typically only
@@ -81,7 +84,7 @@ if SND_SOC_SOF_DEBUG
 
 config SND_SOC_SOF_FORCE_NOCODEC_MODE
        bool "SOF force nocodec Mode"
-       depends on SND_SOC_SOF_NOCODEC
+       depends on SND_SOC_SOF_NOCODEC_SUPPORT
        help
          This forces SOF to use dummy/nocodec as machine driver, even
          though there is a codec detected on the real platform. This is
@@ -136,6 +139,7 @@ endif ## SND_SOC_SOF_OPTIONS
 config SND_SOC_SOF
        tristate
        select SND_SOC_TOPOLOGY
+       select SND_SOC_SOF_NOCODEC if SND_SOC_SOF_NOCODEC_SUPPORT
        help
          This option is not user-selectable but automagically handled by
          'select' statements at a higher level
index 11762c4580f1020aa6b0a20f4377536156885f96..84e2cbfbbcbb3c89c37e07e7be1351f267d4584e 100644 (file)
@@ -349,6 +349,7 @@ int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        struct sof_abi_hdr *data = cdata->data;
+       size_t size = data->size + sizeof(*data);
        int ret, err;
 
        if (be->max > sizeof(ucontrol->value.bytes.data)) {
@@ -358,10 +359,10 @@ int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
                return -EINVAL;
        }
 
-       if (data->size > be->max) {
+       if (size > be->max) {
                dev_err_ratelimited(sdev->dev,
-                                   "error: size too big %d bytes max is %d\n",
-                                   data->size, be->max);
+                                   "error: size too big %zu bytes max is %d\n",
+                                   size, be->max);
                return -EINVAL;
        }
 
@@ -375,7 +376,7 @@ int snd_sof_bytes_put(struct snd_kcontrol *kcontrol,
        }
 
        /* copy from kcontrol */
-       memcpy(data, ucontrol->value.bytes.data, data->size);
+       memcpy(data, ucontrol->value.bytes.data, size);
 
        /* notify DSP of byte control updates */
        snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
index 32105e0fabe884f368e1ae943f844a5141bb029b..5beda47cdf9fc748e9adec46d1fd1023a351d87b 100644 (file)
@@ -382,7 +382,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
 
        if (IS_ERR(plat_data->pdev_mach)) {
                ret = PTR_ERR(plat_data->pdev_mach);
-               goto comp_err;
+               goto fw_run_err;
        }
 
        dev_dbg(sdev->dev, "created machine %s\n",
@@ -393,8 +393,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
 
        return 0;
 
-comp_err:
-       snd_soc_unregister_component(sdev->dev);
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
 fw_run_err:
        snd_sof_fw_unload(sdev);
 fw_load_err:
@@ -403,6 +402,21 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
        snd_sof_free_debug(sdev);
 dbg_err:
        snd_sof_remove(sdev);
+#else
+
+       /*
+        * when the probe_continue is handled in a work queue, the
+        * probe does not fail so we don't release resources here.
+        * They will be released with an explicit call to
+        * snd_sof_device_remove() when the PCI/ACPI device is removed
+        */
+
+fw_run_err:
+fw_load_err:
+ipc_err:
+dbg_err:
+
+#endif
 
        return ret;
 }
@@ -484,7 +498,6 @@ int snd_sof_device_remove(struct device *dev)
        snd_sof_ipc_free(sdev);
        snd_sof_free_debug(sdev);
        snd_sof_free_trace(sdev);
-       snd_sof_remove(sdev);
 
        /*
         * Unregister machine driver. This will unbind the snd_card which
@@ -494,6 +507,14 @@ int snd_sof_device_remove(struct device *dev)
        if (!IS_ERR_OR_NULL(pdata->pdev_mach))
                platform_device_unregister(pdata->pdev_mach);
 
+       /*
+        * Unregistering the machine driver results in unloading the topology.
+        * Some widgets, ex: scheduler, attempt to power down the core they are
+        * scheduled on, when they are unloaded. Therefore, the DSP must be
+        * removed only after the topology has been unloaded.
+        */
+       snd_sof_remove(sdev);
+
        /* release firmware */
        release_firmware(pdata->fw);
        pdata->fw = NULL;
index 065cb868bdface7fdbaf10e822763e2de75e93ec..70d524ef9bc07d220e09e21f9cdd3a245017a09d 100644 (file)
@@ -220,17 +220,20 @@ static void bdw_get_registers(struct snd_sof_dev *sdev,
                              struct sof_ipc_panic_info *panic_info,
                              u32 *stack, size_t stack_words)
 {
-       /* first read regsisters */
-       sof_mailbox_read(sdev, sdev->dsp_oops_offset, xoops, sizeof(*xoops));
+       u32 offset = sdev->dsp_oops_offset;
+
+       /* first read registers */
+       sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+       /* note: variable AR register array is not read */
 
        /* then get panic info */
-       sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops),
-                        panic_info, sizeof(*panic_info));
+       offset += xoops->arch_hdr.totalsize;
+       sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
 
        /* then get the stack */
-       sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops) +
-                          sizeof(*panic_info), stack,
-                          stack_words * sizeof(u32));
+       offset += sizeof(*panic_info);
+       sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
 }
 
 static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
@@ -283,6 +286,8 @@ static irqreturn_t bdw_irq_thread(int irq, void *context)
                                                 SHIM_IMRX, SHIM_IMRX_DONE,
                                                 SHIM_IMRX_DONE);
 
+               spin_lock_irq(&sdev->ipc_lock);
+
                /*
                 * handle immediate reply from DSP core. If the msg is
                 * found, set done bit in cmd_done which is called at the
@@ -294,6 +299,8 @@ static irqreturn_t bdw_irq_thread(int irq, void *context)
                snd_sof_ipc_reply(sdev, ipcx);
 
                bdw_dsp_done(sdev);
+
+               spin_unlock_irq(&sdev->ipc_lock);
        }
 
        ipcd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
@@ -485,7 +492,6 @@ static void bdw_get_reply(struct snd_sof_dev *sdev)
 {
        struct snd_sof_ipc_msg *msg = sdev->msg;
        struct sof_ipc_reply reply;
-       unsigned long flags;
        int ret = 0;
 
        /*
@@ -501,8 +507,6 @@ static void bdw_get_reply(struct snd_sof_dev *sdev)
        /* get reply */
        sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
 
-       spin_lock_irqsave(&sdev->ipc_lock, flags);
-
        if (reply.error < 0) {
                memcpy(msg->reply_data, &reply, sizeof(reply));
                ret = reply.error;
@@ -521,8 +525,6 @@ static void bdw_get_reply(struct snd_sof_dev *sdev)
        }
 
        msg->reply_error = ret;
-
-       spin_unlock_irqrestore(&sdev->ipc_lock, flags);
 }
 
 static void bdw_host_done(struct snd_sof_dev *sdev)
index 7bf9143d3106737f751b01b06eb0fa3616275f70..39d1ae01c45db62b4c87bd57d83125bf0f5f93ba 100644 (file)
@@ -265,17 +265,20 @@ static void byt_get_registers(struct snd_sof_dev *sdev,
                              struct sof_ipc_panic_info *panic_info,
                              u32 *stack, size_t stack_words)
 {
+       u32 offset = sdev->dsp_oops_offset;
+
        /* first read registers */
-       sof_mailbox_read(sdev, sdev->dsp_oops_offset, xoops, sizeof(*xoops));
+       sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+       /* note: variable AR register array is not read */
 
        /* then get panic info */
-       sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops),
-                        panic_info, sizeof(*panic_info));
+       offset += xoops->arch_hdr.totalsize;
+       sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
 
        /* then get the stack */
-       sof_mailbox_read(sdev, sdev->dsp_oops_offset + sizeof(*xoops) +
-                          sizeof(*panic_info), stack,
-                          stack_words * sizeof(u32));
+       offset += sizeof(*panic_info);
+       sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
 }
 
 static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
@@ -329,6 +332,9 @@ static irqreturn_t byt_irq_thread(int irq, void *context)
                                                   SHIM_IMRX,
                                                   SHIM_IMRX_DONE,
                                                   SHIM_IMRX_DONE);
+
+               spin_lock_irq(&sdev->ipc_lock);
+
                /*
                 * handle immediate reply from DSP core. If the msg is
                 * found, set done bit in cmd_done which is called at the
@@ -340,6 +346,8 @@ static irqreturn_t byt_irq_thread(int irq, void *context)
                snd_sof_ipc_reply(sdev, ipcx);
 
                byt_dsp_done(sdev);
+
+               spin_unlock_irq(&sdev->ipc_lock);
        }
 
        /* new message from DSP */
@@ -383,7 +391,6 @@ static void byt_get_reply(struct snd_sof_dev *sdev)
 {
        struct snd_sof_ipc_msg *msg = sdev->msg;
        struct sof_ipc_reply reply;
-       unsigned long flags;
        int ret = 0;
 
        /*
@@ -399,8 +406,6 @@ static void byt_get_reply(struct snd_sof_dev *sdev)
        /* get reply */
        sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
 
-       spin_lock_irqsave(&sdev->ipc_lock, flags);
-
        if (reply.error < 0) {
                memcpy(msg->reply_data, &reply, sizeof(reply));
                ret = reply.error;
@@ -419,8 +424,6 @@ static void byt_get_reply(struct snd_sof_dev *sdev)
        }
 
        msg->reply_error = ret;
-
-       spin_unlock_irqrestore(&sdev->ipc_lock, flags);
 }
 
 static void byt_host_done(struct snd_sof_dev *sdev)
index 08a1a3d3c08d6529f634a8308bee026c06fcf244..b2eba7adcad810c9af905472df7115388a276288 100644 (file)
@@ -64,6 +64,8 @@ static irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
                                        CNL_DSP_REG_HIPCCTL,
                                        CNL_DSP_REG_HIPCCTL_DONE, 0);
 
+               spin_lock_irq(&sdev->ipc_lock);
+
                /* handle immediate reply from DSP core */
                hda_dsp_ipc_get_reply(sdev);
                snd_sof_ipc_reply(sdev, msg);
@@ -75,6 +77,8 @@ static irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
 
                cnl_ipc_dsp_done(sdev);
 
+               spin_unlock_irq(&sdev->ipc_lock);
+
                ret = IRQ_HANDLED;
        }
 
index 2c3645736e1f76429791faaf3a2c26eec55a4273..07bc123112c9ba657fdae3fd12582f7df7521b30 100644 (file)
@@ -161,21 +161,105 @@ int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
-/*
- * While performing reset, controller may not come back properly and causing
- * issues, so recommendation is to set CGCTL.MISCBDCGE to 0 then do reset
- * (init chip) and then again set CGCTL.MISCBDCGE to 1
- */
 int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
 {
        struct hdac_bus *bus = sof_to_bus(sdev);
-       int ret;
+       struct hdac_stream *stream;
+       int sd_offset, ret = 0;
+
+       if (bus->chip_init)
+               return 0;
 
        hda_dsp_ctrl_misc_clock_gating(sdev, false);
-       ret = snd_hdac_bus_init_chip(bus, full_reset);
+
+       if (full_reset) {
+               /* clear WAKESTS */
+               snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+                                       SOF_HDA_WAKESTS_INT_MASK,
+                                       SOF_HDA_WAKESTS_INT_MASK);
+
+               /* reset HDA controller */
+               ret = hda_dsp_ctrl_link_reset(sdev, true);
+               if (ret < 0) {
+                       dev_err(sdev->dev, "error: failed to reset HDA controller\n");
+                       return ret;
+               }
+
+               usleep_range(500, 1000);
+
+               /* exit HDA controller reset */
+               ret = hda_dsp_ctrl_link_reset(sdev, false);
+               if (ret < 0) {
+                       dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
+                       return ret;
+               }
+
+               usleep_range(1000, 1200);
+       }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+       /* check to see if controller is ready */
+       if (!snd_hdac_chip_readb(bus, GCTL)) {
+               dev_dbg(bus->dev, "controller not ready!\n");
+               return -EBUSY;
+       }
+
+       /* Accept unsolicited responses */
+       snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
+
+       /* detect codecs */
+       if (!bus->codec_mask) {
+               bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+               dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
+       }
+#endif
+
+       /* clear stream status */
+       list_for_each_entry(stream, &bus->stream_list, list) {
+               sd_offset = SOF_STREAM_SD_OFFSET(stream);
+               snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+                                       sd_offset +
+                                       SOF_HDA_ADSP_REG_CL_SD_STS,
+                                       SOF_HDA_CL_DMA_SD_INT_MASK,
+                                       SOF_HDA_CL_DMA_SD_INT_MASK);
+       }
+
+       /* clear WAKESTS */
+       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
+                               SOF_HDA_WAKESTS_INT_MASK,
+                               SOF_HDA_WAKESTS_INT_MASK);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+       /* clear rirb status */
+       snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+#endif
+
+       /* clear interrupt status register */
+       snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
+                         SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+       /* initialize the codec command I/O */
+       snd_hdac_bus_init_cmd_io(bus);
+#endif
+
+       /* enable CIE and GIE interrupts */
+       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
+                               SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
+                               SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+       /* program the position buffer */
+       if (bus->use_posbuf && bus->posbuf.addr) {
+               snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
+               snd_hdac_chip_writel(bus, DPUBASE,
+                                    upper_32_bits(bus->posbuf.addr));
+       }
+#endif
+
+       bus->chip_init = true;
+
        hda_dsp_ctrl_misc_clock_gating(sdev, true);
 
        return ret;
 }
-#endif
index 73ead7070cdefd4c9364c543f4ee23de4848456f..51b285103394ef31b88831a1cb6ba4ef019ad699 100644 (file)
@@ -72,7 +72,6 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
        struct snd_sof_ipc_msg *msg = sdev->msg;
        struct sof_ipc_reply reply;
        struct sof_ipc_cmd_hdr *hdr;
-       unsigned long flags;
        int ret = 0;
 
        /*
@@ -84,7 +83,6 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
                dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
                return;
        }
-       spin_lock_irqsave(&sdev->ipc_lock, flags);
 
        hdr = msg->msg_data;
        if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE)) {
@@ -123,7 +121,6 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
 out:
        msg->reply_error = ret;
 
-       spin_unlock_irqrestore(&sdev->ipc_lock, flags);
 }
 
 static bool hda_dsp_ipc_is_sof(uint32_t msg)
@@ -172,6 +169,18 @@ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
                                        HDA_DSP_REG_HIPCCTL,
                                        HDA_DSP_REG_HIPCCTL_DONE, 0);
 
+               /*
+                * Make sure the interrupt thread cannot be preempted between
+                * waking up the sender and re-enabling the interrupt. Also
+                * protect against a theoretical race with sof_ipc_tx_message():
+                * if the DSP is fast enough to receive an IPC message, reply to
+                * it, and the host interrupt processing calls this function on
+                * a different core from the one where the sending is taking
+                * place, the message might not yet be marked as expecting a
+                * reply.
+                */
+               spin_lock_irq(&sdev->ipc_lock);
+
                /* handle immediate reply from DSP core - ignore ROM messages */
                if (hda_dsp_ipc_is_sof(msg)) {
                        hda_dsp_ipc_get_reply(sdev);
@@ -187,6 +196,8 @@ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
                /* set the done bit */
                hda_dsp_ipc_dsp_done(sdev);
 
+               spin_unlock_irq(&sdev->ipc_lock);
+
                ret = IRQ_HANDLED;
        }
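
Taken together with the *_get_reply() hunks above and the snd_sof_ipc_reply() change further down, the locking now lives in the IRQ thread: reading the reply, marking the message complete and waking the sender happen inside one ipc_lock critical section. Below is a minimal user-space analogue of that pattern, using a pthread mutex and condition variable in place of the spinlock and waitqueue; names such as tx_message() and irq_thread() are illustrative only.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ipc_msg {
        pthread_mutex_t lock;
        pthread_cond_t  waitq;
        bool ipc_complete;              /* true: no reply expected */
        int  reply_error;
};

static struct ipc_msg msg = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .waitq = PTHREAD_COND_INITIALIZER,
        .ipc_complete = true,
};

/* "IRQ thread": read the reply, mark the message complete and wake the
 * sender in a single critical section, mirroring the hunks above. */
static void *irq_thread(void *arg)
{
        (void)arg;

        pthread_mutex_lock(&msg.lock);
        if (msg.ipc_complete) {         /* no reply expected -> bail out */
                pthread_mutex_unlock(&msg.lock);
                fprintf(stderr, "unexpected reply\n");
                return NULL;
        }
        msg.reply_error = 0;            /* "get_reply": parse the mailbox */
        msg.ipc_complete = true;        /* "snd_sof_ipc_reply" */
        pthread_cond_signal(&msg.waitq);
        pthread_mutex_unlock(&msg.lock);
        return NULL;
}

/* Sender: mark the message as expecting a reply before the "DSP" can
 * answer, then sleep until the IRQ thread completes it. */
static void tx_message(void)
{
        pthread_t irq;

        pthread_mutex_lock(&msg.lock);
        msg.ipc_complete = false;
        pthread_mutex_unlock(&msg.lock);

        pthread_create(&irq, NULL, irq_thread, NULL);   /* reply arrives */

        pthread_mutex_lock(&msg.lock);
        while (!msg.ipc_complete)
                pthread_cond_wait(&msg.waitq, &msg.lock);
        pthread_mutex_unlock(&msg.lock);

        pthread_join(irq, NULL);
        printf("reply_error=%d\n", msg.reply_error);
}

int main(void)
{
        tx_message();
        return 0;
}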
 
index 7e3980a2f7ba229497568abe7170b75d3d0b39a2..faf1a8ada091fddc7352a9b7ac107ab7a80a8c9d 100644 (file)
@@ -108,17 +108,21 @@ static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
                                  struct sof_ipc_panic_info *panic_info,
                                  u32 *stack, size_t stack_words)
 {
+       u32 offset = sdev->dsp_oops_offset;
+
        /* first read registers */
-       sof_block_read(sdev, sdev->mmio_bar, sdev->dsp_oops_offset, xoops,
-                      sizeof(*xoops));
+       sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
+
+       /* note: variable AR register array is not read */
 
        /* then get panic info */
-       sof_block_read(sdev, sdev->mmio_bar, sdev->dsp_oops_offset +
-                      sizeof(*xoops), panic_info, sizeof(*panic_info));
+       offset += xoops->arch_hdr.totalsize;
+       sof_block_read(sdev, sdev->mmio_bar, offset,
+                      panic_info, sizeof(*panic_info));
 
        /* then get the stack */
-       sof_block_read(sdev, sdev->mmio_bar, sdev->dsp_oops_offset +
-                      sizeof(*xoops) + sizeof(*panic_info), stack,
+       offset += sizeof(*panic_info);
+       sof_block_read(sdev, sdev->mmio_bar, offset, stack,
                       stack_words * sizeof(u32));
 }
 
@@ -223,7 +227,9 @@ static int hda_init(struct snd_sof_dev *sdev)
 
        /* initialise hdac bus */
        bus->addr = pci_resource_start(pci, 0);
+#if IS_ENABLED(CONFIG_PCI)
        bus->remap_addr = pci_ioremap_bar(pci, 0);
+#endif
        if (!bus->remap_addr) {
                dev_err(bus->dev, "error: ioremap error\n");
                return -ENXIO;
@@ -264,9 +270,12 @@ static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
        return tplg_filename;
 }
 
+#endif
+
 static int hda_init_caps(struct snd_sof_dev *sdev)
 {
        struct hdac_bus *bus = sof_to_bus(sdev);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
        struct hdac_ext_link *hlink;
        struct snd_soc_acpi_mach_params *mach_params;
        struct snd_soc_acpi_mach *hda_mach;
@@ -274,8 +283,9 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
        struct snd_soc_acpi_mach *mach;
        const char *tplg_filename;
        int codec_num = 0;
-       int ret = 0;
        int i;
+#endif
+       int ret = 0;
 
        device_disable_async_suspend(bus->dev);
 
@@ -283,6 +293,14 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
        if (bus->ppcap)
                dev_dbg(sdev->dev, "PP capability, will probe DSP later.\n");
 
+       ret = hda_dsp_ctrl_init_chip(sdev, true);
+       if (ret < 0) {
+               dev_err(bus->dev, "error: init chip failed with ret: %d\n",
+                       ret);
+               return ret;
+       }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
        if (bus->mlcap)
                snd_hdac_ext_bus_get_ml_capabilities(bus);
 
@@ -293,12 +311,6 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
                return ret;
        }
 
-       ret = hda_dsp_ctrl_init_chip(sdev, true);
-       if (ret < 0) {
-               dev_err(bus->dev, "error: init chip failed with ret: %d\n", ret);
-               goto out;
-       }
-
        /* codec detection */
        if (!bus->codec_mask) {
                dev_info(bus->dev, "no hda codecs found!\n");
@@ -339,8 +351,10 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
                                /* use local variable for readability */
                                tplg_filename = pdata->tplg_filename;
                                tplg_filename = fixup_tplg_name(sdev, tplg_filename);
-                               if (!tplg_filename)
-                                       goto out;
+                               if (!tplg_filename) {
+                                       hda_codec_i915_exit(sdev);
+                                       return ret;
+                               }
                                pdata->tplg_filename = tplg_filename;
                        }
                }
@@ -364,35 +378,10 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
         */
        list_for_each_entry(hlink, &bus->hlink_list, list)
                snd_hdac_ext_bus_link_put(bus, hlink);
-
-       return 0;
-
-out:
-       hda_codec_i915_exit(sdev);
-       return ret;
-}
-
-#else
-
-static int hda_init_caps(struct snd_sof_dev *sdev)
-{
-       /*
-        * set CGCTL.MISCBDCGE to 0 during reset and set back to 1
-        * when reset finished.
-        * TODO: maybe no need for init_caps?
-        */
-       hda_dsp_ctrl_misc_clock_gating(sdev, 0);
-
-       /* clear WAKESTS */
-       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
-                               SOF_HDA_WAKESTS_INT_MASK,
-                               SOF_HDA_WAKESTS_INT_MASK);
-
+#endif
        return 0;
 }
 
-#endif
-
 static const struct sof_intel_dsp_desc
        *get_chip_info(struct snd_sof_pdata *pdata)
 {
@@ -409,9 +398,8 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
        struct pci_dev *pci = to_pci_dev(sdev->dev);
        struct sof_intel_hda_dev *hdev;
        struct hdac_bus *bus;
-       struct hdac_stream *stream;
        const struct sof_intel_dsp_desc *chip;
-       int sd_offset, ret = 0;
+       int ret = 0;
 
        /*
         * detect DSP by checking class/subclass/prog-id information
@@ -468,7 +456,9 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
                goto hdac_bus_unmap;
 
        /* DSP base */
+#if IS_ENABLED(CONFIG_PCI)
        sdev->bar[HDA_DSP_BAR] = pci_ioremap_bar(pci, HDA_DSP_BAR);
+#endif
        if (!sdev->bar[HDA_DSP_BAR]) {
                dev_err(sdev->dev, "error: ioremap error\n");
                ret = -ENXIO;
@@ -558,56 +548,9 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
        if (ret < 0)
                goto free_ipc_irq;
 
-       /* reset HDA controller */
-       ret = hda_dsp_ctrl_link_reset(sdev, true);
-       if (ret < 0) {
-               dev_err(sdev->dev, "error: failed to reset HDA controller\n");
-               goto free_ipc_irq;
-       }
-
-       /* exit HDA controller reset */
-       ret = hda_dsp_ctrl_link_reset(sdev, false);
-       if (ret < 0) {
-               dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
-               goto free_ipc_irq;
-       }
-
-       /* clear stream status */
-       list_for_each_entry(stream, &bus->stream_list, list) {
-               sd_offset = SOF_STREAM_SD_OFFSET(stream);
-               snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
-                                       sd_offset +
-                                       SOF_HDA_ADSP_REG_CL_SD_STS,
-                                       SOF_HDA_CL_DMA_SD_INT_MASK,
-                                       SOF_HDA_CL_DMA_SD_INT_MASK);
-       }
-
-       /* clear WAKESTS */
-       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
-                               SOF_HDA_WAKESTS_INT_MASK,
-                               SOF_HDA_WAKESTS_INT_MASK);
-
-       /* clear interrupt status register */
-       snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
-                         SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
-
-       /* enable CIE and GIE interrupts */
-       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
-                               SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
-                               SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
-
-       /* re-enable CGCTL.MISCBDCGE after reset */
-       hda_dsp_ctrl_misc_clock_gating(sdev, true);
-
-       device_disable_async_suspend(&pci->dev);
-
-       /* enable DSP features */
-       snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
-                               SOF_HDA_PPCTL_GPROCEN, SOF_HDA_PPCTL_GPROCEN);
-
-       /* enable DSP IRQ */
-       snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
-                               SOF_HDA_PPCTL_PIE, SOF_HDA_PPCTL_PIE);
+       /* enable ppcap interrupt */
+       hda_dsp_ctrl_ppcap_enable(sdev, true);
+       hda_dsp_ctrl_ppcap_int_enable(sdev, true);
 
        /* initialize waitq for code loading */
        init_waitqueue_head(&sdev->waitq);
index f0b9d3c53f6f6944d75f4fbafbe13119fb7e688e..2414640a32d18dcece27a88018ad82ebd0c485b6 100644 (file)
@@ -115,7 +115,7 @@ static void ipc_log_header(struct device *dev, u8 *text, u32 cmd)
                }
                break;
        case SOF_IPC_GLB_COMP_MSG:
-               str = "GLB_COMP_MSG: SET_VALUE";
+               str = "GLB_COMP_MSG";
                switch (type) {
                case SOF_IPC_COMP_SET_VALUE:
                        str2 = "SET_VALUE"; break;
@@ -308,19 +308,8 @@ EXPORT_SYMBOL(sof_ipc_tx_message);
 int snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
 {
        struct snd_sof_ipc_msg *msg = &sdev->ipc->msg;
-       unsigned long flags;
-
-       /*
-        * Protect against a theoretical race with sof_ipc_tx_message(): if the
-        * DSP is fast enough to receive an IPC message, reply to it, and the
-        * host interrupt processing calls this function on a different core
-        * from the one, where the sending is taking place, the message might
-        * not yet be marked as expecting a reply.
-        */
-       spin_lock_irqsave(&sdev->ipc_lock, flags);
 
        if (msg->ipc_complete) {
-               spin_unlock_irqrestore(&sdev->ipc_lock, flags);
                dev_err(sdev->dev, "error: no reply expected, received 0x%x",
                        msg_id);
                return -EINVAL;
@@ -330,8 +319,6 @@ int snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
        msg->ipc_complete = true;
        wake_up(&msg->waitq);
 
-       spin_unlock_irqrestore(&sdev->ipc_lock, flags);
-
        return 0;
 }
 EXPORT_SYMBOL(snd_sof_ipc_reply);
@@ -776,16 +763,19 @@ int snd_sof_ipc_valid(struct snd_sof_dev *sdev)
                }
        }
 
-       if (ready->debug.bits.build) {
+       if (ready->flags & SOF_IPC_INFO_BUILD) {
                dev_info(sdev->dev,
                         "Firmware debug build %d on %s-%s - options:\n"
                         " GDB: %s\n"
                         " lock debug: %s\n"
                         " lock vdebug: %s\n",
                         v->build, v->date, v->time,
-                        ready->debug.bits.gdb ? "enabled" : "disabled",
-                        ready->debug.bits.locks ? "enabled" : "disabled",
-                        ready->debug.bits.locks_verbose ? "enabled" : "disabled");
+                        ready->flags & SOF_IPC_INFO_GDB ?
+                               "enabled" : "disabled",
+                        ready->flags & SOF_IPC_INFO_LOCKS ?
+                               "enabled" : "disabled",
+                        ready->flags & SOF_IPC_INFO_LOCKSV ?
+                               "enabled" : "disabled");
        }
 
        /* copy the fw_version into debugfs at first boot */
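
The hunk above replaces bitfield access on the firmware-ready message with mask tests against SOF_IPC_INFO_* bits in a plain flags word. A tiny stand-alone illustration of that flags idiom follows; the mask values here are made up for the example and are not the real ABI constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only -- the real masks live in the SOF IPC headers. */
#define INFO_BUILD  (1U << 0)
#define INFO_GDB    (1U << 2)
#define INFO_LOCKS  (1U << 3)
#define INFO_LOCKSV (1U << 4)

static void report(uint32_t flags)
{
        if (flags & INFO_BUILD)
                printf("debug build - GDB: %s, lock debug: %s, lock vdebug: %s\n",
                       flags & INFO_GDB    ? "enabled" : "disabled",
                       flags & INFO_LOCKS  ? "enabled" : "disabled",
                       flags & INFO_LOCKSV ? "enabled" : "disabled");
}

int main(void)
{
        report(INFO_BUILD | INFO_LOCKS);
        return 0;
}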
index 81c7452aae171819484a705c076be0d151a0deac..628fae5524424a6f05c8c54985f75d1866cd99f5 100644 (file)
@@ -372,6 +372,8 @@ int snd_sof_run_firmware(struct snd_sof_dev *sdev)
                                 msecs_to_jiffies(sdev->boot_timeout));
        if (ret == 0) {
                dev_err(sdev->dev, "error: firmware boot failure\n");
+               /* after this point FW_READY msg should be ignored */
+               sdev->boot_complete = true;
                snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX |
                        SOF_DBG_TEXT | SOF_DBG_PCI);
                return -EIO;
index 649968841dad9f59078155b4c2cb3c2bd081c8f1..dace6c4cd91e35666070110804951faaf7e2567c 100644 (file)
@@ -211,8 +211,8 @@ static int sof_pcm_hw_params(struct snd_pcm_substream *substream,
        /* save pcm hw_params */
        memcpy(&spcm->params[substream->stream], params, sizeof(*params));
 
-       INIT_WORK(&spcm->stream[substream->stream].period_elapsed_work,
-                 sof_pcm_period_elapsed_work);
+       /* clear hw_params_upon_resume flag */
+       spcm->hw_params_upon_resume[substream->stream] = 0;
 
        return ret;
 }
@@ -429,8 +429,8 @@ static int sof_pcm_open(struct snd_pcm_substream *substream)
        dev_dbg(sdev->dev, "pcm: open stream %d dir %d\n", spcm->pcm.pcm_id,
                substream->stream);
 
-       /* clear hw_params_upon_resume flag */
-       spcm->hw_params_upon_resume[substream->stream] = 0;
+       INIT_WORK(&spcm->stream[substream->stream].period_elapsed_work,
+                 sof_pcm_period_elapsed_work);
 
        caps = &spcm->pcm.caps[substream->stream];
 
index c3ad23a85b99fe112cd0c5f2152c981eb9cf9715..46a4905a9dce1537ecb985ed657a935e7056a230 100644 (file)
@@ -110,7 +110,7 @@ static void xtensa_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
                         u32 stack_words)
 {
        struct sof_ipc_dsp_oops_xtensa *xoops = oops;
-       u32 stack_ptr = xoops->stack;
+       u32 stack_ptr = xoops->plat_hdr.stackptr;
        /* 4 * 8chars + 3 ws + 1 terminating NUL */
        unsigned char buf[4 * 8 + 3 + 1];
        int i;
index f2deffe026c54da3e1b2c031c3a13745b0a98bb4..9e1f00e8c32b2e49c247b35eafb53d80baffbaa5 100644 (file)
@@ -1320,6 +1320,15 @@ static int sun4i_codec_spk_event(struct snd_soc_dapm_widget *w,
        gpiod_set_value_cansleep(scodec->gpio_pa,
                                 !!SND_SOC_DAPM_EVENT_ON(event));
 
+       if (SND_SOC_DAPM_EVENT_ON(event)) {
+               /*
+                * Need a delay to wait for DAC to push the data. 700ms seems
+                * to be the best compromise not to feel this delay while
+                * playing a sound.
+                */
+               msleep(700);
+       }
+
        return 0;
 }
 
index c53bfed8d4c2bcf7525ad2945fce0e5f1e89b65e..fd7c37596f21a282df5f43e47cd5f46aeeba3753 100644 (file)
 
 #define SUN8I_I2S_TX_CHAN_MAP_REG      0x44
 #define SUN8I_I2S_TX_CHAN_SEL_REG      0x34
-#define SUN8I_I2S_TX_CHAN_OFFSET_MASK          GENMASK(13, 11)
+#define SUN8I_I2S_TX_CHAN_OFFSET_MASK          GENMASK(13, 12)
 #define SUN8I_I2S_TX_CHAN_OFFSET(offset)       (offset << 12)
 #define SUN8I_I2S_TX_CHAN_EN_MASK              GENMASK(11, 4)
 #define SUN8I_I2S_TX_CHAN_EN(num_chan)         (((1 << num_chan) - 1) << 4)
@@ -456,6 +456,10 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                regmap_update_bits(i2s->regmap, SUN8I_I2S_TX_CHAN_SEL_REG,
                                   SUN8I_I2S_TX_CHAN_OFFSET_MASK,
                                   SUN8I_I2S_TX_CHAN_OFFSET(offset));
+
+               regmap_update_bits(i2s->regmap, SUN8I_I2S_RX_CHAN_SEL_REG,
+                                  SUN8I_I2S_TX_CHAN_OFFSET_MASK,
+                                  SUN8I_I2S_TX_CHAN_OFFSET(offset));
        }
 
        regmap_field_write(i2s->field_fmt_mode, val);
index 7a0e64ccd6ff5d02108a4424fc72a36f49018987..24a8cd229df6897975528d6097bca80d94204eee 100644 (file)
@@ -392,7 +392,7 @@ struct kvm_sync_regs {
 
 struct kvm_vmx_nested_state {
        __u64 vmxon_pa;
-       __u64 vmcs_pa;
+       __u64 vmcs12_pa;
 
        struct {
                __u16 flags;
index ac26876389c2383297185ddc1d3f3f1ac0067d8b..e744b3e4e56aa9030aa3027f1f594036a88bd105 100644 (file)
@@ -29,7 +29,7 @@ CGROUP COMMANDS
 |      *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
 |      *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
 |              **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
-|              **sendmsg4** | **sendmsg6** | **sysctl** }
+|              **sendmsg4** | **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** }
 |      *ATTACH_FLAGS* := { **multi** | **override** }
 
 DESCRIPTION
@@ -86,6 +86,10 @@ DESCRIPTION
                  unconnected udp4 socket (since 4.18);
                  **sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an
                  unconnected udp6 socket (since 4.18);
+                 **recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
+                  an unconnected udp4 socket (since 5.2);
+                 **recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
+                  an unconnected udp6 socket (since 5.2);
                  **sysctl** sysctl access (since 5.2).
 
        **bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
index e8118544d118282b63bfc27e515ea53ae9903d96..018ecef8dc139d0f2a84ddc6ec1ae85b6f87ffb2 100644 (file)
@@ -40,7 +40,7 @@ PROG COMMANDS
 |              **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** |
 |              **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
 |              **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
-|              **cgroup/sysctl**
+|              **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl**
 |      }
 |       *ATTACH_TYPE* := {
 |              **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
index 50e402a5a9c86276f2b37526f43c0ede5c1ba8a6..4300adf6e5ab48d8ea3dd0b0fe19b6b861e40ca2 100644 (file)
@@ -371,6 +371,7 @@ _bpftool()
                                 lirc_mode2 cgroup/bind4 cgroup/bind6 \
                                 cgroup/connect4 cgroup/connect6 \
                                 cgroup/sendmsg4 cgroup/sendmsg6 \
+                                cgroup/recvmsg4 cgroup/recvmsg6 \
                                 cgroup/post_bind4 cgroup/post_bind6 \
                                 cgroup/sysctl" -- \
                                                    "$cur" ) )
@@ -666,7 +667,7 @@ _bpftool()
                 attach|detach)
                     local ATTACH_TYPES='ingress egress sock_create sock_ops \
                         device bind4 bind6 post_bind4 post_bind6 connect4 \
-                        connect6 sendmsg4 sendmsg6 sysctl'
+                        connect6 sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl'
                     local ATTACH_FLAGS='multi override'
                     local PROG_TYPE='id pinned tag'
                     case $prev in
@@ -676,7 +677,7 @@ _bpftool()
                             ;;
                         ingress|egress|sock_create|sock_ops|device|bind4|bind6|\
                         post_bind4|post_bind6|connect4|connect6|sendmsg4|\
-                        sendmsg6|sysctl)
+                        sendmsg6|recvmsg4|recvmsg6|sysctl)
                             COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
                                 "$cur" ) )
                             return 0
index 7e22f115c8c1af352984961a3a6c0dc336d4b864..73ec8ea33fb43af8f4286e2dfd4361eb4672fd07 100644 (file)
@@ -25,7 +25,8 @@
        "       ATTACH_TYPE := { ingress | egress | sock_create |\n"           \
        "                        sock_ops | device | bind4 | bind6 |\n"        \
        "                        post_bind4 | post_bind6 | connect4 |\n"       \
-       "                        connect6 | sendmsg4 | sendmsg6 | sysctl }"
+       "                        connect6 | sendmsg4 | sendmsg6 |\n"           \
+       "                        recvmsg4 | recvmsg6 | sysctl }"
 
 static const char * const attach_type_strings[] = {
        [BPF_CGROUP_INET_INGRESS] = "ingress",
@@ -42,6 +43,8 @@ static const char * const attach_type_strings[] = {
        [BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4",
        [BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6",
        [BPF_CGROUP_SYSCTL] = "sysctl",
+       [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4",
+       [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6",
        [__MAX_BPF_ATTACH_TYPE] = NULL,
 };
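
attach_type_strings[] above is a lookup table indexed directly by enum bpf_attach_type values, with a NULL sentinel at __MAX_BPF_ATTACH_TYPE; designated initializers are why the new recvmsg entries can be added out of enum order. A minimal stand-alone version of that idiom follows, using a made-up enum rather than the real UAPI one.

#include <stdio.h>

/* Made-up enum standing in for enum bpf_attach_type. */
enum attach_type {
        AT_INGRESS,
        AT_EGRESS,
        AT_UDP4_RECVMSG,
        AT_UDP6_RECVMSG,
        __MAX_ATTACH_TYPE
};

/* Designated initializers keep the table valid regardless of the order
 * entries are listed in; unset slots stay NULL. */
static const char * const attach_type_strings[] = {
        [AT_INGRESS]        = "ingress",
        [AT_EGRESS]         = "egress",
        [AT_UDP4_RECVMSG]   = "recvmsg4",
        [AT_UDP6_RECVMSG]   = "recvmsg6",
        [__MAX_ATTACH_TYPE] = NULL,
};

static const char *attach_type_name(enum attach_type t)
{
        if ((int)t < 0 || t >= __MAX_ATTACH_TYPE || !attach_type_strings[t])
                return "unknown";
        return attach_type_strings[t];
}

int main(void)
{
        printf("%s\n", attach_type_name(AT_UDP4_RECVMSG));   /* recvmsg4 */
        return 0;
}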
 
index 3ec82904ccec6aa974353d20f96875267a20ffee..5da5a7311f13050773e82194621c317c08d1c482 100644 (file)
@@ -716,12 +716,14 @@ static int dump_map_elem(int fd, void *key, void *value,
                return 0;
 
        if (json_output) {
+               jsonw_start_object(json_wtr);
                jsonw_name(json_wtr, "key");
                print_hex_data_json(key, map_info->key_size);
                jsonw_name(json_wtr, "value");
                jsonw_start_object(json_wtr);
                jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
                jsonw_end_object(json_wtr);
+               jsonw_end_object(json_wtr);
        } else {
                const char *msg = NULL;
 
index 26336bad0442734d6a56fd94243c6412f9adca4f..7a4e21a315236eb8fe7f18ff3ceed8e9875af849 100644 (file)
@@ -1063,7 +1063,8 @@ static int do_help(int argc, char **argv)
                "                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
                "                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
                "                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
-               "                 cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
+               "                 cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n"
+               "                 cgroup/recvmsg6 }\n"
                "       ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
                "                        flow_dissector }\n"
                "       " HELP_SPEC_OPTIONS "\n"
index 63e0cf66f01a9698ff6bc41ac5492809bb83d834..a8b823c30b434d8022ebeab666d7072a09473be5 100644 (file)
@@ -192,6 +192,8 @@ enum bpf_attach_type {
        BPF_LIRC_MODE2,
        BPF_FLOW_DISSECTOR,
        BPF_CGROUP_SYSCTL,
+       BPF_CGROUP_UDP4_RECVMSG,
+       BPF_CGROUP_UDP6_RECVMSG,
        __MAX_BPF_ATTACH_TYPE
 };
 
@@ -3376,8 +3378,8 @@ struct bpf_raw_tracepoint_args {
 /* DIRECT:  Skip the FIB rules and go to FIB table associated with device
  * OUTPUT:  Do lookup from egress perspective; default is ingress
  */
-#define BPF_FIB_LOOKUP_DIRECT  BIT(0)
-#define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
+#define BPF_FIB_LOOKUP_DIRECT  (1U << 0)
+#define BPF_FIB_LOOKUP_OUTPUT  (1U << 1)
 
 enum {
        BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
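
The one-line change above swaps BIT(n) for a plain (1U << n), presumably because this UAPI header is also compiled by userspace, where the kernel-internal BIT() helper from linux/bits.h is not available. A short sketch of the portable form (the flag names are reused from the hunk; the program itself is only for demonstration):

#include <stdio.h>

/* Plain shifts work in both kernel and userspace builds of a UAPI header. */
#define BPF_FIB_LOOKUP_DIRECT  (1U << 0)
#define BPF_FIB_LOOKUP_OUTPUT  (1U << 1)

int main(void)
{
        unsigned int flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT;

        printf("direct=%u output=%u\n",
               !!(flags & BPF_FIB_LOOKUP_DIRECT),
               !!(flags & BPF_FIB_LOOKUP_OUTPUT));
        return 0;
}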
index 197b574406b3ac6599bb81e395ff88d9c0374661..151f7ac1882e52f09db4c2dce66686cc63d97e0a 100644 (file)
@@ -1645,14 +1645,16 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj)
                /* FUNC x */                                    /* [3] */
                BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
        };
-       int res;
+       int btf_fd;
 
-       res = libbpf__probe_raw_btf((char *)types, sizeof(types),
-                                   strs, sizeof(strs));
-       if (res < 0)
-               return res;
-       if (res > 0)
+       btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
+                                     strs, sizeof(strs));
+       if (btf_fd >= 0) {
                obj->caps.btf_func = 1;
+               close(btf_fd);
+               return 1;
+       }
+
        return 0;
 }
 
@@ -1670,14 +1672,16 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
                BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
                BTF_VAR_SECINFO_ENC(2, 0, 4),
        };
-       int res;
+       int btf_fd;
 
-       res = libbpf__probe_raw_btf((char *)types, sizeof(types),
-                                   strs, sizeof(strs));
-       if (res < 0)
-               return res;
-       if (res > 0)
+       btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
+                                     strs, sizeof(strs));
+       if (btf_fd >= 0) {
                obj->caps.btf_datasec = 1;
+               close(btf_fd);
+               return 1;
+       }
+
        return 0;
 }
 
@@ -3206,6 +3210,10 @@ static const struct {
                                                BPF_CGROUP_UDP4_SENDMSG),
        BPF_EAPROG_SEC("cgroup/sendmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
                                                BPF_CGROUP_UDP6_SENDMSG),
+       BPF_EAPROG_SEC("cgroup/recvmsg4",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+                                               BPF_CGROUP_UDP4_RECVMSG),
+       BPF_EAPROG_SEC("cgroup/recvmsg6",       BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+                                               BPF_CGROUP_UDP6_RECVMSG),
        BPF_EAPROG_SEC("cgroup/sysctl",         BPF_PROG_TYPE_CGROUP_SYSCTL,
                                                BPF_CGROUP_SYSCTL),
 };
index f3025b4d90e19806d028485aca99f35796cc8f13..dfab8012185c49c302fd528006ba28320e208729 100644 (file)
@@ -34,7 +34,7 @@ do {                          \
 #define pr_info(fmt, ...)      __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
 #define pr_debug(fmt, ...)     __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
 
-int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
-                         const char *str_sec, size_t str_len);
+int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
+                        const char *str_sec, size_t str_len);
 
 #endif /* __LIBBPF_LIBBPF_INTERNAL_H */
index 5e2aa83f637a4726dd9b6c0b8628f1cd83bc225d..6635a31a7a164e7d03cdd697b599774d9c360969 100644 (file)
@@ -133,8 +133,8 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
        return errno != EINVAL && errno != EOPNOTSUPP;
 }
 
-int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
-                         const char *str_sec, size_t str_len)
+int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
+                        const char *str_sec, size_t str_len)
 {
        struct btf_header hdr = {
                .magic = BTF_MAGIC,
@@ -157,14 +157,9 @@ int libbpf__probe_raw_btf(const char *raw_types, size_t types_len,
        memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
 
        btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);
-       if (btf_fd < 0) {
-               free(raw_btf);
-               return 0;
-       }
 
-       close(btf_fd);
        free(raw_btf);
-       return 1;
+       return btf_fd;
 }
 
 static int load_sk_storage_btf(void)
@@ -190,7 +185,7 @@ static int load_sk_storage_btf(void)
                BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
        };
 
-       return libbpf__probe_raw_btf((char *)types, sizeof(types),
+       return libbpf__load_raw_btf((char *)types, sizeof(types),
                                     strs, sizeof(strs));
 }
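
The rename above also changes the helper's contract: libbpf__load_raw_btf() now returns the BTF fd itself (negative on failure) instead of a 0/1 probe result, so the probe callers close the fd and record a capability flag, while load_sk_storage_btf() can keep the fd. Below is a small stand-alone sketch of that "return the fd, let the caller decide" pattern, with open() standing in for the BTF load.

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for libbpf__load_raw_btf(): returns an fd on success,
 * a negative value on failure. */
static int load_raw_thing(const char *path)
{
        int fd = open(path, O_RDONLY);

        return fd < 0 ? -1 : fd;
}

/* Probe-style caller: only cares whether the load works, so it closes
 * the fd right away and records the capability. */
static bool probe_thing(void)
{
        int fd = load_raw_thing("/dev/null");

        if (fd >= 0) {
                close(fd);
                return true;
        }
        return false;
}

int main(void)
{
        printf("capability: %s\n", probe_thing() ? "yes" : "no");
        return 0;
}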
 
index 280015c22598dfb4b2a536a549605e8a837aafa6..076df22e4bda1118966f748b0da8df59bffa36a3 100644 (file)
@@ -100,7 +100,9 @@ static void nfit_test_kill(void *_pgmap)
 {
        struct dev_pagemap *pgmap = _pgmap;
 
+       WARN_ON(!pgmap || !pgmap->ref || !pgmap->kill || !pgmap->cleanup);
        pgmap->kill(pgmap->ref);
+       pgmap->cleanup(pgmap->ref);
 }
 
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
index 66f2dca1dee19cf73596757d8b39eab7b3651687..e36356e2377e9624b23002a8ab6e9786b2a7c518 100644 (file)
@@ -21,8 +21,8 @@ LDLIBS += -lcap -lelf -lrt -lpthread
 # Order correspond to 'make run_tests' order
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
        test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
-       test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
-       test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
+       test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
+       test_cgroup_storage test_select_reuseport test_section_names \
        test_netcnt test_tcpnotify_user test_sock_fields test_sysctl
 
 BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
@@ -63,7 +63,8 @@ TEST_PROGS_EXTENDED := with_addr.sh \
 
 # Compile but not part of 'make run_tests'
 TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
-       flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user
+       flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
+       test_lirc_mode2_user
 
 include ../lib.mk
 
index fbd1d88a6095325a5273f21b86ec88eae91d8994..c938283ac232a8bd1879af0a7e6e8c5de3b3169d 100644 (file)
@@ -3,6 +3,7 @@
 #include <error.h>
 #include <linux/if.h>
 #include <linux/if_tun.h>
+#include <sys/uio.h>
 
 #define CHECK_FLOW_KEYS(desc, got, expected)                           \
        CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,           \
index 02d7c871862af26080e823a6936a54bbd182e454..006be39639778f9311fd849849a41ebe4e62f5c0 100644 (file)
@@ -573,13 +573,13 @@ static void test_lpm_get_next_key(void)
 
        /* add one more element (total two) */
        key_p->prefixlen = 24;
-       inet_pton(AF_INET, "192.168.0.0", key_p->data);
+       inet_pton(AF_INET, "192.168.128.0", key_p->data);
        assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
 
        memset(key_p, 0, key_size);
        assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
        assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
-              key_p->data[1] == 168 && key_p->data[2] == 0);
+              key_p->data[1] == 168 && key_p->data[2] == 128);
 
        memset(next_key_p, 0, key_size);
        assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
@@ -592,7 +592,7 @@ static void test_lpm_get_next_key(void)
 
        /* Add one more element (total three) */
        key_p->prefixlen = 24;
-       inet_pton(AF_INET, "192.168.128.0", key_p->data);
+       inet_pton(AF_INET, "192.168.0.0", key_p->data);
        assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
 
        memset(key_p, 0, key_size);
@@ -643,6 +643,41 @@ static void test_lpm_get_next_key(void)
        assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
               errno == ENOENT);
 
+       /* Add one more element (total five) */
+       key_p->prefixlen = 28;
+       inet_pton(AF_INET, "192.168.1.128", key_p->data);
+       assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+       memset(key_p, 0, key_size);
+       assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+       assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+              key_p->data[1] == 168 && key_p->data[2] == 0);
+
+       memset(next_key_p, 0, key_size);
+       assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+       assert(next_key_p->prefixlen == 28 && next_key_p->data[0] == 192 &&
+              next_key_p->data[1] == 168 && next_key_p->data[2] == 1 &&
+              next_key_p->data[3] == 128);
+
+       memcpy(key_p, next_key_p, key_size);
+       assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+       assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+              next_key_p->data[1] == 168 && next_key_p->data[2] == 1);
+
+       memcpy(key_p, next_key_p, key_size);
+       assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+       assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+              next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
+
+       memcpy(key_p, next_key_p, key_size);
+       assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+       assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+              next_key_p->data[1] == 168);
+
+       memcpy(key_p, next_key_p, key_size);
+       assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+              errno == ENOENT);
+
        /* no exact matching key should return the first one in post order */
        key_p->prefixlen = 22;
        inet_pton(AF_INET, "192.168.1.0", key_p->data);
index bebd4fbca1f43e7102ec9dd66c0828d3aebfc60a..dee2f2eceb0fa1f8fda230888f1c3d065a3a3b82 100644 (file)
@@ -119,6 +119,16 @@ static struct sec_name_test tests[] = {
                {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
                {0, BPF_CGROUP_UDP6_SENDMSG},
        },
+       {
+               "cgroup/recvmsg4",
+               {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
+               {0, BPF_CGROUP_UDP4_RECVMSG},
+       },
+       {
+               "cgroup/recvmsg6",
+               {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG},
+               {0, BPF_CGROUP_UDP6_RECVMSG},
+       },
        {
                "cgroup/sysctl",
                {0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
index 3f110eaaf29cea214844ff98211697c764b8a870..4ecde2392327dfbcea82554821a4042ba9e9702e 100644 (file)
@@ -76,6 +76,7 @@ struct sock_addr_test {
        enum {
                LOAD_REJECT,
                ATTACH_REJECT,
+               ATTACH_OKAY,
                SYSCALL_EPERM,
                SYSCALL_ENOTSUPP,
                SUCCESS,
@@ -88,9 +89,13 @@ static int connect4_prog_load(const struct sock_addr_test *test);
 static int connect6_prog_load(const struct sock_addr_test *test);
 static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
 static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
+static int recvmsg_allow_prog_load(const struct sock_addr_test *test);
+static int recvmsg_deny_prog_load(const struct sock_addr_test *test);
 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
+static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
 static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
+static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
 static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
@@ -507,6 +512,92 @@ static struct sock_addr_test tests[] = {
                SRC6_REWRITE_IP,
                SYSCALL_EPERM,
        },
+
+       /* recvmsg */
+       {
+               "recvmsg4: return code ok",
+               recvmsg_allow_prog_load,
+               BPF_CGROUP_UDP4_RECVMSG,
+               BPF_CGROUP_UDP4_RECVMSG,
+               AF_INET,
+               SOCK_DGRAM,
+               NULL,
+               0,
+               NULL,
+               0,
+               NULL,
+               ATTACH_OKAY,
+       },
+       {
+               "recvmsg4: return code !ok",
+               recvmsg_deny_prog_load,
+               BPF_CGROUP_UDP4_RECVMSG,
+               BPF_CGROUP_UDP4_RECVMSG,
+               AF_INET,
+               SOCK_DGRAM,
+               NULL,
+               0,
+               NULL,
+               0,
+               NULL,
+               LOAD_REJECT,
+       },
+       {
+               "recvmsg6: return code ok",
+               recvmsg_allow_prog_load,
+               BPF_CGROUP_UDP6_RECVMSG,
+               BPF_CGROUP_UDP6_RECVMSG,
+               AF_INET6,
+               SOCK_DGRAM,
+               NULL,
+               0,
+               NULL,
+               0,
+               NULL,
+               ATTACH_OKAY,
+       },
+       {
+               "recvmsg6: return code !ok",
+               recvmsg_deny_prog_load,
+               BPF_CGROUP_UDP6_RECVMSG,
+               BPF_CGROUP_UDP6_RECVMSG,
+               AF_INET6,
+               SOCK_DGRAM,
+               NULL,
+               0,
+               NULL,
+               0,
+               NULL,
+               LOAD_REJECT,
+       },
+       {
+               "recvmsg4: rewrite IP & port (asm)",
+               recvmsg4_rw_asm_prog_load,
+               BPF_CGROUP_UDP4_RECVMSG,
+               BPF_CGROUP_UDP4_RECVMSG,
+               AF_INET,
+               SOCK_DGRAM,
+               SERV4_REWRITE_IP,
+               SERV4_REWRITE_PORT,
+               SERV4_REWRITE_IP,
+               SERV4_REWRITE_PORT,
+               SERV4_IP,
+               SUCCESS,
+       },
+       {
+               "recvmsg6: rewrite IP & port (asm)",
+               recvmsg6_rw_asm_prog_load,
+               BPF_CGROUP_UDP6_RECVMSG,
+               BPF_CGROUP_UDP6_RECVMSG,
+               AF_INET6,
+               SOCK_DGRAM,
+               SERV6_REWRITE_IP,
+               SERV6_REWRITE_PORT,
+               SERV6_REWRITE_IP,
+               SERV6_REWRITE_PORT,
+               SERV6_IP,
+               SUCCESS,
+       },
 };
 
 static int mk_sockaddr(int domain, const char *ip, unsigned short port,
@@ -765,8 +856,8 @@ static int connect6_prog_load(const struct sock_addr_test *test)
        return load_path(test, CONNECT6_PROG_PATH);
 }
 
-static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
-                                     int32_t rc)
+static int xmsg_ret_only_prog_load(const struct sock_addr_test *test,
+                                  int32_t rc)
 {
        struct bpf_insn insns[] = {
                /* return rc */
@@ -778,12 +869,22 @@ static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
 
 static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
 {
-       return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
+       return xmsg_ret_only_prog_load(test, /*rc*/ 1);
 }
 
 static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
 {
-       return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
+       return xmsg_ret_only_prog_load(test, /*rc*/ 0);
+}
+
+static int recvmsg_allow_prog_load(const struct sock_addr_test *test)
+{
+       return xmsg_ret_only_prog_load(test, /*rc*/ 1);
+}
+
+static int recvmsg_deny_prog_load(const struct sock_addr_test *test)
+{
+       return xmsg_ret_only_prog_load(test, /*rc*/ 0);
 }
 
 static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
@@ -838,6 +939,47 @@ static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
        return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
 }
 
+static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
+{
+       struct sockaddr_in src4_rw_addr;
+
+       if (mk_sockaddr(AF_INET, SERV4_IP, SERV4_PORT,
+                       (struct sockaddr *)&src4_rw_addr,
+                       sizeof(src4_rw_addr)) == -1)
+               return -1;
+
+       struct bpf_insn insns[] = {
+               BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+               /* if (sk.family == AF_INET && */
+               BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+                           offsetof(struct bpf_sock_addr, family)),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 6),
+
+               /*     sk.type == SOCK_DGRAM)  { */
+               BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+                           offsetof(struct bpf_sock_addr, type)),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 4),
+
+               /*      user_ip4 = src4_rw_addr.sin_addr */
+               BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_addr.s_addr),
+               BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
+                           offsetof(struct bpf_sock_addr, user_ip4)),
+
+               /*      user_port = src4_rw_addr.sin_port */
+               BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_port),
+               BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
+                           offsetof(struct bpf_sock_addr, user_port)),
+               /* } */
+
+               /* return 1 */
+               BPF_MOV64_IMM(BPF_REG_0, 1),
+               BPF_EXIT_INSN(),
+       };
+
+       return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
+}
+
 static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test)
 {
        return load_path(test, SENDMSG4_PROG_PATH);
@@ -901,6 +1043,39 @@ static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
        return sendmsg6_rw_dst_asm_prog_load(test, SERV6_REWRITE_IP);
 }
 
+static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
+{
+       struct sockaddr_in6 src6_rw_addr;
+
+       if (mk_sockaddr(AF_INET6, SERV6_IP, SERV6_PORT,
+                       (struct sockaddr *)&src6_rw_addr,
+                       sizeof(src6_rw_addr)) == -1)
+               return -1;
+
+       struct bpf_insn insns[] = {
+               BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+               /* if (sk.family == AF_INET6) { */
+               BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+                           offsetof(struct bpf_sock_addr, family)),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET6, 10),
+
+               STORE_IPV6(user_ip6, src6_rw_addr.sin6_addr.s6_addr32),
+
+               /*      user_port = dst6_rw_addr.sin6_port */
+               BPF_MOV32_IMM(BPF_REG_7, src6_rw_addr.sin6_port),
+               BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
+                           offsetof(struct bpf_sock_addr, user_port)),
+               /* } */
+
+               /* return 1 */
+               BPF_MOV64_IMM(BPF_REG_0, 1),
+               BPF_EXIT_INSN(),
+       };
+
+       return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
+}
+
 static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
 {
        return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
@@ -1282,13 +1457,13 @@ static int run_connect_test_case(const struct sock_addr_test *test)
        return err;
 }
 
-static int run_sendmsg_test_case(const struct sock_addr_test *test)
+static int run_xmsg_test_case(const struct sock_addr_test *test, int max_cmsg)
 {
        socklen_t addr_len = sizeof(struct sockaddr_storage);
-       struct sockaddr_storage expected_src_addr;
-       struct sockaddr_storage requested_addr;
        struct sockaddr_storage expected_addr;
-       struct sockaddr_storage real_src_addr;
+       struct sockaddr_storage server_addr;
+       struct sockaddr_storage sendmsg_addr;
+       struct sockaddr_storage recvmsg_addr;
        int clientfd = -1;
        int servfd = -1;
        int set_cmsg;
@@ -1297,20 +1472,19 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
        if (test->type != SOCK_DGRAM)
                goto err;
 
-       if (init_addrs(test, &requested_addr, &expected_addr,
-                      &expected_src_addr))
+       if (init_addrs(test, &sendmsg_addr, &server_addr, &expected_addr))
                goto err;
 
        /* Prepare server to sendmsg to */
-       servfd = start_server(test->type, &expected_addr, addr_len);
+       servfd = start_server(test->type, &server_addr, addr_len);
        if (servfd == -1)
                goto err;
 
-       for (set_cmsg = 0; set_cmsg <= 1; ++set_cmsg) {
+       for (set_cmsg = 0; set_cmsg <= max_cmsg; ++set_cmsg) {
                if (clientfd >= 0)
                        close(clientfd);
 
-               clientfd = sendmsg_to_server(test->type, &requested_addr,
+               clientfd = sendmsg_to_server(test->type, &sendmsg_addr,
                                             addr_len, set_cmsg, /*flags*/0,
                                             &err);
                if (err)
@@ -1330,10 +1504,10 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
                 * specific packet may differ from the one used by default and
                 * returned by getsockname(2).
                 */
-               if (recvmsg_from_client(servfd, &real_src_addr) == -1)
+               if (recvmsg_from_client(servfd, &recvmsg_addr) == -1)
                        goto err;
 
-               if (cmp_addr(&real_src_addr, &expected_src_addr, /*cmp_port*/0))
+               if (cmp_addr(&recvmsg_addr, &expected_addr, /*cmp_port*/0))
                        goto err;
        }
 
@@ -1366,6 +1540,9 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test)
                goto out;
        } else if (test->expected_result == ATTACH_REJECT || err) {
                goto err;
+       } else if (test->expected_result == ATTACH_OKAY) {
+               err = 0;
+               goto out;
        }
 
        switch (test->attach_type) {
@@ -1379,7 +1556,11 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test)
                break;
        case BPF_CGROUP_UDP4_SENDMSG:
        case BPF_CGROUP_UDP6_SENDMSG:
-               err = run_sendmsg_test_case(test);
+               err = run_xmsg_test_case(test, 1);
+               break;
+       case BPF_CGROUP_UDP4_RECVMSG:
+       case BPF_CGROUP_UDP6_RECVMSG:
+               err = run_xmsg_test_case(test, 0);
                break;
        default:
                goto err;
index bd3f38dbe79646bd4a0bb557739eb60fda924322..acab4f00819f331a6bd58e24d373e88a97b6dce5 100644 (file)
        "DIV64 overflow, check 1",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_1, -1),
-       BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
-       BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+       BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+       BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 1),
+       BPF_MOV32_IMM(BPF_REG_0, 1),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 {
        "DIV64 overflow, check 2",
        .insns = {
-       BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
-       BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
+       BPF_LD_IMM64(BPF_REG_1, LLONG_MIN),
+       BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, -1),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 1),
+       BPF_MOV32_IMM(BPF_REG_0, 1),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
diff --git a/tools/testing/selftests/bpf/verifier/subreg.c b/tools/testing/selftests/bpf/verifier/subreg.c
new file mode 100644 (file)
index 0000000..4c4133c
--- /dev/null
@@ -0,0 +1,533 @@
+/* This file contains sub-register zero extension checks for insns defining
+ * sub-registers, meaning:
+ *   - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width
+ *     forms (BPF_END) could define sub-registers.
+ *   - Narrow direct loads, BPF_B/H/W | BPF_LDX.
+ *   - BPF_LD is not exposed to JIT back-ends, so no need for testing.
+ *
+ * "get_prandom_u32" is used to initialize low 32-bit of some registers to
+ * prevent potential optimizations done by verifier or JIT back-ends which could
+ * optimize register back into constant when range info shows one register is a
+ * constant.
+ */
+{
+       "add32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+       BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "add32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       /* An insn could have no effect on the low 32-bit, for example:
+        *   a = a + 0
+        *   a = a | 0
+        *   a = a & -1
+        * But, they should still zero high 32-bit.
+        */
+       BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "sub32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
+       BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "sub32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "mul32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+       BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "mul32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "div32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_MOV64_IMM(BPF_REG_0, -1),
+       BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "div32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "or32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+       BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "or32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "and32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
+       BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
+       BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "and32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "lsh32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_MOV64_IMM(BPF_REG_1, 1),
+       BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "lsh32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "rsh32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_MOV64_IMM(BPF_REG_1, 1),
+       BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "rsh32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "neg32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "mod32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_MOV64_IMM(BPF_REG_0, -1),
+       BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "mod32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "xor32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+       BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "xor32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "mov32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
+       BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+       BPF_MOV32_REG(BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "mov32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_MOV32_IMM(BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "arsh32 reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_MOV64_IMM(BPF_REG_1, 1),
+       BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "arsh32 imm zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "end16 (to_le) reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "end32 (to_le) reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "end16 (to_be) reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "end32 (to_be) reg zero extend check",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+       BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "ldx_b zero extend check",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+       BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "ldx_h zero extend check",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+       BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
+{
+       "ldx_w zero extend check",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+       BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+       BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
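
Every test in the new subreg.c file follows the same shape: bpf_get_prandom_u32() keeps the verifier from treating the value as a known constant, the upper 32 bits of the destination register are deliberately polluted, a single BPF_ALU32/BPF_END/narrow-load instruction then defines the sub-register, and a final right shift by 32 must read back zero. A userspace model of the invariant being checked (illustrative C, not BPF bytecode):

    #include <stdint.h>
    #include <stdio.h>

    /* A 32-bit ALU op that defines a sub-register must leave the upper
     * 32 bits of the destination zeroed, whatever garbage was there before.
     */
    uint64_t alu32_add(uint64_t dst, uint32_t src)
    {
            return (uint32_t)((uint32_t)dst + src);   /* implicit zero extension */
    }

    int main(void)
    {
            uint64_t polluted = 0x1000000000ULL | 42; /* garbage in bits 63..32 */

            printf("%llu\n", (unsigned long long)(alu32_add(polluted, 1) >> 32));
            return 0;                                 /* prints 0 */
    }
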
index 1c30f302a1e75e42999ac97159673122cd026839..5c39e5f6a48005135b5385bf9862778365c6f1c3 100755 (executable)
@@ -28,6 +28,7 @@ ALL_TESTS="
        vlan_interface_uppers_test
        bridge_extern_learn_test
        neigh_offload_test
+       nexthop_offload_test
        devlink_reload_test
 "
 NUM_NETIFS=2
@@ -607,6 +608,52 @@ neigh_offload_test()
        ip -4 address del 192.0.2.1/24 dev $swp1
 }
 
+nexthop_offload_test()
+{
+       # Test that IPv4 and IPv6 nexthops are marked as offloaded
+       RET=0
+
+       sysctl_set net.ipv6.conf.$swp2.keep_addr_on_down 1
+       simple_if_init $swp1 192.0.2.1/24 2001:db8:1::1/64
+       simple_if_init $swp2 192.0.2.2/24 2001:db8:1::2/64
+       setup_wait
+
+       ip -4 route add 198.51.100.0/24 vrf v$swp1 \
+               nexthop via 192.0.2.2 dev $swp1
+       ip -6 route add 2001:db8:2::/64 vrf v$swp1 \
+               nexthop via 2001:db8:1::2 dev $swp1
+
+       ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
+       check_err $? "ipv4 nexthop not marked as offloaded when should"
+       ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
+       check_err $? "ipv6 nexthop not marked as offloaded when should"
+
+       ip link set dev $swp2 down
+       sleep 1
+
+       ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
+       check_fail $? "ipv4 nexthop marked as offloaded when should not"
+       ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
+       check_fail $? "ipv6 nexthop marked as offloaded when should not"
+
+       ip link set dev $swp2 up
+       setup_wait
+
+       ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
+       check_err $? "ipv4 nexthop not marked as offloaded after neigh add"
+       ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
+       check_err $? "ipv6 nexthop not marked as offloaded after neigh add"
+
+       log_test "nexthop offload indication"
+
+       ip -6 route del 2001:db8:2::/64 vrf v$swp1
+       ip -4 route del 198.51.100.0/24 vrf v$swp1
+
+       simple_if_fini $swp2 192.0.2.2/24 2001:db8:1::2/64
+       simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
+       sysctl_restore net.ipv6.conf.$swp2.keep_addr_on_down
+}
+
 devlink_reload_test()
 {
        # Test that after executing all the above configuration tests, a
index df1bf9230a7406c56368dc446898780444a7c5b2..41266af0d3dc8b37268ec0a2500ec88391ae82c1 100644 (file)
@@ -2,6 +2,7 @@
 /x86_64/evmcs_test
 /x86_64/hyperv_cpuid
 /x86_64/kvm_create_max_vcpus
+/x86_64/mmio_warning_test
 /x86_64/platform_info_test
 /x86_64/set_sregs_test
 /x86_64/smm_test
index 41280dc0629745dac02ccee22166ad0a75cc451e..62afd0b43074fd92d5d083219225741f52a70c48 100644 (file)
@@ -11,23 +11,24 @@ LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebi
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
 LIBKVM_aarch64 = lib/aarch64/processor.c
 
-TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test
-TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
-TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
-TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
-TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
-TEST_GEN_PROGS_x86_64 += x86_64/state_test
+TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
-TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
-TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/kvm_create_max_vcpus
+TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
+TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
+TEST_GEN_PROGS_x86_64 += x86_64/smm_test
+TEST_GEN_PROGS_x86_64 += x86_64/state_test
+TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
-TEST_GEN_PROGS_x86_64 += dirty_log_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
+TEST_GEN_PROGS_x86_64 += dirty_log_test
 
-TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
+TEST_GEN_PROGS_aarch64 += dirty_log_test
 
 TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
 LIBKVM += $(LIBKVM_$(UNAME_M))
index a5a4b28f14d8e92b070e4bd5e5b53bbba4a2aa96..bd8eb5579028ae10909ead4987ae5db2bd45bd60 100644 (file)
@@ -139,6 +139,8 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
                                 void *guest_code);
 void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
 
+bool vm_is_unrestricted_guest(struct kvm_vm *vm);
+
 struct kvm_userspace_memory_region *
 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
                                 uint64_t end);
index 6063d5b2f3561c450778f86f3d1474390d79b5ec..af4d26de32d1a7eb5846f8032a35bf9fb2a683a2 100644 (file)
@@ -303,6 +303,8 @@ static inline unsigned long get_xmm(int n)
        return 0;
 }
 
+bool is_intel_cpu(void);
+
 struct kvm_x86_state;
 struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
index 633b22df46a4673bd199998680ac3ed6aa024f00..267f2353e4abb2ac7dbb8bb51c5bb5b2464ebfa8 100644 (file)
@@ -1583,3 +1583,39 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
 {
        return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
 }
+
+/*
+ * Is Unrestricted Guest
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *
+ * Output Args: None
+ *
+ * Return: True if the kvm_intel "unrestricted_guest" module parameter is set
+ *         to 'Y', otherwise false.
+ *
+ * Check if the unrestricted guest flag is enabled.
+ */
+bool vm_is_unrestricted_guest(struct kvm_vm *vm)
+{
+       char val = 'N';
+       size_t count;
+       FILE *f;
+
+       if (vm == NULL) {
+               /* Ensure that the KVM vendor-specific module is loaded. */
+               f = fopen(KVM_DEV_PATH, "r");
+               TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
+                           errno);
+               fclose(f);
+       }
+
+       f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
+       if (f) {
+               count = fread(&val, sizeof(char), 1, f);
+               TEST_ASSERT(count == 1, "Unable to read from param file.");
+               fclose(f);
+       }
+
+       return val == 'Y';
+}
index 21f3040d90cb1f0fc3cdd4ea545d9355e29ba9eb..2fe78bdf3beeaae4b5af5384fb333e9ae316392f 100644 (file)
@@ -1137,3 +1137,19 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
                        r);
        }
 }
+
+bool is_intel_cpu(void)
+{
+       int eax, ebx, ecx, edx;
+       const uint32_t *chunk;
+       const int leaf = 0;
+
+       __asm__ __volatile__(
+               "cpuid"
+               : /* output */ "=a"(eax), "=b"(ebx),
+                 "=c"(ecx), "=d"(edx)
+               : /* input */ "0"(leaf), "2"(0));
+
+       chunk = (const uint32_t *)("GenuineIntel");
+       return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
+}
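
is_intel_cpu() relies on the CPUID leaf-0 convention: the 12-byte vendor string comes back split across EBX ("Genu"), EDX ("ineI") and ECX ("ntel"), so comparing those three registers against 32-bit chunks of "GenuineIntel" is sufficient. A standalone x86-only illustration (not part of the selftest):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t eax, ebx, ecx, edx;
            char vendor[13];

            __asm__ __volatile__("cpuid"
                                 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                                 : "0"(0), "2"(0));

            /* CPUID.0: EBX = "Genu", EDX = "ineI", ECX = "ntel" */
            memcpy(vendor, &ebx, 4);
            memcpy(vendor + 4, &edx, 4);
            memcpy(vendor + 8, &ecx, 4);
            vendor[12] = '\0';

            printf("%s\n", vendor);   /* "GenuineIntel" on Intel parts */
            return 0;
    }
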
diff --git a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
new file mode 100644 (file)
index 0000000..00bb97d
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * mmio_warning_test
+ *
+ * Copyright (C) 2019, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Test that we don't get a kernel warning when we call KVM_RUN after a
+ * triple fault occurs.  To get the triple fault to occur we call KVM_RUN
+ * on a VCPU that hasn't been properly setup.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <kvm_util.h>
+#include <linux/kvm.h>
+#include <processor.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <test_util.h>
+#include <unistd.h>
+
+#define NTHREAD 4
+#define NPROCESS 5
+
+struct thread_context {
+       int kvmcpu;
+       struct kvm_run *run;
+};
+
+void *thr(void *arg)
+{
+       struct thread_context *tc = (struct thread_context *)arg;
+       int res;
+       int kvmcpu = tc->kvmcpu;
+       struct kvm_run *run = tc->run;
+
+       res = ioctl(kvmcpu, KVM_RUN, 0);
+       printf("ret1=%d exit_reason=%d suberror=%d\n",
+               res, run->exit_reason, run->internal.suberror);
+
+       return 0;
+}
+
+void test(void)
+{
+       int i, kvm, kvmvm, kvmcpu;
+       pthread_t th[NTHREAD];
+       struct kvm_run *run;
+       struct thread_context tc;
+
+       kvm = open("/dev/kvm", O_RDWR);
+       TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
+       kvmvm = ioctl(kvm, KVM_CREATE_VM, 0);
+       TEST_ASSERT(kvmvm != -1, "KVM_CREATE_VM failed");
+       kvmcpu = ioctl(kvmvm, KVM_CREATE_VCPU, 0);
+       TEST_ASSERT(kvmcpu != -1, "KVM_CREATE_VCPU failed");
+       run = (struct kvm_run *)mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_SHARED,
+                                   kvmcpu, 0);
+       tc.kvmcpu = kvmcpu;
+       tc.run = run;
+       srand(getpid());
+       for (i = 0; i < NTHREAD; i++) {
+               pthread_create(&th[i], NULL, thr, (void *)(uintptr_t)&tc);
+               usleep(rand() % 10000);
+       }
+       for (i = 0; i < NTHREAD; i++)
+               pthread_join(th[i], NULL);
+}
+
+int get_warnings_count(void)
+{
+       int warnings;
+       FILE *f;
+
+       f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
+       fscanf(f, "%d", &warnings);
+       pclose(f);      /* a stream opened with popen() must be closed with pclose() */
+
+       return warnings;
+}
+
+int main(void)
+{
+       int warnings_before, warnings_after;
+
+       if (!is_intel_cpu()) {
+               printf("Must be run on an Intel CPU, skipping test\n");
+               exit(KSFT_SKIP);
+       }
+
+       if (vm_is_unrestricted_guest(NULL)) {
+               printf("Unrestricted guest must be disabled, skipping test\n");
+               exit(KSFT_SKIP);
+       }
+
+       warnings_before = get_warnings_count();
+
+       for (int i = 0; i < NPROCESS; ++i) {
+               int status;
+               int pid = fork();
+
+               if (pid < 0)
+                       exit(1);
+               if (pid == 0) {
+                       test();
+                       exit(0);
+               }
+               while (waitpid(pid, &status, __WALL) != pid)
+                       ;
+       }
+
+       warnings_after = get_warnings_count();
+       TEST_ASSERT(warnings_before == warnings_after,
+                  "Warnings found in kernel.  Run 'dmesg' to inspect them.");
+
+       return 0;
+}
index 9d62e2c7e024d2ea8d60dac2d184d954b7a607c8..e64ca20b315a9312fd1775154eb39ff6f4393e87 100644 (file)
@@ -75,7 +75,7 @@ void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
                                u32 vmcs12_revision)
 {
        /* Set revision_id in vmcs12 to vmcs12_revision. */
-       memcpy(state->data, &vmcs12_revision, sizeof(u32));
+       memcpy(&state->data, &vmcs12_revision, sizeof(u32));
 }
 
 void set_default_state(struct kvm_nested_state *state)
@@ -95,9 +95,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
                        KVM_STATE_NESTED_EVMCS;
        state->format = 0;
        state->size = size;
-       state->vmx.vmxon_pa = 0x1000;
-       state->vmx.vmcs_pa = 0x2000;
-       state->vmx.smm.flags = 0;
+       state->hdr.vmx.vmxon_pa = 0x1000;
+       state->hdr.vmx.vmcs12_pa = 0x2000;
+       state->hdr.vmx.smm.flags = 0;
        set_revision_id_for_vmcs12(state, VMCS12_REVISION);
 }
 
@@ -123,39 +123,47 @@ void test_vmx_nested_state(struct kvm_vm *vm)
        /*
         * We cannot virtualize anything if the guest does not have VMX
         * enabled.  We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
-        * is set to -1ull.
+        * is set to -1ull, but the flags must be zero.
         */
        set_default_vmx_state(state, state_sz);
-       state->vmx.vmxon_pa = -1ull;
+       state->hdr.vmx.vmxon_pa = -1ull;
+       test_nested_state_expect_einval(vm, state);
+
+       state->hdr.vmx.vmcs12_pa = -1ull;
+       state->flags = KVM_STATE_NESTED_EVMCS;
+       test_nested_state_expect_einval(vm, state);
+
+       state->flags = 0;
        test_nested_state(vm, state);
 
        /* Enable VMX in the guest CPUID. */
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
-       /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
+       /*
+        * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
+        * setting the nested state but flags other than eVMCS must be clear.
+        */
        set_default_vmx_state(state, state_sz);
-       state->vmx.vmxon_pa = -1ull;
-       state->vmx.smm.flags = 1;
+       state->hdr.vmx.vmxon_pa = -1ull;
+       state->hdr.vmx.vmcs12_pa = -1ull;
        test_nested_state_expect_einval(vm, state);
 
-       /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
-       set_default_vmx_state(state, state_sz);
-       state->vmx.vmxon_pa = -1ull;
-       state->vmx.vmcs_pa = 0;
+       state->flags = KVM_STATE_NESTED_EVMCS;
+       test_nested_state(vm, state);
+
+       /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
+       state->hdr.vmx.smm.flags = 1;
        test_nested_state_expect_einval(vm, state);
 
-       /*
-        * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
-        * setting the nested state.
-        */
+       /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
        set_default_vmx_state(state, state_sz);
-       state->vmx.vmxon_pa = -1ull;
-       state->vmx.vmcs_pa = -1ull;
-       test_nested_state(vm, state);
+       state->hdr.vmx.vmxon_pa = -1ull;
+       state->flags = 0;
+       test_nested_state_expect_einval(vm, state);
 
        /* It is invalid to have vmxon_pa set to a non-page aligned address. */
        set_default_vmx_state(state, state_sz);
-       state->vmx.vmxon_pa = 1;
+       state->hdr.vmx.vmxon_pa = 1;
        test_nested_state_expect_einval(vm, state);
 
        /*
@@ -165,7 +173,7 @@ void test_vmx_nested_state(struct kvm_vm *vm)
        set_default_vmx_state(state, state_sz);
        state->flags = KVM_STATE_NESTED_GUEST_MODE  |
                      KVM_STATE_NESTED_RUN_PENDING;
-       state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+       state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
        test_nested_state_expect_einval(vm, state);
 
        /*
@@ -174,14 +182,14 @@ void test_vmx_nested_state(struct kvm_vm *vm)
         *      KVM_STATE_NESTED_SMM_VMXON
         */
        set_default_vmx_state(state, state_sz);
-       state->vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
+       state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
                                KVM_STATE_NESTED_SMM_VMXON);
        test_nested_state_expect_einval(vm, state);
 
        /* Outside SMM, SMM flags must be zero. */
        set_default_vmx_state(state, state_sz);
        state->flags = 0;
-       state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+       state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
        test_nested_state_expect_einval(vm, state);
 
        /* Size must be large enough to fit kvm_nested_state and vmcs12. */
@@ -191,8 +199,8 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 
        /* vmxon_pa cannot be the same address as vmcs_pa. */
        set_default_vmx_state(state, state_sz);
-       state->vmx.vmxon_pa = 0;
-       state->vmx.vmcs_pa = 0;
+       state->hdr.vmx.vmxon_pa = 0;
+       state->hdr.vmx.vmcs12_pa = 0;
        test_nested_state_expect_einval(vm, state);
 
        /* The revision id for vmcs12 must be VMCS12_REVISION. */
@@ -205,16 +213,16 @@ void test_vmx_nested_state(struct kvm_vm *vm)
         * it again.
         */
        set_default_vmx_state(state, state_sz);
-       state->vmx.vmxon_pa = -1ull;
-       state->vmx.vmcs_pa = -1ull;
+       state->hdr.vmx.vmxon_pa = -1ull;
+       state->hdr.vmx.vmcs12_pa = -1ull;
        state->flags = 0;
        test_nested_state(vm, state);
        vcpu_nested_state_get(vm, VCPU_ID, state);
        TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
                    "Size must be between %d and %d.  The size returned was %d.",
                    sizeof(*state), state_sz, state->size);
-       TEST_ASSERT(state->vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
-       TEST_ASSERT(state->vmx.vmcs_pa == -1ull, "vmcs_pa must be -1ull.");
+       TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
+       TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
 
        free(state);
 }
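
Most of this hunk is mechanical renaming after the kvm_nested_state UAPI rework: the VMX-specific fields now live under the hdr member and vmcs_pa is spelled vmcs12_pa, while state->data is addressed as &state->data in set_revision_id_for_vmcs12(). A minimal sketch of the new field spelling, assuming v5.2-era <linux/kvm.h> headers (the struct layout is defined there, not reproduced in this diff):

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* Only the fixed-size part is needed for this illustration. */
            struct kvm_nested_state *state = calloc(1, sizeof(*state));

            state->hdr.vmx.vmxon_pa  = 0x1000;   /* was state->vmx.vmxon_pa  */
            state->hdr.vmx.vmcs12_pa = 0x2000;   /* was state->vmx.vmcs_pa   */
            state->hdr.vmx.smm.flags = 0;        /* was state->vmx.smm.flags */

            printf("%llx %llx\n",
                   (unsigned long long)state->hdr.vmx.vmxon_pa,
                   (unsigned long long)state->hdr.vmx.vmcs12_pa);
            free(state);
            return 0;
    }
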
index 29bcfa84aec710702cc82ceb1e64c6ee098a8803..124803eea4a9d7395489f1a9eec36524009f0ecd 100755 (executable)
@@ -2,7 +2,8 @@
 # SPDX-License-Identifier: GPL-2.0
 
 ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
-       match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test"
+       match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
+       match_ip_tos_test"
 NUM_NETIFS=2
 source tc_common.sh
 source lib.sh
@@ -276,6 +277,39 @@ match_vlan_test()
        log_test "VLAN match ($tcflags)"
 }
 
+match_ip_tos_test()
+{
+       RET=0
+
+       tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+               $tcflags dst_ip 192.0.2.2 ip_tos 0x20 action drop
+       tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+               $tcflags dst_ip 192.0.2.2 ip_tos 0x18 action drop
+
+       $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+               -t ip tos=18 -q
+
+       tc_check_packets "dev $h2 ingress" 101 1
+       check_fail $? "Matched on a wrong filter (0x18)"
+
+       tc_check_packets "dev $h2 ingress" 102 1
+       check_err $? "Did not match on correct filter (0x18)"
+
+       $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+               -t ip tos=20 -q
+
+       tc_check_packets "dev $h2 ingress" 102 2
+       check_fail $? "Matched on a wrong filter (0x20)"
+
+       tc_check_packets "dev $h2 ingress" 101 1
+       check_err $? "Did not match on correct filter (0x20)"
+
+       tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+       tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+       log_test "ip_tos match ($tcflags)"
+}
+
 setup_prepare()
 {
        h1=${NETIFS[p1]}
index 05ddb6293b7981e83e4fab9c6508c4f422485714..1be486d5d7cb495c81a6e8168421cc699c774a2d 100644 (file)
@@ -309,14 +309,15 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
        }
 }
 
+/* Only called for a fully emulated timer */
 static void timer_emulate(struct arch_timer_context *ctx)
 {
        bool should_fire = kvm_timer_should_fire(ctx);
 
        trace_kvm_timer_emulate(ctx, should_fire);
 
-       if (should_fire) {
-               kvm_timer_update_irq(ctx->vcpu, true, ctx);
+       if (should_fire != ctx->irq.level) {
+               kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
                return;
        }
 
index 44ceaccb18cff19655c2668fcc2f5a2f482af34a..8c9fe831bce4501ff68bd9ea0e99ecbd8a943bb6 100644 (file)
@@ -1734,6 +1734,7 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
 
        mutex_unlock(&its->its_lock);
        kfree(its);
+       kfree(kvm_dev); /* allocated by kvm_ioctl_create_device(), freed here by .destroy */
 }
 
 static int vgic_its_has_attr_regs(struct kvm_device *dev,