asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge branches 'iommu/fixes', 'arm/smmu', 'x86/amd', 'x86/vt-d' and 'core' into next
author Joerg Roedel <jroedel@suse.de>
Fri, 24 Jan 2020 14:39:39 +0000 (15:39 +0100)
committer Joerg Roedel <jroedel@suse.de>
Fri, 24 Jan 2020 14:39:39 +0000 (15:39 +0100)
778 files changed:
.mailmap
Documentation/ABI/stable/sysfs-driver-mlxreg-io
Documentation/admin-guide/devices.txt
Documentation/dev-tools/kcov.rst
Documentation/dev-tools/kunit/start.rst
Documentation/devicetree/bindings/i2c/i2c-at91.txt
Documentation/devicetree/bindings/iommu/iommu.txt
Documentation/devicetree/bindings/spi/spi-controller.yaml
Documentation/features/debug/gcov-profile-all/arch-support.txt
Documentation/media/v4l-drivers/meye.rst
Documentation/networking/dsa/sja1105.rst
Documentation/networking/ip-sysctl.txt
Documentation/networking/netdev-FAQ.rst
Documentation/process/embargoed-hardware-issues.rst
Documentation/process/index.rst
Documentation/riscv/index.rst
Documentation/riscv/patch-acceptance.rst [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arc/include/asm/entry-arcv2.h
arch/arc/include/asm/hugepage.h
arch/arc/kernel/asm-offsets.c
arch/arc/plat-eznps/Kconfig
arch/arm/Kconfig
arch/arm/boot/dts/am571x-idk.dts
arch/arm/boot/dts/am572x-idk-common.dtsi
arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
arch/arm/boot/dts/aspeed-g6.dtsi
arch/arm/boot/dts/imx6dl-icore-mipi.dts
arch/arm/boot/dts/imx6q-dhcom-pdk2.dts
arch/arm/boot/dts/imx6q-dhcom-som.dtsi
arch/arm/boot/dts/imx6qdl-sabresd.dtsi
arch/arm/boot/dts/imx6sl-evk.dts
arch/arm/boot/dts/imx6sll-evk.dts
arch/arm/boot/dts/imx6sx-sdb-reva.dts
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/boot/dts/imx7s-colibri.dtsi
arch/arm/boot/dts/imx7ulp.dtsi
arch/arm/boot/dts/meson8.dtsi
arch/arm/boot/dts/mmp3.dtsi
arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
arch/arm/kernel/process.c
arch/arm/mach-davinci/Kconfig
arch/arm/mach-mmp/time.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/pdata-quirks.c
arch/arm64/Kconfig
arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mm.dtsi
arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
arch/arm64/boot/dts/rockchip/rk3328-a1.dts
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/uapi/asm/unistd.h
arch/arm64/kernel/process.c
arch/arm64/mm/fault.c
arch/arm64/mm/mmu.c
arch/hexagon/include/asm/atomic.h
arch/hexagon/include/asm/bitops.h
arch/hexagon/include/asm/cmpxchg.h
arch/hexagon/include/asm/futex.h
arch/hexagon/include/asm/io.h
arch/hexagon/include/asm/spinlock.h
arch/hexagon/kernel/stacktrace.c
arch/hexagon/kernel/vm_entry.S
arch/ia64/mm/init.c
arch/mips/Kconfig
arch/mips/boot/compressed/Makefile
arch/mips/include/asm/cpu-type.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/vdso/gettimeofday.h
arch/mips/kernel/cacheinfo.c
arch/mips/net/ebpf_jit.c
arch/mips/vdso/vgettimeofday.c
arch/nds32/include/asm/cacheflush.h
arch/nds32/include/asm/pgtable.h
arch/parisc/Kconfig
arch/parisc/kernel/drivers.c
arch/parisc/kernel/process.c
arch/parisc/mm/init.c
arch/powerpc/include/asm/spinlock.h
arch/powerpc/mm/mem.c
arch/powerpc/mm/slice.c
arch/riscv/Kconfig
arch/riscv/boot/dts/sifive/fu540-c000.dtsi
arch/riscv/include/asm/asm-prototypes.h
arch/riscv/include/asm/csr.h
arch/riscv/kernel/entry.S
arch/riscv/kernel/ftrace.c
arch/riscv/kernel/head.S
arch/riscv/kernel/irq.c
arch/riscv/kernel/process.c
arch/riscv/kernel/riscv_ksyms.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/lib/tishift.S
arch/riscv/lib/uaccess.S
arch/riscv/mm/cacheflush.c
arch/riscv/mm/init.c
arch/s390/kernel/setup.c
arch/s390/mm/init.c
arch/sh/mm/init.c
arch/um/Kconfig
arch/um/include/asm/ptrace-generic.h
arch/um/kernel/process.c
arch/x86/boot/compressed/head_64.S
arch/x86/events/intel/uncore_snb.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mce/therm_throt.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/um/tls_32.c
arch/x86/um/tls_64.c
arch/xtensa/Kconfig
arch/xtensa/kernel/process.c
block/bio.c
block/blk-merge.c
block/blk-settings.c
block/compat_ioctl.c
drivers/acpi/arm64/iort.c
drivers/ata/ahci_brcm.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/sata_fsl.c
drivers/ata/sata_mv.c
drivers/ata/sata_nv.c
drivers/atm/eni.c
drivers/base/firmware_loader/builtin/Makefile
drivers/block/null_blk_zoned.c
drivers/block/pktcdvd.c
drivers/bus/ti-sysc.c
drivers/char/agp/isoch.c
drivers/char/tpm/tpm-dev-common.c
drivers/char/tpm/tpm-dev.h
drivers/char/tpm/tpm_tis_core.c
drivers/clk/clk.c
drivers/clk/mmp/clk-of-mmp2.c
drivers/clk/qcom/gcc-sdm845.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
drivers/clk/sunxi-ng/ccu-sun8i-r.c
drivers/clk/sunxi-ng/ccu-sun8i-r40.c
drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
drivers/clk/tegra/clk.c
drivers/clk/ti/clk-dra7-atl.c
drivers/clocksource/timer-riscv.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpuidle/governors/teo.c
drivers/crypto/hisilicon/sec2/sec.h
drivers/crypto/hisilicon/sec2/sec_crypto.c
drivers/crypto/hisilicon/sec2/sec_main.c
drivers/devfreq/Kconfig
drivers/dma/dma-jz4780.c
drivers/dma/ioat/dma.c
drivers/dma/k3dma.c
drivers/dma/virt-dma.c
drivers/edac/sifive_edac.c
drivers/firmware/broadcom/tee_bnxt_fw.c
drivers/firmware/efi/earlycon.c
drivers/firmware/efi/libstub/random.c
drivers/gpio/Kconfig
drivers/gpio/gpio-aspeed-sgpio.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-thunderx.c
drivers/gpio/gpio-xgs-iproc.c
drivers/gpio/gpio-xtensa.c
drivers/gpio/gpio-zynq.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/arm/malidp_mw.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i915/display/intel_audio.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_frontbuffer.c
drivers/gpu/drm/i915/display/intel_frontbuffer.h
drivers/gpu/drm/i915/display/intel_overlay.c
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/selftests/i915_random.h
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/rockchip/cdn-dp-core.h
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tcon.h
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-ite.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-quirks.c
drivers/hid/hid-steam.c
drivers/hid/hidraw.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/uhid.c
drivers/hid/usbhid/hiddev.c
drivers/hid/wacom_wac.c
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/i2c/busses/i2c-at91-core.c
drivers/i2c/busses/i2c-bcm2835.c
drivers/i2c/busses/i2c-iop3xx.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core-base.c
drivers/iio/adc/ad7124.c
drivers/iio/chemical/Kconfig
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
drivers/iio/industrialio-buffer.c
drivers/iio/light/vcnl4000.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/hfi1/iowait.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/keyboard/imx_sc_key.c
drivers/input/misc/uinput.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/arm-smmu-impl.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/arm-smmu.h
drivers/iommu/dma-iommu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu-debugfs.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-pasid.c
drivers/iommu/intel-pasid.h
drivers/iommu/intel-svm.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/io-pgtable.c
drivers/iommu/iommu-sysfs.c
drivers/iommu/iommu.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu.c
drivers/iommu/of_iommu.c
drivers/iommu/qcom_iommu.c
drivers/irqchip/irq-ingenic.c
drivers/irqchip/irq-sifive-plic.c
drivers/md/dm-snap-persistent.c
drivers/md/raid0.c
drivers/media/cec/cec-adap.c
drivers/media/usb/pulse8-cec/pulse8-cec.c
drivers/message/fusion/mptctl.c
drivers/misc/enclosure.c
drivers/misc/lkdtm/bugs.c
drivers/mtd/nand/onenand/omap2.c
drivers/mtd/nand/onenand/onenand_base.c
drivers/mtd/nand/onenand/samsung_mtd.c
drivers/mtd/nand/raw/cadence-nand-controller.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/stm32_fmc2_nand.c
drivers/mtd/sm_ftl.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/can/m_can/tcan4x5x.c
drivers/net/can/mscan/mscan.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/dsa/mv88e6xxx/global1.c
drivers/net/dsa/mv88e6xxx/global1.h
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_ptp.c
drivers/net/dsa/sja1105/sja1105_static_config.c
drivers/net/dsa/sja1105/sja1105_tas.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
drivers/net/ethernet/chelsio/cxgb4/sched.c
drivers/net/ethernet/chelsio/cxgb4/sched.h
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/google/gve/gve_tx.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/crdump.c
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en/health.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/socionext/sni_ave.c
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/gtp.c
drivers/net/hyperv/rndis_filter.c
drivers/net/macvlan.c
drivers/net/netdevsim/dev.c
drivers/net/phy/Kconfig
drivers/net/phy/aquantia_main.c
drivers/net/phy/dp83867.c
drivers/net/phy/phylink.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/vxlan.c
drivers/net/wan/fsl_ucc_hdlc.c
drivers/net/wan/lapbether.c
drivers/net/wan/sdla.c
drivers/nfc/pn533/usb.c
drivers/nvme/host/core.c
drivers/nvme/target/admin-cmd.c
drivers/pci/ats.c
drivers/pci/pci.c
drivers/phy/motorola/phy-cpcap-usb.c
drivers/phy/motorola/phy-mapphone-mdm6600.c
drivers/phy/qualcomm/phy-qcom-qmp.c
drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
drivers/pinctrl/cirrus/Kconfig
drivers/pinctrl/meson/pinctrl-meson.c
drivers/platform/chrome/wilco_ec/keyboard_leds.c
drivers/platform/mellanox/mlxbf-tmfifo.c
drivers/platform/mips/Kconfig
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/gpd-pocket-fan.c
drivers/platform/x86/intel_ips.h
drivers/platform/x86/intel_pmc_core.h
drivers/platform/x86/intel_pmc_core_pltdrv.c
drivers/powercap/intel_rapl_common.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_private.h
drivers/regulator/axp20x-regulator.c
drivers/regulator/bd70528-regulator.c
drivers/rtc/rtc-mc146818-lib.c
drivers/rtc/rtc-mt6397.c
drivers/rtc/rtc-sun6i.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/zcrypt_ccamisc.c
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2c.c
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/sd.c
drivers/scsi/storvsc_drv.c
drivers/soc/amlogic/meson-ee-pwrc.c
drivers/soc/sifive/sifive_l2_cache.c
drivers/soc/ti/wkup_m3_ipc.c
drivers/spi/spi-dw.c
drivers/spi/spi-dw.h
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-uniphier.c
drivers/spi/spi.c
drivers/staging/comedi/drivers/adv_pci1710.c
drivers/staging/comedi/drivers/ni_routes.c
drivers/staging/media/ipu3/include/intel-ipu3.h
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/staging/vt6656/baseband.c
drivers/staging/vt6656/card.c
drivers/staging/vt6656/device.h
drivers/staging/vt6656/main_usb.c
drivers/staging/vt6656/usbpipe.c
drivers/staging/vt6656/usbpipe.h
drivers/staging/vt6656/wcmd.c
drivers/target/target_core_iblock.c
drivers/tee/optee/shm_pool.c
drivers/thermal/qcom/tsens.c
drivers/tty/serdev/core.c
drivers/tty/tty_port.c
drivers/usb/cdns3/gadget.c
drivers/usb/chipidea/host.c
drivers/usb/core/config.c
drivers/usb/core/hub.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/Kconfig
drivers/usb/host/ohci-da8xx.c
drivers/usb/musb/jz4740.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musbhsdma.c
drivers/usb/serial/ch341.c
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/opticon.c
drivers/usb/serial/option.c
drivers/usb/serial/quatech2.c
drivers/usb/serial/usb-serial-simple.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/usb-wwan.h
drivers/usb/serial/usb_wwan.c
drivers/usb/typec/tcpm/tcpci.c
drivers/usb/typec/ucsi/ucsi.h
drivers/watchdog/Kconfig
drivers/watchdog/imx7ulp_wdt.c
drivers/watchdog/orion_wdt.c
drivers/watchdog/rn5t618_wdt.c
drivers/watchdog/w83627hf_wdt.c
fs/afs/dir.c
fs/btrfs/compression.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/btrfs/volumes.c
fs/buffer.c
fs/char_dev.c
fs/cifs/cifsglob.h
fs/cifs/readdir.c
fs/cifs/smb2file.c
fs/direct-io.c
fs/file.c
fs/fuse/file.c
fs/hugetlbfs/inode.c
fs/internal.h
fs/io-wq.c
fs/io_uring.c
fs/locks.c
fs/mpage.c
fs/namei.c
fs/namespace.c
fs/nfs/nfstrace.h
fs/nsfs.c
fs/ocfs2/dlmglue.c
fs/ocfs2/journal.c
fs/posix_acl.c
fs/pstore/ram.c
fs/pstore/ram_core.c
include/asm-generic/cacheflush.h
include/drm/drm_dp_mst_helper.h
include/dt-bindings/reset/amlogic,meson8b-reset.h
include/linux/ahci_platform.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/bvec.h
include/linux/can/dev.h
include/linux/dmaengine.h
include/linux/if_ether.h
include/linux/intel-iommu.h
include/linux/io-pgtable.h
include/linux/iommu.h
include/linux/kernel.h
include/linux/libata.h
include/linux/memory_hotplug.h
include/linux/mfd/mt6397/rtc.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/mtd/flashchip.h
include/linux/namei.h
include/linux/of_mdio.h
include/linux/pci-ats.h
include/linux/posix-clock.h
include/linux/sched.h
include/linux/skmsg.h
include/linux/spi/spi.h
include/linux/sxgbe_platform.h
include/linux/syscalls.h
include/linux/tnum.h
include/net/cfg80211.h
include/net/devlink.h
include/net/dst.h
include/net/dst_ops.h
include/net/netfilter/nf_flow_table.h
include/net/sch_generic.h
include/net/tcp.h
include/soc/sifive/sifive_l2_cache.h [moved from arch/riscv/include/asm/sifive_l2_cache.h with 72% similarity]
include/trace/events/afs.h
include/trace/events/huge_memory.h
include/trace/events/intel_iommu.h
include/trace/events/preemptirq.h
include/uapi/linux/input.h
include/uapi/linux/kcov.h
init/main.c
kernel/bpf/cgroup.c
kernel/bpf/tnum.c
kernel/bpf/verifier.c
kernel/cpu.c
kernel/cred.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/locking/lockdep.c
kernel/locking/rwsem.c
kernel/ptrace.c
kernel/rseq.c
kernel/seccomp.c
kernel/taskstats.c
kernel/time/posix-clock.c
kernel/time/posix-stubs.c
kernel/time/tick-sched.c
kernel/trace/fgraph.c
kernel/trace/ftrace.c
kernel/trace/trace_events_inject.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_seq.c
kernel/trace/trace_stack.c
lib/vdso/gettimeofday.c
mm/gup_benchmark.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/memremap.c
mm/migrate.c
mm/mmap.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/shmem.c
mm/slab.c
mm/slab_common.c
mm/slub.c
mm/sparse.c
mm/vmalloc.c
mm/zsmalloc.c
net/8021q/vlan.h
net/8021q/vlan_dev.c
net/8021q/vlan_netlink.c
net/batman-adv/distributed-arp-table.c
net/bridge/br_nf_core.c
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/core/devlink.c
net/core/filter.c
net/core/skmsg.c
net/core/sock_map.c
net/decnet/dn_route.c
net/dsa/tag_gswip.c
net/dsa/tag_qca.c
net/hsr/hsr_debugfs.c
net/hsr/hsr_device.c
net/hsr/hsr_framereg.c
net/hsr/hsr_framereg.h
net/hsr/hsr_main.c
net/hsr/hsr_main.h
net/hsr/hsr_netlink.c
net/ipv4/fib_trie.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/route.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_ulp.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/xfrm6_policy.c
net/mac80211/cfg.c
net/mac80211/mesh_hwmp.c
net/mac80211/tkip.c
net/netfilter/ipset/ip_set_bitmap_gen.h
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_nat_proto.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_tproxy.c
net/netfilter/nft_tunnel.c
net/qrtr/qrtr.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/conn_event.c
net/rxrpc/conn_service.c
net/rxrpc/input.c
net/rxrpc/rxkad.c
net/rxrpc/security.c
net/sched/act_ctinfo.c
net/sched/act_ife.c
net/sched/act_mirred.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/cls_u32.c
net/sched/sch_cake.c
net/sched/sch_fq.c
net/sched/sch_prio.c
net/sctp/sm_sideeffect.c
net/sctp/stream.c
net/sctp/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/tipc/Makefile
net/tipc/netlink_compat.c
net/tipc/socket.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/vmw_vsock/hyperv_transport.c
net/wireless/nl80211.c
net/wireless/rdev-ops.h
net/wireless/reg.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-core.c
net/x25/af_x25.c
samples/seccomp/user-trap.c
scripts/gcc-plugins/Kconfig
scripts/package/mkdebian
security/apparmor/apparmorfs.c
security/apparmor/domain.c
security/apparmor/file.c
security/apparmor/mount.c
security/apparmor/policy.c
security/tomoyo/common.c
security/tomoyo/domain.c
security/tomoyo/group.c
security/tomoyo/realpath.c
security/tomoyo/util.c
sound/core/seq/seq_timer.c
sound/firewire/dice/dice-extension.c
sound/firewire/tascam/amdtp-tascam.c
sound/hda/hdac_regmap.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/ice1724.c
sound/soc/codecs/cros_ec_codec.c
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/msm8916-wcd-analog.c
sound/soc/codecs/msm8916-wcd-digital.c
sound/soc/codecs/rt5640.c
sound/soc/fsl/fsl_audmix.c
sound/soc/intel/boards/bytcht_es8316.c
sound/soc/intel/boards/cml_rt1011_rt5682.c
sound/soc/soc-component.c
sound/soc/soc-core.c
sound/soc/soc-topology.c
sound/soc/sof/imx/imx8.c
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/hda-dai.c
sound/soc/sof/intel/hda-loader.c
sound/soc/sof/ipc.c
sound/soc/sti/uniperif_player.c
sound/soc/stm/stm32_adfsdm.c
sound/soc/stm/stm32_sai_sub.c
sound/soc/stm/stm32_spdifrx.c
sound/usb/card.h
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/usbaudio.h
tools/bpf/bpftool/btf_dumper.c
tools/lib/bpf/Makefile
tools/lib/traceevent/parse-filter.c
tools/perf/builtin-report.c
tools/perf/util/hist.h
tools/perf/util/symbol-elf.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_kernel.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/selftests/bpf/.gitignore
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
tools/testing/selftests/filesystems/epoll/Makefile
tools/testing/selftests/firmware/fw_lib.sh
tools/testing/selftests/livepatch/functions.sh
tools/testing/selftests/livepatch/test-state.sh
tools/testing/selftests/net/forwarding/loopback.sh
tools/testing/selftests/netfilter/nft_flowtable.sh
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/rseq/rseq.h
tools/testing/selftests/rseq/settings [new file with mode: 0644]
tools/testing/selftests/seccomp/seccomp_bpf.c
usr/gen_initramfs_list.sh

index a7bc8cabd157b89adbc2d8f6c4d6e3c88b14e3cd..d9d5c80252f9cf2d2f140d97fb86a87c1ecf4b22 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -99,6 +99,7 @@ Jacob Shin <Jacob.Shin@amd.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@google.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk.kim@samsung.com>
+Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
 James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
index 8ca498447aeb9f4bfbc8b6e71cdf2086893ea5f3..05601a90a9b6f2251c81c881256221b9bf1f42cb 100644 (file)
@@ -29,13 +29,13 @@ Description:        This file shows the system fans direction:
 
                The files are read only.
 
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version
 
 Date:          November 2018
 KernelVersion: 5.0
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
 Description:   These files show with which CPLD versions have been burned
-               on LED board.
+               on LED or Gearbox board.
 
                The files are read only.
 
@@ -121,6 +121,15 @@ Description:       These files show the system reset cause, as following: ComEx
 
                The files are read only.
 
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version
+Date:          November 2018
+KernelVersion: 5.0
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   These files show with which CPLD versions have been burned
+               on LED board.
+
+               The files are read only.
+
 Date:          June 2019
 KernelVersion: 5.3
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
index 1c5d2281efc9744df8427f34b99cb6b3883877c4..2a97aaec8b122cd7322744a43413d742a246afa2 100644 (file)
                182 = /dev/perfctr      Performance-monitoring counters
                183 = /dev/hwrng        Generic random number generator
                184 = /dev/cpu/microcode CPU microcode update interface
-               186 = /dev/atomicps     Atomic shapshot of process state data
+               186 = /dev/atomicps     Atomic snapshot of process state data
                187 = /dev/irnet        IrNET device
                188 = /dev/smbusbios    SMBus BIOS
                189 = /dev/ussp_ctl     User space serial port control
index 36890b026e7777b206b8da3ca107f9ad81cb89b2..1c4e1825d769590a98aa9371546b8d716bd67ad5 100644 (file)
@@ -251,11 +251,11 @@ selectively from different subsystems.
 .. code-block:: c
 
     struct kcov_remote_arg {
-       unsigned        trace_mode;
-       unsigned        area_size;
-       unsigned        num_handles;
-       uint64_t        common_handle;
-       uint64_t        handles[0];
+       __u32           trace_mode;
+       __u32           area_size;
+       __u32           num_handles;
+       __aligned_u64   common_handle;
+       __aligned_u64   handles[0];
     };
 
     #define KCOV_INIT_TRACE                    _IOR('c', 1, unsigned long)
index 9d6db892c41c04d711289eb696216d667da12319..4e1d24db6b139f60f0576fae9e30f1b7ad7e5c6a 100644 (file)
@@ -24,19 +24,16 @@ The wrapper can be run with:
 For more information on this wrapper (also called kunit_tool) checkout the
 :doc:`kunit-tool` page.
 
-Creating a kunitconfig
-======================
+Creating a .kunitconfig
+=======================
 The Python script is a thin wrapper around Kbuild. As such, it needs to be
-configured with a ``kunitconfig`` file. This file essentially contains the
+configured with a ``.kunitconfig`` file. This file essentially contains the
 regular Kernel config, with the specific test targets as well.
 
 .. code-block:: bash
 
-       git clone -b master https://kunit.googlesource.com/kunitconfig $PATH_TO_KUNITCONFIG_REPO
        cd $PATH_TO_LINUX_REPO
-       ln -s $PATH_TO_KUNIT_CONFIG_REPO/kunitconfig kunitconfig
-
-You may want to add kunitconfig to your local gitignore.
+       cp arch/um/configs/kunit_defconfig .kunitconfig
 
 Verifying KUnit Works
 ---------------------
@@ -151,7 +148,7 @@ and the following to ``drivers/misc/Makefile``:
 
        obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o
 
-Now add it to your ``kunitconfig``:
+Now add it to your ``.kunitconfig``:
 
 .. code-block:: none
 
index 2210f4359c4573a1d74af7dad7dbdc2ed6741d59..8347b1e7c08071023a149cbc5e75e83cded92a8d 100644 (file)
@@ -18,8 +18,10 @@ Optional properties:
 - dma-names: should contain "tx" and "rx".
 - atmel,fifo-size: maximum number of data the RX and TX FIFOs can store for FIFO
   capable I2C controllers.
-- i2c-sda-hold-time-ns: TWD hold time, only available for "atmel,sama5d4-i2c"
-  and "atmel,sama5d2-i2c".
+- i2c-sda-hold-time-ns: TWD hold time, only available for:
+       "atmel,sama5d4-i2c",
+       "atmel,sama5d2-i2c",
+       "microchip,sam9x60-i2c".
 - Child nodes conforming to i2c bus binding
 
 Examples :
index 5a8b4624defcb981fc630b2b88ba88957efe6868..3c36334e4f94214ded1ae110fa6cee242e9ed4b2 100644 (file)
@@ -86,6 +86,12 @@ have a means to turn off translation. But it is invalid in such cases to
 disable the IOMMU's device tree node in the first place because it would
 prevent any driver from properly setting up the translations.
 
+Optional properties:
+--------------------
+- pasid-num-bits: Some masters support multiple address spaces for DMA, by
+  tagging DMA transactions with an address space identifier. By default,
+  this is 0, which means that the device only has one address space.
+
 
 Notes:
 ======
index 732339275848c12a52c438656f97e3e74a44b9ff..1e0ca6ccf64bbd0a2e257d8dbcda2d829fcb742f 100644 (file)
@@ -111,7 +111,7 @@ patternProperties:
       spi-rx-bus-width:
         allOf:
           - $ref: /schemas/types.yaml#/definitions/uint32
-          - enum: [ 1, 2, 4 ]
+          - enum: [ 1, 2, 4, 8 ]
           - default: 1
         description:
           Bus width to the SPI bus used for MISO.
@@ -123,7 +123,7 @@ patternProperties:
       spi-tx-bus-width:
         allOf:
           - $ref: /schemas/types.yaml#/definitions/uint32
-          - enum: [ 1, 2, 4 ]
+          - enum: [ 1, 2, 4, 8 ]
           - default: 1
         description:
           Bus width to the SPI bus used for MOSI.
index 059d58a549c7a6e71da1eae8408f5b6892e69c02..6fb2b0671994efd5d526762e58ef6f2b242a1276 100644 (file)
@@ -23,7 +23,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: |  ok  |
     |          sh: |  ok  |
     |       sparc: | TODO |
index a572996cdbf60ab602e3d4ddaaf4658b746eff6a..dc57a6a91b43f121d3258f7ce7924f7cf4fc676a 100644 (file)
@@ -95,7 +95,7 @@ so all video4linux tools (like xawtv) should work with this driver.
 
 Besides the video4linux interface, the driver has a private interface
 for accessing the Motion Eye extended parameters (camera sharpness,
-agc, video framerate), the shapshot and the MJPEG capture facilities.
+agc, video framerate), the snapshot and the MJPEG capture facilities.
 
 This interface consists of several ioctls (prototypes and structures
 can be found in include/linux/meye.h):
index eef20d0bcf7c15183b7db65ba847dd47f2c45b60..64553d8d91cb9a6e65f1ac1d73b4fbe554d4abde 100644 (file)
@@ -230,12 +230,6 @@ simultaneously on two ports. The driver checks the consistency of the schedules
 against this restriction and errors out when appropriate. Schedule analysis is
 needed to avoid this, which is outside the scope of the document.
 
-At the moment, the time-aware scheduler can only be triggered based on a
-standalone clock and not based on PTP time. This means the base-time argument
-from tc-taprio is ignored and the schedule starts right away. It also means it
-is more difficult to phase-align the scheduler with the other devices in the
-network.
-
 Device Tree bindings and board design
 =====================================
 
index fd26788e8c96c66716babef9683733af4deefb1a..48ccb1b31160287f5402b93a3105ff6f6aff0257 100644 (file)
@@ -603,7 +603,7 @@ tcp_synack_retries - INTEGER
        with the current initial RTO of 1second. With this the final timeout
        for a passive TCP connection will happen after 63seconds.
 
-tcp_syncookies - BOOLEAN
+tcp_syncookies - INTEGER
        Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
        Send out syncookies when the syn backlog queue of a socket
        overflows. This is to prevent against the common 'SYN flood attack'
index 642fa963be3cf8f325c29947072e6c6851213d4b..d5c9320901c3273b99bcc1b4cda78c5472d9f6fd 100644 (file)
@@ -34,8 +34,8 @@ the names, the ``net`` tree is for fixes to existing code already in the
 mainline tree from Linus, and ``net-next`` is where the new code goes
 for the future release.  You can find the trees here:
 
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 
 Q: How often do changes from these trees make it to the mainline Linus tree?
 ----------------------------------------------------------------------------
index 799580acc8dec621b04aabec3211f4e2a78727cc..5d54946cfc750a9a86a6238cc2939dfe8ba3ca6c 100644 (file)
@@ -255,7 +255,7 @@ an involved disclosed party. The current ambassadors list:
   Red Hat      Josh Poimboeuf <jpoimboe@redhat.com>
   SUSE         Jiri Kosina <jkosina@suse.cz>
 
-  Amazon
+  Amazon       Peter Bowen <pzb@amzn.com>
   Google       Kees Cook <keescook@chromium.org>
   ============= ========================================================
 
index 21aa7d5358e621ab991ff9116278ce35c64b41ad..6399d92f0b21d6db95a6c4893dde1f0d243108c8 100644 (file)
@@ -60,6 +60,7 @@ lack of a better place.
    volatile-considered-harmful
    botching-up-ioctls
    clang-format
+   ../riscv/patch-acceptance
 
 .. only::  subproject and html
 
index 215fd3c1f2d57f5397b469ee6c42b17be7025732..fa33bffd8992771153162189cc05753d844097a8 100644 (file)
@@ -7,6 +7,7 @@ RISC-V architecture
 
     boot-image-header
     pmu
+    patch-acceptance
 
 .. only::  subproject and html
 
diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/riscv/patch-acceptance.rst
new file mode 100644 (file)
index 0000000..dfe0ac5
--- /dev/null
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+arch/riscv maintenance guidelines for developers
+================================================
+
+Overview
+--------
+The RISC-V instruction set architecture is developed in the open:
+in-progress drafts are available for all to review and to experiment
+with implementations.  New module or extension drafts can change
+during the development process - sometimes in ways that are
+incompatible with previous drafts.  This flexibility can present a
+challenge for RISC-V Linux maintenance.  Linux maintainers disapprove
+of churn, and the Linux development process prefers well-reviewed and
+tested code over experimental code.  We wish to extend these same
+principles to the RISC-V-related code that will be accepted for
+inclusion in the kernel.
+
+Submit Checklist Addendum
+-------------------------
+We'll only accept patches for new modules or extensions if the
+specifications for those modules or extensions are listed as being
+"Frozen" or "Ratified" by the RISC-V Foundation.  (Developers may, of
+course, maintain their own Linux kernel trees that contain code for
+any draft extensions that they wish.)
+
+Additionally, the RISC-V specification allows implementors to create
+their own custom extensions.  These custom extensions aren't required
+to go through any review or ratification process by the RISC-V
+Foundation.  To avoid the maintenance complexity and potential
+performance impact of adding kernel code for implementor-specific
+RISC-V extensions, we'll only to accept patches for extensions that
+have been officially frozen or ratified by the RISC-V Foundation.
+(Implementors, may, of course, maintain their own Linux kernel trees
+containing code for any custom extensions that they wish.)
index ffa3371bc750c673bbb6546ce13a5afe0e3301b7..cf6ccca6e61cfbb3e0594522d582792233dce769 100644 (file)
@@ -720,7 +720,7 @@ F:  Documentation/devicetree/bindings/i2c/i2c-altera.txt
 F:     drivers/i2c/busses/i2c-altera.c
 
 ALTERA MAILBOX DRIVER
-M:     Ley Foon Tan <lftan@altera.com>
+M:     Ley Foon Tan <ley.foon.tan@intel.com>
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/mailbox/mailbox-altera.c
@@ -771,6 +771,8 @@ F:  drivers/thermal/thermal_mmio.c
 
 AMAZON ETHERNET DRIVERS
 M:     Netanel Belgazal <netanel@amazon.com>
+M:     Arthur Kiyanovski <akiyano@amazon.com>
+R:     Guy Tzalik <gtzalik@amazon.com>
 R:     Saeed Bishara <saeedb@amazon.com>
 R:     Zorik Machulsky <zorik@amazon.com>
 L:     netdev@vger.kernel.org
@@ -1405,7 +1407,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
 
 ARM/ACTIONS SEMI ARCHITECTURE
 M:     Andreas Färber <afaerber@suse.de>
-R:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 N:     owl
@@ -3148,7 +3150,7 @@ S:        Maintained
 F:     arch/mips/net/
 
 BPF JIT for NFP NICs
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Supported
@@ -7034,6 +7036,7 @@ L:        linux-acpi@vger.kernel.org
 S:     Maintained
 F:     Documentation/firmware-guide/acpi/gpio-properties.rst
 F:     drivers/gpio/gpiolib-acpi.c
+F:     drivers/gpio/gpiolib-acpi.h
 
 GPIO IR Transmitter
 M:     Sean Young <sean@mess.org>
@@ -11428,7 +11431,7 @@ F:      include/uapi/linux/netrom.h
 F:     net/netrom/
 
 NETRONOME ETHERNET DRIVERS
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     oss-drivers@netronome.com
 S:     Maintained
 F:     drivers/net/ethernet/netronome/
@@ -11457,8 +11460,8 @@ M:      "David S. Miller" <davem@davemloft.net>
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 S:     Odd Fixes
 F:     Documentation/devicetree/bindings/net/
 F:     drivers/net/
@@ -11499,8 +11502,8 @@ M:      "David S. Miller" <davem@davemloft.net>
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 B:     mailto:netdev@vger.kernel.org
 S:     Maintained
 F:     net/
@@ -11545,7 +11548,7 @@ M:      "David S. Miller" <davem@davemloft.net>
 M:     Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 M:     Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
 L:     netdev@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 S:     Maintained
 F:     net/ipv4/
 F:     net/ipv6/
@@ -11588,7 +11591,7 @@ M:      Boris Pismenny <borisp@mellanox.com>
 M:     Aviad Yehezkel <aviadye@mellanox.com>
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     net/tls/*
@@ -11600,7 +11603,7 @@ L:      linux-wireless@vger.kernel.org
 Q:     http://patchwork.kernel.org/project/linux-wireless/list/
 
 NETDEVSIM
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 S:     Maintained
 F:     drivers/net/netdevsim/*
 
@@ -11677,7 +11680,7 @@ F:      Documentation/scsi/NinjaSCSI.txt
 F:     drivers/scsi/nsp32*
 
 NIOS2 ARCHITECTURE
-M:     Ley Foon Tan <lftan@altera.com>
+M:     Ley Foon Tan <ley.foon.tan@intel.com>
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
 S:     Maintained
@@ -12561,7 +12564,7 @@ F:      Documentation/devicetree/bindings/pci/aardvark-pci.txt
 F:     drivers/pci/controller/pci-aardvark.c
 
 PCI DRIVER FOR ALTERA PCIE IP
-M:     Ley Foon Tan <lftan@altera.com>
+M:     Ley Foon Tan <ley.foon.tan@intel.com>
 L:     rfi@lists.rocketboards.org (moderated for non-subscribers)
 L:     linux-pci@vger.kernel.org
 S:     Supported
@@ -12740,7 +12743,7 @@ S:      Supported
 F:     Documentation/PCI/pci-error-recovery.rst
 
 PCI MSI DRIVER FOR ALTERA MSI IP
-M:     Ley Foon Tan <lftan@altera.com>
+M:     Ley Foon Tan <ley.foon.tan@intel.com>
 L:     rfi@lists.rocketboards.org (moderated for non-subscribers)
 L:     linux-pci@vger.kernel.org
 S:     Supported
@@ -13676,7 +13679,6 @@ F:      drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM ETHQOS ETHERNET DRIVER
 M:     Vinod Koul <vkoul@kernel.org>
-M:     Niklas Cassel <niklas.cassel@linaro.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -14118,6 +14120,7 @@ M:      Paul Walmsley <paul.walmsley@sifive.com>
 M:     Palmer Dabbelt <palmer@dabbelt.com>
 M:     Albert Ou <aou@eecs.berkeley.edu>
 L:     linux-riscv@lists.infradead.org
+P:     Documentation/riscv/patch-acceptance.rst
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
 S:     Supported
 F:     arch/riscv/
@@ -14545,8 +14548,6 @@ F:      include/linux/platform_data/spi-s3c64xx.h
 
 SAMSUNG SXGBE DRIVERS
 M:     Byungho An <bh74.an@samsung.com>
-M:     Girish K S <ks.giri@samsung.com>
-M:     Vipul Pandya <vipul.pandya@samsung.com>
 S:     Supported
 L:     netdev@vger.kernel.org
 F:     drivers/net/ethernet/samsung/sxgbe/
@@ -18041,7 +18042,7 @@ XDP (eXpress Data Path)
 M:     Alexei Starovoitov <ast@kernel.org>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     David S. Miller <davem@davemloft.net>
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 M:     Jesper Dangaard Brouer <hawk@kernel.org>
 M:     John Fastabend <john.fastabend@gmail.com>
 L:     netdev@vger.kernel.org
index caf14acf1953947ea123cb0055f9a131d5979790..c50ef91f61363588e3750453baa95ed1a8a1731e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc7
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index 41b16f21beecac28099ed7ee154db6947b6b0d14..0b8b63d0bec153285e1e4888a07a4fe9db8f40bf 100644 (file)
 #endif
 
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-       ST2     r58, r59, PT_sp + 12
+       ST2     r58, r59, PT_r58
 #endif
 
 .endm
 
        LD2     gp, fp, PT_r26          ; gp (r26), fp (r27)
 
-       ld      r12, [sp, PT_sp + 4]
-       ld      r30, [sp, PT_sp + 8]
+       ld      r12, [sp, PT_r12]
+       ld      r30, [sp, PT_r30]
 
        ; Restore SP (into AUX_USER_SP) only if returning to U mode
        ;  - for K mode, it will be implicitly restored as stack is unwound
 #endif
 
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-       LD2     r58, r59, PT_sp + 12
+       LD2     r58, r59, PT_r58
 #endif
 .endm
 
index 9a74ce71a7674f99349b51ec95cc44915bcc1aff..30ac40fed2c5c5affb5632e3979b84975d41698c 100644 (file)
@@ -8,7 +8,6 @@
 #define _ASM_ARC_HUGEPAGE_H
 
 #include <linux/types.h>
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline pte_t pmd_pte(pmd_t pmd)
index 1f621e416521f8d1651453da9bee876ab864d21b..c783bcd35eb88c88d8ac39f2af097918bcf26be9 100644 (file)
@@ -66,7 +66,15 @@ int main(void)
 
        DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
        DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
-       DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
+
+#ifdef CONFIG_ISA_ARCV2
+       OFFSET(PT_r12, pt_regs, r12);
+       OFFSET(PT_r30, pt_regs, r30);
+#endif
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       OFFSET(PT_r58, pt_regs, r58);
+       OFFSET(PT_r59, pt_regs, r59);
+#endif
 
        return 0;
 }
index a376a50d3fea8f8d1a478f702beb8843630ae176..a931d0a256d01ab0e78d4b8402c77d2567792bff 100644 (file)
@@ -7,7 +7,7 @@
 menuconfig ARC_PLAT_EZNPS
        bool "\"EZchip\" ARC dev platform"
        select CPU_BIG_ENDIAN
-       select CLKSRC_NPS
+       select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
        select EZNPS_GIC
        select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
        help
index ba75e3661a4159afa45ac15516db9e16d2fdae29..96dab76da3b3962b8b22899516f47dd3274dcbca 100644 (file)
@@ -72,6 +72,7 @@ config ARM
        select HAVE_ARM_SMCCC if CPU_V7
        select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
        select HAVE_CONTEXT_TRACKING
+       select HAVE_COPY_THREAD_TLS
        select HAVE_C_RECORDMCOUNT
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS if MMU
index 820ce3b60bb6c50a5b7a44b457e428b4383536d4..669559c9c95b3a83c93e412d7f526cbc6131077c 100644 (file)
@@ -167,11 +167,7 @@ mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
 
 &pcie1_rc {
        status = "okay";
-       gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
-};
-
-&pcie1_ep {
-       gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+       gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>;
 };
 
 &mmc1 {
index a064f13b38802d1483796a7e5c5e93c8e03bb640..ddf123620e962ed71e155136ce1ccf48cf4cf19d 100644 (file)
@@ -147,10 +147,6 @@ &pcie1_rc {
        gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
 };
 
-&pcie1_ep {
-       gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
-};
-
 &mailbox5 {
        status = "okay";
        mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
index bc76f1705c0f667dab3ad9af455b5f7685823e08..a813a0cf3ff39a97af53723a5973fe721a1c0117 100644 (file)
@@ -29,6 +29,27 @@ memory@0 {
                reg = <0x0 0x80000000 0x0 0x80000000>;
        };
 
+       main_12v0: fixedregulator-main_12v0 {
+               /* main supply */
+               compatible = "regulator-fixed";
+               regulator-name = "main_12v0";
+               regulator-min-microvolt = <12000000>;
+               regulator-max-microvolt = <12000000>;
+               regulator-always-on;
+               regulator-boot-on;
+       };
+
+       evm_5v0: fixedregulator-evm_5v0 {
+               /* Output of TPS54531D */
+               compatible = "regulator-fixed";
+               regulator-name = "evm_5v0";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               vin-supply = <&main_12v0>;
+               regulator-always-on;
+               regulator-boot-on;
+       };
+
        vdd_3v3: fixedregulator-vdd_3v3 {
                compatible = "regulator-fixed";
                regulator-name = "vdd_3v3";
@@ -547,10 +568,6 @@ &pcie1_rc {
        gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
 };
 
-&pcie1_ep {
-       gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
-};
-
 &mcasp3 {
        #sound-dai-cells = <0>;
        assigned-clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
index c1c9cd30f9803cd20cf7bdcb4f15b9479a4e0b39..13f7aefe045e6d59581bcf4d37ed4e328917c507 100644 (file)
@@ -258,9 +258,9 @@ fan@3 {
                };
        };
 
-       pca0: pca9552@60 {
+       pca0: pca9552@61 {
                compatible = "nxp,pca9552";
-               reg = <0x60>;
+               reg = <0x61>;
                #address-cells = <1>;
                #size-cells = <0>;
 
@@ -519,371 +519,6 @@ &i2c12 {
        status = "okay";
 };
 
-&i2c13 {
-       status = "okay";
-};
-
-&i2c14 {
-       status = "okay";
-};
-
-&i2c15 {
-       status = "okay";
-};
-
-&i2c0 {
-       status = "okay";
-};
-
-&i2c1 {
-       status = "okay";
-};
-
-&i2c2 {
-       status = "okay";
-};
-
-&i2c3 {
-       status = "okay";
-
-       power-supply@68 {
-               compatible = "ibm,cffps2";
-               reg = <0x68>;
-       };
-
-       power-supply@69 {
-               compatible = "ibm,cffps2";
-               reg = <0x69>;
-       };
-
-       power-supply@6a {
-               compatible = "ibm,cffps2";
-               reg = <0x6a>;
-       };
-
-       power-supply@6b {
-               compatible = "ibm,cffps2";
-               reg = <0x6b>;
-       };
-};
-
-&i2c4 {
-       status = "okay";
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@49 {
-               compatible = "ti,tmp275";
-               reg = <0x49>;
-       };
-
-       tmp275@4a {
-               compatible = "ti,tmp275";
-               reg = <0x4a>;
-       };
-};
-
-&i2c5 {
-       status = "okay";
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@49 {
-               compatible = "ti,tmp275";
-               reg = <0x49>;
-       };
-};
-
-&i2c6 {
-       status = "okay";
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@4a {
-               compatible = "ti,tmp275";
-               reg = <0x4a>;
-       };
-
-       tmp275@4b {
-               compatible = "ti,tmp275";
-               reg = <0x4b>;
-       };
-};
-
-&i2c7 {
-       status = "okay";
-
-       si7021-a20@20 {
-               compatible = "silabs,si7020";
-               reg = <0x20>;
-       };
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       max31785@52 {
-               compatible = "maxim,max31785a";
-               reg = <0x52>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               fan@0 {
-                       compatible = "pmbus-fan";
-                       reg = <0>;
-                       tach-pulses = <2>;
-               };
-
-               fan@1 {
-                       compatible = "pmbus-fan";
-                       reg = <1>;
-                       tach-pulses = <2>;
-               };
-
-               fan@2 {
-                       compatible = "pmbus-fan";
-                       reg = <2>;
-                       tach-pulses = <2>;
-               };
-
-               fan@3 {
-                       compatible = "pmbus-fan";
-                       reg = <3>;
-                       tach-pulses = <2>;
-               };
-       };
-
-       pca0: pca9552@60 {
-               compatible = "nxp,pca9552";
-               reg = <0x60>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               gpio-controller;
-               #gpio-cells = <2>;
-
-               gpio@0 {
-                       reg = <0>;
-               };
-
-               gpio@1 {
-                       reg = <1>;
-               };
-
-               gpio@2 {
-                       reg = <2>;
-               };
-
-               gpio@3 {
-                       reg = <3>;
-               };
-
-               gpio@4 {
-                       reg = <4>;
-               };
-
-               gpio@5 {
-                       reg = <5>;
-               };
-
-               gpio@6 {
-                       reg = <6>;
-               };
-
-               gpio@7 {
-                       reg = <7>;
-               };
-
-               gpio@8 {
-                       reg = <8>;
-               };
-
-               gpio@9 {
-                       reg = <9>;
-               };
-
-               gpio@10 {
-                       reg = <10>;
-               };
-
-               gpio@11 {
-                       reg = <11>;
-               };
-
-               gpio@12 {
-                       reg = <12>;
-               };
-
-               gpio@13 {
-                       reg = <13>;
-               };
-
-               gpio@14 {
-                       reg = <14>;
-               };
-
-               gpio@15 {
-                       reg = <15>;
-               };
-       };
-
-       dps: dps310@76 {
-               compatible = "infineon,dps310";
-               reg = <0x76>;
-               #io-channel-cells = <0>;
-       };
-};
-
-&i2c8 {
-       status = "okay";
-
-       ucd90320@b {
-               compatible = "ti,ucd90160";
-               reg = <0x0b>;
-       };
-
-       ucd90320@c {
-               compatible = "ti,ucd90160";
-               reg = <0x0c>;
-       };
-
-       ucd90320@11 {
-               compatible = "ti,ucd90160";
-               reg = <0x11>;
-       };
-
-       rtc@32 {
-               compatible = "epson,rx8900";
-               reg = <0x32>;
-       };
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@4a {
-               compatible = "ti,tmp275";
-               reg = <0x4a>;
-       };
-};
-
-&i2c9 {
-       status = "okay";
-
-       ir35221@42 {
-               compatible = "infineon,ir35221";
-               reg = <0x42>;
-       };
-
-       ir35221@43 {
-               compatible = "infineon,ir35221";
-               reg = <0x43>;
-       };
-
-       ir35221@44 {
-               compatible = "infineon,ir35221";
-               reg = <0x44>;
-       };
-
-       tmp423a@4c {
-               compatible = "ti,tmp423";
-               reg = <0x4c>;
-       };
-
-       tmp423b@4d {
-               compatible = "ti,tmp423";
-               reg = <0x4d>;
-       };
-
-       ir35221@72 {
-               compatible = "infineon,ir35221";
-               reg = <0x72>;
-       };
-
-       ir35221@73 {
-               compatible = "infineon,ir35221";
-               reg = <0x73>;
-       };
-
-       ir35221@74 {
-               compatible = "infineon,ir35221";
-               reg = <0x74>;
-       };
-};
-
-&i2c10 {
-       status = "okay";
-
-       ir35221@42 {
-               compatible = "infineon,ir35221";
-               reg = <0x42>;
-       };
-
-       ir35221@43 {
-               compatible = "infineon,ir35221";
-               reg = <0x43>;
-       };
-
-       ir35221@44 {
-               compatible = "infineon,ir35221";
-               reg = <0x44>;
-       };
-
-       tmp423a@4c {
-               compatible = "ti,tmp423";
-               reg = <0x4c>;
-       };
-
-       tmp423b@4d {
-               compatible = "ti,tmp423";
-               reg = <0x4d>;
-       };
-
-       ir35221@72 {
-               compatible = "infineon,ir35221";
-               reg = <0x72>;
-       };
-
-       ir35221@73 {
-               compatible = "infineon,ir35221";
-               reg = <0x73>;
-       };
-
-       ir35221@74 {
-               compatible = "infineon,ir35221";
-               reg = <0x74>;
-       };
-};
-
-&i2c11 {
-       status = "okay";
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@49 {
-               compatible = "ti,tmp275";
-               reg = <0x49>;
-       };
-};
-
-&i2c12 {
-       status = "okay";
-};
-
 &i2c13 {
        status = "okay";
 
index f02de4ab058cd516cb956a3d8ebfcb9ef87028b0..ff49ec76fa7cd132f025baa177f537e9a6652232 100644 (file)
@@ -122,37 +122,6 @@ flash@0 {
        };
 };
 
-&fmc {
-       status = "okay";
-       flash@0 {
-               status = "okay";
-               m25p,fast-read;
-               label = "bmc";
-               spi-max-frequency = <50000000>;
-#include "openbmc-flash-layout-128.dtsi"
-       };
-
-       flash@1 {
-               status = "okay";
-               m25p,fast-read;
-               label = "alt-bmc";
-               spi-max-frequency = <50000000>;
-       };
-};
-
-&spi1 {
-       status = "okay";
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_spi1_default>;
-
-       flash@0 {
-               status = "okay";
-               m25p,fast-read;
-               label = "pnor";
-               spi-max-frequency = <100000000>;
-       };
-};
-
 &mac2 {
        status = "okay";
        pinctrl-names = "default";
@@ -165,6 +134,11 @@ &mac2 {
 
 &emmc {
        status = "okay";
+};
+
+&fsim0 {
+       status = "okay";
+
        #address-cells = <2>;
        #size-cells = <0>;
 
@@ -820,373 +794,6 @@ &wdt2 {
        status = "okay";
 };
 
-&i2c0 {
-       status = "okay";
-};
-
-&i2c1 {
-       status = "okay";
-};
-
-&i2c2 {
-       status = "okay";
-};
-
-&i2c3 {
-       status = "okay";
-
-       bmp: bmp280@77 {
-               compatible = "bosch,bmp280";
-               reg = <0x77>;
-               #io-channel-cells = <1>;
-       };
-
-       max31785@52 {
-               compatible = "maxim,max31785a";
-               reg = <0x52>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               fan@0 {
-                       compatible = "pmbus-fan";
-                       reg = <0>;
-                       tach-pulses = <2>;
-                       maxim,fan-rotor-input = "tach";
-                       maxim,fan-pwm-freq = <25000>;
-                       maxim,fan-dual-tach;
-                       maxim,fan-no-watchdog;
-                       maxim,fan-no-fault-ramp;
-                       maxim,fan-ramp = <2>;
-                       maxim,fan-fault-pin-mon;
-               };
-
-               fan@1 {
-                       compatible = "pmbus-fan";
-                       reg = <1>;
-                       tach-pulses = <2>;
-                       maxim,fan-rotor-input = "tach";
-                       maxim,fan-pwm-freq = <25000>;
-                       maxim,fan-dual-tach;
-                       maxim,fan-no-watchdog;
-                       maxim,fan-no-fault-ramp;
-                       maxim,fan-ramp = <2>;
-                       maxim,fan-fault-pin-mon;
-               };
-
-               fan@2 {
-                       compatible = "pmbus-fan";
-                       reg = <2>;
-                       tach-pulses = <2>;
-                       maxim,fan-rotor-input = "tach";
-                       maxim,fan-pwm-freq = <25000>;
-                       maxim,fan-dual-tach;
-                       maxim,fan-no-watchdog;
-                       maxim,fan-no-fault-ramp;
-                       maxim,fan-ramp = <2>;
-                       maxim,fan-fault-pin-mon;
-               };
-
-               fan@3 {
-                       compatible = "pmbus-fan";
-                       reg = <3>;
-                       tach-pulses = <2>;
-                       maxim,fan-rotor-input = "tach";
-                       maxim,fan-pwm-freq = <25000>;
-                       maxim,fan-dual-tach;
-                       maxim,fan-no-watchdog;
-                       maxim,fan-no-fault-ramp;
-                       maxim,fan-ramp = <2>;
-                       maxim,fan-fault-pin-mon;
-               };
-       };
-
-       dps: dps310@76 {
-               compatible = "infineon,dps310";
-               reg = <0x76>;
-               #io-channel-cells = <0>;
-       };
-
-       pca0: pca9552@60 {
-               compatible = "nxp,pca9552";
-               reg = <0x60>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               gpio-controller;
-               #gpio-cells = <2>;
-
-               gpio@0 {
-                       reg = <0>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@1 {
-                       reg = <1>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@2 {
-                       reg = <2>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@3 {
-                       reg = <3>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@4 {
-                       reg = <4>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@5 {
-                       reg = <5>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@6 {
-                       reg = <6>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@7 {
-                       reg = <7>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@8 {
-                       reg = <8>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@9 {
-                       reg = <9>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@10 {
-                       reg = <10>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@11 {
-                       reg = <11>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@12 {
-                       reg = <12>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@13 {
-                       reg = <13>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@14 {
-                       reg = <14>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@15 {
-                       reg = <15>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-       };
-
-       power-supply@68 {
-               compatible = "ibm,cffps1";
-               reg = <0x68>;
-       };
-
-       power-supply@69 {
-               compatible = "ibm,cffps1";
-               reg = <0x69>;
-       };
-};
-
-&i2c4 {
-       status = "okay";
-
-       tmp423a@4c {
-               compatible = "ti,tmp423";
-               reg = <0x4c>;
-       };
-
-       ir35221@70 {
-               compatible = "infineon,ir35221";
-               reg = <0x70>;
-       };
-
-       ir35221@71 {
-               compatible = "infineon,ir35221";
-               reg = <0x71>;
-       };
-};
-
-&i2c5 {
-       status = "okay";
-
-       tmp423a@4c {
-               compatible = "ti,tmp423";
-               reg = <0x4c>;
-       };
-
-       ir35221@70 {
-               compatible = "infineon,ir35221";
-               reg = <0x70>;
-       };
-
-       ir35221@71 {
-               compatible = "infineon,ir35221";
-               reg = <0x71>;
-       };
-};
-
-&i2c7 {
-       status = "okay";
-};
-
-&i2c9 {
-       status = "okay";
-
-       tmp275@4a {
-               compatible = "ti,tmp275";
-               reg = <0x4a>;
-       };
-};
-
-&i2c10 {
-       status = "okay";
-};
-
-&i2c11 {
-       status = "okay";
-
-       pca9552: pca9552@60 {
-               compatible = "nxp,pca9552";
-               reg = <0x60>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-               gpio-controller;
-               #gpio-cells = <2>;
-
-               gpio-line-names = "PS_SMBUS_RESET_N", "APSS_RESET_N",
-                       "GPU0_TH_OVERT_N_BUFF", "GPU1_TH_OVERT_N_BUFF",
-                       "GPU2_TH_OVERT_N_BUFF", "GPU3_TH_OVERT_N_BUFF",
-                       "GPU4_TH_OVERT_N_BUFF", "GPU5_TH_OVERT_N_BUFF",
-                       "GPU0_PWR_GOOD_BUFF", "GPU1_PWR_GOOD_BUFF",
-                       "GPU2_PWR_GOOD_BUFF", "GPU3_PWR_GOOD_BUFF",
-                       "GPU4_PWR_GOOD_BUFF", "GPU5_PWR_GOOD_BUFF",
-                       "12V_BREAKER_FLT_N", "THROTTLE_UNLATCHED_N";
-
-               gpio@0 {
-                       reg = <0>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@1 {
-                       reg = <1>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@2 {
-                       reg = <2>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@3 {
-                       reg = <3>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@4 {
-                       reg = <4>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@5 {
-                       reg = <5>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@6 {
-                       reg = <6>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@7 {
-                       reg = <7>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@8 {
-                       reg = <8>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@9 {
-                       reg = <9>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@10 {
-                       reg = <10>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@11 {
-                       reg = <11>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@12 {
-                       reg = <12>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@13 {
-                       reg = <13>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@14 {
-                       reg = <14>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@15 {
-                       reg = <15>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-       };
-
-       rtc@32 {
-               compatible = "epson,rx8900";
-               reg = <0x32>;
-       };
-
-       eeprom@51 {
-               compatible = "atmel,24c64";
-               reg = <0x51>;
-       };
-
-       ucd90160@64 {
-               compatible = "ti,ucd90160";
-               reg = <0x64>;
-       };
-};
-
-&i2c12 {
-       status = "okay";
-};
-
-&i2c13 {
-       status = "okay";
-};
-
 &pinctrl {
        /* Hog these as no driver is probed for the entire LPC block */
        pinctrl-names = "default";
index 5f6142d99eeb68c427333b3a52a40e17bdc90526..b72afbaadaf81fce5bc8ab687af34ae14222baf0 100644 (file)
@@ -163,26 +163,6 @@ flash@2 {
                                spi-max-frequency = <50000000>;
                                status = "disabled";
                        };
-
-                       fsim0: fsi@1e79b000 {
-                               compatible = "aspeed,ast2600-fsi-master", "fsi-master";
-                               reg = <0x1e79b000 0x94>;
-                               interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_fsi1_default>;
-                               clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
-                               status = "disabled";
-                       };
-
-                       fsim1: fsi@1e79b100 {
-                               compatible = "aspeed,ast2600-fsi-master", "fsi-master";
-                               reg = <0x1e79b100 0x94>;
-                               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_fsi2_default>;
-                               clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
-                               status = "disabled";
-                       };
                };
 
                mdio0: mdio@1e650000 {
@@ -595,6 +575,25 @@ i2c: bus@1e78a000 {
                                ranges = <0 0x1e78a000 0x1000>;
                        };
 
+                       fsim0: fsi@1e79b000 {
+                               compatible = "aspeed,ast2600-fsi-master", "fsi-master";
+                               reg = <0x1e79b000 0x94>;
+                               interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_fsi1_default>;
+                               clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+                               status = "disabled";
+                       };
+
+                       fsim1: fsi@1e79b100 {
+                               compatible = "aspeed,ast2600-fsi-master", "fsi-master";
+                               reg = <0x1e79b100 0x94>;
+                               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_fsi2_default>;
+                               clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+                               status = "disabled";
+                       };
                };
        };
 };
index e43bccb78ab2ba44130603fb102254a3081ca858..d8f3821a0ffdc33069769942c660866efd824759 100644 (file)
@@ -8,7 +8,7 @@
 /dts-v1/;
 
 #include "imx6dl.dtsi"
-#include "imx6qdl-icore.dtsi"
+#include "imx6qdl-icore-1.5.dtsi"
 
 / {
        model = "Engicam i.CoreM6 DualLite/Solo MIPI Starter Kit";
index 5219553df1e73529f61cf790c476e25bbd000564..bb74fc62d9135863d6f58e1e1a0b53b1a9bb4dd7 100644 (file)
@@ -63,7 +63,7 @@ sgtl5000: codec@a {
                #sound-dai-cells = <0>;
                clocks = <&clk_ext_audio_codec>;
                VDDA-supply = <&reg_3p3v>;
-               VDDIO-supply = <&reg_3p3v>;
+               VDDIO-supply = <&sw2_reg>;
        };
 };
 
index 845cfad99bf9c41567cd935467e180409aef595d..87f0aa897086e100d9ea368f5fec9dfb269cec62 100644 (file)
@@ -204,7 +204,7 @@ eeprom@50 {
        };
 
        rtc@56 {
-               compatible = "rv3029c2";
+               compatible = "microcrystal,rv3029";
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_rtc_hw300>;
                reg = <0x56>;
index 71ca76a5e4a513730c1c0c1c0fe5b832c469a24e..fe59dde41b649cb7ab0a386b2faab9738a69dad6 100644 (file)
@@ -749,10 +749,6 @@ &reg_vdd1p1 {
        vin-supply = <&vgen5_reg>;
 };
 
-&reg_vdd3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &reg_vdd2p5 {
        vin-supply = <&vgen5_reg>;
 };
index 4829aa682aeb01cbc3e9a0c7485e87ca59bebdbf..bc86cfaaa9c270f80cf260b0599dfd931af516fe 100644 (file)
@@ -584,10 +584,6 @@ &reg_vdd1p1 {
        vin-supply = <&sw2_reg>;
 };
 
-&reg_vdd3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &reg_vdd2p5 {
        vin-supply = <&sw2_reg>;
 };
index 3e1d32fdf4b856f4742ae7685fa3d7e48da4d440..5ace9e6acf85ca9839d5c15db00d77662288146e 100644 (file)
@@ -265,10 +265,6 @@ &pwm1 {
        status = "okay";
 };
 
-&reg_3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &snvs_poweroff {
        status = "okay";
 };
index f1830ed387a5512ed99ea71b82cbc09876d2f8aa..91a7548fdb8db4d4339f844214778fd80c0702d7 100644 (file)
@@ -159,10 +159,6 @@ &reg_vdd1p1 {
        vin-supply = <&vgen6_reg>;
 };
 
-&reg_vdd3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &reg_vdd2p5 {
        vin-supply = <&vgen6_reg>;
 };
index a8ee7087af5a5e769d505a048f7cc438c14a7a47..5a63ca6157229ccc48d175f8045ef9446b203720 100644 (file)
@@ -141,10 +141,6 @@ &reg_vdd1p1 {
        vin-supply = <&vgen6_reg>;
 };
 
-&reg_vdd3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &reg_vdd2p5 {
        vin-supply = <&vgen6_reg>;
 };
index 1fb1ec5d3d70717f08026f47e42d9bfee0c81ca8..6d16e32aed899122efcb4279581844714e05f0ca 100644 (file)
@@ -49,3 +49,7 @@ memory@80000000 {
                reg = <0x80000000 0x10000000>;
        };
 };
+
+&gpmi {
+       status = "okay";
+};
index d37a1927c88ea3fae91f4d1e67407f5d39036f9f..ab91c98f21241127736dada1205d2b57ea7eef59 100644 (file)
@@ -37,10 +37,10 @@ cpus {
                #address-cells = <1>;
                #size-cells = <0>;
 
-               cpu0: cpu@0 {
+               cpu0: cpu@f00 {
                        compatible = "arm,cortex-a7";
                        device_type = "cpu";
-                       reg = <0>;
+                       reg = <0xf00>;
                };
        };
 
index 5a7e3e5caebe2fc4e7f0880ab9f37d8ff026f9b3..3c534cd50ee3b2527c0e287d5216a1ffc95807c7 100644 (file)
@@ -253,7 +253,7 @@ mali: gpu@c0000 {
 &aobus {
        pmu: pmu@e0 {
                compatible = "amlogic,meson8-pmu", "syscon";
-               reg = <0xe0 0x8>;
+               reg = <0xe0 0x18>;
        };
 
        pinctrl_aobus: pinctrl@84 {
index d9762de0ed34be80c943e1bb3e237253ca6fc83a..6f480827b94d36fe47dd3c0c09dd9c8c9e1363ec 100644 (file)
@@ -356,7 +356,7 @@ gcb5: gpio@d4019108 {
 
                        twsi1: i2c@d4011000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4011000 0x1000>;
+                               reg = <0xd4011000 0x70>;
                                interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI0>;
                                resets = <&soc_clocks MMP2_CLK_TWSI0>;
@@ -368,7 +368,7 @@ twsi1: i2c@d4011000 {
 
                        twsi2: i2c@d4031000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4031000 0x1000>;
+                               reg = <0xd4031000 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <0>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI1>;
@@ -380,7 +380,7 @@ twsi2: i2c@d4031000 {
 
                        twsi3: i2c@d4032000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4032000 0x1000>;
+                               reg = <0xd4032000 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <1>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI2>;
@@ -392,7 +392,7 @@ twsi3: i2c@d4032000 {
 
                        twsi4: i2c@d4033000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4033000 0x1000>;
+                               reg = <0xd4033000 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <2>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI3>;
@@ -405,7 +405,7 @@ twsi4: i2c@d4033000 {
 
                        twsi5: i2c@d4033800 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4033800 0x1000>;
+                               reg = <0xd4033800 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <3>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI4>;
@@ -417,7 +417,7 @@ twsi5: i2c@d4033800 {
 
                        twsi6: i2c@d4034000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4034000 0x1000>;
+                               reg = <0xd4034000 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <4>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI5>;
index fb928503ad45da35e12b24189e3ece6ae66d1cca..d9be511f054f0ecf4e5be68bd68bfe7eb25d0d09 100644 (file)
@@ -101,7 +101,7 @@ usb-hub {
                initial-mode = <1>; /* initialize in HUB mode */
                disabled-ports = <1>;
                intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
-               reset-gpios = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */
+               reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */
                connect-gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */
                refclk-frequency = <19200000>;
        };
index cea1c27c29cba2d81aefe19b5b94f94d55b87c85..46e478fb5ea203a7a42650ce4233d801db0c4277 100644 (file)
@@ -226,8 +226,8 @@ void release_thread(struct task_struct *dead_task)
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 int
-copy_thread(unsigned long clone_flags, unsigned long stack_start,
-           unsigned long stk_sz, struct task_struct *p)
+copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
+           unsigned long stk_sz, struct task_struct *p, unsigned long tls)
 {
        struct thread_info *thread = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);
@@ -261,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
        clear_ptrace_hw_breakpoint(p);
 
        if (clone_flags & CLONE_SETTLS)
-               thread->tp_value[0] = childregs->ARM_r3;
+               thread->tp_value[0] = tls;
        thread->tp_value[1] = get_tpuser();
 
        thread_notify(THREAD_NOTIFY_COPY, thread);
index dd427bd2768c3cf6f49152a1443cf154a51ad439..02b180ad724540c022d861aab5c6929f37af8060 100644 (file)
@@ -9,6 +9,7 @@ menuconfig ARCH_DAVINCI
        select PM_GENERIC_DOMAINS if PM
        select PM_GENERIC_DOMAINS_OF if PM && OF
        select REGMAP_MMIO
+       select RESET_CONTROLLER
        select HAVE_IDE
        select PINCTRL_SINGLE
 
index 110dcb3314d13f04c4e783a5762dbcd401c6f77e..c65cfc1ad99b4798a69c0b233e984b0eaf01af3f 100644 (file)
@@ -207,7 +207,7 @@ static int __init mmp_dt_init_timer(struct device_node *np)
                ret = clk_prepare_enable(clk);
                if (ret)
                        return ret;
-               rate = clk_get_rate(clk) / 2;
+               rate = clk_get_rate(clk);
        } else if (cpu_is_pj4()) {
                rate = 6500000;
        } else {
index ad08d470a2cac3f9f767844e0857a371ecb7bdf9..dca7d06c0b9386192010ca469f7a751cad515d36 100644 (file)
@@ -95,6 +95,7 @@ config ARCH_OMAP2PLUS
        bool
        select ARCH_HAS_BANDGAP
        select ARCH_HAS_HOLES_MEMORYMODEL
+       select ARCH_HAS_RESET_CONTROLLER
        select ARCH_OMAP
        select CLKSRC_MMIO
        select GENERIC_IRQ_CHIP
@@ -105,11 +106,11 @@ config ARCH_OMAP2PLUS
        select OMAP_DM_TIMER
        select OMAP_GPMC
        select PINCTRL
+       select RESET_CONTROLLER
        select SOC_BUS
        select TI_SYSC
        select OMAP_IRQCHIP
        select CLKSRC_TI_32K
-       select ARCH_HAS_RESET_CONTROLLER
        help
          Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
 
index ca52271de5a880d7e7e51bb05b21efc04ae706aa..e95c224ffc4d876d38f7b2ab5d07c2a1ed61ec59 100644 (file)
@@ -306,10 +306,14 @@ static void __init dra7x_evm_mmc_quirk(void)
 
 static struct clockdomain *ti_sysc_find_one_clockdomain(struct clk *clk)
 {
+       struct clk_hw *hw = __clk_get_hw(clk);
        struct clockdomain *clkdm = NULL;
        struct clk_hw_omap *hwclk;
 
-       hwclk = to_clk_hw_omap(__clk_get_hw(clk));
+       hwclk = to_clk_hw_omap(hw);
+       if (!omap2_clk_is_hw_omap(hw))
+               return NULL;
+
        if (hwclk && hwclk->clkdm_name)
                clkdm = clkdm_lookup(hwclk->clkdm_name);
 
index b1b4476ddb834ba64a8ba93e55e88d3a7282fed1..e688dfad0b726284e49aec814f84a68d15b6176e 100644 (file)
@@ -138,6 +138,7 @@ config ARM64
        select HAVE_CMPXCHG_DOUBLE
        select HAVE_CMPXCHG_LOCAL
        select HAVE_CONTEXT_TRACKING
+       select HAVE_COPY_THREAD_TLS
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
index 96ab0227e82d187bc3b5eb50e3064dcabbe3ef3f..121e6cc4849b45139f5a396cec851352863fe841 100644 (file)
@@ -15,7 +15,7 @@ &mmc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&mmc2_pins>;
        vmmc-supply = <&reg_dcdc1>;
-       vqmmc-supply = <&reg_dcdc1>;
+       vqmmc-supply = <&reg_eldo1>;
        bus-width = <8>;
        non-removable;
        cap-mmc-hw-reset;
index 01a9a52edae4afa88c43f821531345543742a64e..393c1948a4959beb2c07b87456f0a558fffc1c5f 100644 (file)
@@ -140,7 +140,7 @@ &mmc0 {
 &mmc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&mmc1_pins>;
-       vmmc-supply = <&reg_aldo2>;
+       vmmc-supply = <&reg_dcdc1>;
        vqmmc-supply = <&reg_dldo4>;
        mmc-pwrseq = <&wifi_pwrseq>;
        bus-width = <4>;
index 144a2c19ac02659a4b288d0df130ae26303d92cf..d1fc9c2055f4904c19e7661be8c1cb36082423ee 100644 (file)
@@ -61,10 +61,10 @@ cpu3: cpu@3 {
 
        pmu {
                compatible = "arm,armv8-pmuv3";
-               interrupts = <0 120 8>,
-                            <0 121 8>,
-                            <0 122 8>,
-                            <0 123 8>;
+               interrupts = <0 170 4>,
+                            <0 171 4>,
+                            <0 172 4>,
+                            <0 173 4>;
                interrupt-affinity = <&cpu0>,
                                     <&cpu1>,
                                     <&cpu2>,
index 5bd07469766bf53ed4f86b8096b5160bd68fc2c6..a8bb3fa9fec98e994ce2876b95e81e86b8369c02 100644 (file)
@@ -46,25 +46,47 @@ emmc_pwrseq: emmc-pwrseq {
        };
 
        gpio-keys {
-               compatible = "gpio-keys-polled";
-               poll-interval = <100>;
+               compatible = "gpio-keys";
 
                key1 {
                        label = "A";
                        linux,code = <BTN_0>;
                        gpios = <&gpio GPIOH_6 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <34 IRQ_TYPE_EDGE_BOTH>;
                };
 
                key2 {
                        label = "B";
                        linux,code = <BTN_1>;
                        gpios = <&gpio GPIOH_7 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <35 IRQ_TYPE_EDGE_BOTH>;
                };
 
                key3 {
                        label = "C";
                        linux,code = <BTN_2>;
                        gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <2 IRQ_TYPE_EDGE_BOTH>;
+               };
+
+               mic_mute {
+                       label = "MicMute";
+                       linux,code = <SW_MUTE_DEVICE>;
+                       linux,input-type = <EV_SW>;
+                       gpios = <&gpio_ao GPIOE_2 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <99 IRQ_TYPE_EDGE_BOTH>;
+               };
+
+               power_key {
+                       label = "PowerKey";
+                       linux,code = <KEY_POWER>;
+                       gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <3 IRQ_TYPE_EDGE_BOTH>;
                };
        };
 
@@ -569,6 +591,8 @@ &uart_A {
 
        bluetooth {
                compatible = "brcm,bcm43438-bt";
+               interrupt-parent = <&gpio_intc>;
+               interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
                shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
                max-speed = <2000000>;
                clocks = <&wifi32k>;
index 13a3cbe89b5a65deaf03202928d0209fb77dcb46..a6f9b7784e8fc8c98bcf90b152a136cc97691c7c 100644 (file)
@@ -175,7 +175,7 @@ ddr: memory-controller@1080000 {
                dcfg: syscon@1e00000 {
                        compatible = "fsl,ls1028a-dcfg", "syscon";
                        reg = <0x0 0x1e00000 0x0 0x10000>;
-                       big-endian;
+                       little-endian;
                };
 
                rst: syscon@1e60000 {
index 6edbdfe2d0d7c44bec5ff1aeec193b667e2f1050..3d95b66a2d71c0fa97aba2e37d8855c44d295706 100644 (file)
@@ -740,7 +740,7 @@ sdma1: dma-controller@30bd0000 {
                                reg = <0x30bd0000 0x10000>;
                                interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>,
-                                        <&clk IMX8MM_CLK_SDMA1_ROOT>;
+                                        <&clk IMX8MM_CLK_AHB>;
                                clock-names = "ipg", "ahb";
                                #dma-cells = <3>;
                                fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
index 2a759dff9f87168f00a8c68f920cb846cf9d8ba6..596bc65f475c21a291caa3f143a1ac1e27b91d2f 100644 (file)
@@ -421,7 +421,7 @@ magnetometer@1e     {
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_imu>;
                interrupt-parent = <&gpio3>;
-               interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+               interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
                vdd-supply = <&reg_3v3_p>;
                vddio-supply = <&reg_3v3_p>;
        };
index 94090c6fb946a6bee451056b935a00d80fbe0ec7..d43e1299c8ef618d65232a7dfcd7fdc9150ebf5b 100644 (file)
@@ -60,10 +60,10 @@ cpu3: cpu@3 {
 
        pmu {
                compatible = "arm,armv8-pmuv3";
-               interrupts = <0 120 8>,
-                            <0 121 8>,
-                            <0 122 8>,
-                            <0 123 8>;
+               interrupts = <0 170 4>,
+                            <0 171 4>,
+                            <0 172 4>,
+                            <0 173 4>;
                interrupt-affinity = <&cpu0>,
                                     <&cpu1>,
                                     <&cpu2>,
index 76b49f57310152829a1705ae2b7be8e32ec016df..16f1656d5203bd95d9c0095bae10ebf0d17b46fe 100644 (file)
@@ -49,7 +49,8 @@ vcc_sys: vcc-sys {
 
        ir-receiver {
                compatible = "gpio-ir-receiver";
-               gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_HIGH>;
+               gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_LOW>;
+               linux,rc-map-name = "rc-beelink-gs1";
        };
 };
 
index 8dc6c5cdabe62e2f63065abd525ff516fe685387..baf52baaa2a5d0e39c557ada3a18a6b9b89088a4 100644 (file)
 #define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY          __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
index 5d15b4735a0eba76569cae8598aa49468c702bad..cd5de0e40bfa06cf6e6fa5e094a80965b6796599 100644 (file)
@@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)         (pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)         (!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
 #define pte_valid_not_user(pte) \
-       ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+       ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 #define pte_valid_user(pte) \
@@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 /*
  * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
  */
 #define pte_access_permitted(pte, write) \
        (pte_valid_user(pte) && (!(write) || pte_write(pte)))
index 2629a68b87244facd78a6ad5e9d4b18ca6ac18ad..5af82587909ef9931d4201f2a2850feb6d25bdcf 100644 (file)
@@ -42,7 +42,6 @@
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
 
 #ifndef __COMPAT_SYSCALL_NR
 #include <uapi/asm/unistd.h>
index 4703d218663a2ad81e7c8d4fd0749bed8199ef4f..f83a70e07df85ca5029a1e91cde93b8e0dd9fb7e 100644 (file)
@@ -19,5 +19,6 @@
 #define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SET_GET_RLIMIT
 #define __ARCH_WANT_TIME32_SYSCALLS
+#define __ARCH_WANT_SYS_CLONE3
 
 #include <asm-generic/unistd.h>
index 71f788cd2b18772629c4627be979f75753253d08..d54586d5b031f874e7fa70814cd618ac3c16ceca 100644 (file)
@@ -360,8 +360,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 asmlinkage void ret_from_fork(void) asm("ret_from_fork");
 
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
-               unsigned long stk_sz, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
+               unsigned long stk_sz, struct task_struct *p, unsigned long tls)
 {
        struct pt_regs *childregs = task_pt_regs(p);
 
@@ -394,11 +394,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                }
 
                /*
-                * If a TLS pointer was passed to clone (4th argument), use it
-                * for the new thread.
+                * If a TLS pointer was passed to clone, use it for the new
+                * thread.
                 */
                if (clone_flags & CLONE_SETTLS)
-                       p->thread.uw.tp_value = childregs->regs[3];
+                       p->thread.uw.tp_value = tls;
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
index 077b02a2d4d3333e9af010f6c92a011b60f0bd1a..85566d32958f5b8fcc5bdc35ebc2d9d2b2d0469a 100644 (file)
@@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        const struct fault_info *inf;
        struct mm_struct *mm = current->mm;
        vm_fault_t fault, major = 0;
-       unsigned long vm_flags = VM_READ | VM_WRITE;
+       unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
        unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        if (kprobe_page_fault(regs, esr))
index 5a3b15a14a7f116bc6b61a9279ed5ce84723702e..40797cbfba2d67412eee7864325e9f89adeb08d2 100644 (file)
@@ -1070,7 +1070,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
        /*
         * FIXME: Cleanup page tables (also in arch_add_memory() in case
@@ -1079,7 +1078,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
         * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
         * unlocked yet.
         */
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
index 12cd9231c4b8fb33d7687a68fd593271a632c946..0231d69c8bf2bf35285a21a3117c5a8f2ab26f1a 100644 (file)
@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v)                    \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
-               "       if !P3 jump 1b;\n"                              \
+               "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
-               "       if !P3 jump 1b;\n"                              \
+               "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                     \
                "1:     %0 = memw_locked(%2);\n"                        \
                "       %1 = "#op "(%0,%3);\n"                          \
                "       memw_locked(%2,P3)=%1;\n"                       \
-               "       if !P3 jump 1b;\n"                              \
+               "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output), "=&r" (val)                           \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
                "       }"
                "       memw_locked(%2, p3) = %1;"
                "       {"
-               "               if !p3 jump 1b;"
+               "               if (!p3) jump 1b;"
                "       }"
                "2:"
                : "=&r" (__oldval), "=&r" (tmp)
index 47384b094b9445598bb2f859f26bba81e444a396..71429f756af0f45a846598529055b34b1d70f269 100644 (file)
@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
-       "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+       "       {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
-       "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+       "       {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
-       "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+       "       {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -223,7 +223,7 @@ static inline int ffs(int x)
        int r;
 
        asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
-               "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+               "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
                : "=&r" (r)
                : "r" (x)
                : "p0");
index 6091322c3af9639f2eaa144c5842f41e5a08e123..92b8a02e588ac256341a82053df70340089fd34b 100644 (file)
@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
        __asm__ __volatile__ (
        "1:     %0 = memw_locked(%1);\n"    /*  load into retval */
        "       memw_locked(%1,P0) = %2;\n" /*  store into memory */
-       "       if !P0 jump 1b;\n"
+       "       if (!P0) jump 1b;\n"
        : "=&r" (retval)
        : "r" (ptr), "r" (x)
        : "memory", "p0"
index cb635216a732c98c2d05e965762696543d7ec3e4..0191f7c7193e6bd2a8700d2813627b60a701a419 100644 (file)
@@ -16,7 +16,7 @@
            /* For example: %1 = %4 */ \
            insn \
        "2: memw_locked(%3,p2) = %1;\n" \
-       "   if !p2 jump 1b;\n" \
+       "   if (!p2) jump 1b;\n" \
        "   %1 = #0;\n" \
        "3:\n" \
        ".section .fixup,\"ax\"\n" \
@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
        "1: %1 = memw_locked(%3)\n"
        "   {\n"
        "      p2 = cmp.eq(%1,%4)\n"
-       "      if !p2.new jump:NT 3f\n"
+       "      if (!p2.new) jump:NT 3f\n"
        "   }\n"
        "2: memw_locked(%3,p2) = %5\n"
-       "   if !p2 jump 1b\n"
+       "   if (!p2) jump 1b\n"
        "3:\n"
        ".section .fixup,\"ax\"\n"
        "4: %0 = #%6\n"
index 539e3efcf39c6ed595fea142a39384340544e917..b0dbc347317240ca26a73cc422d4ced3154b1e08 100644 (file)
@@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr)
 
 void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
 #define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))
 
 
 #define __raw_writel writel
index bfe07d842ff35c4ac70b9e9d96e2b3ffe9789a8d..ef103b73bec8388df8258e4ac132cbc1db4ebfbc 100644 (file)
@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
                "1:     R6 = memw_locked(%0);\n"
                "       R6 = add(R6,#-1);\n"
                "       memw_locked(%0,P3) = R6\n"
-               "       if !P3 jump 1b;\n"
+               "       if (!P3) jump 1b;\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-               "       { if !P3 jump 1f; }\n"
+               "       { if (!P3) jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       { %0 = P3 }\n"
                "1:\n"
@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0)\n"
                "       { P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "       R6 = memw_locked(%1)\n"
                "       { %0 = #0; P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-               "       { if !P3 jump 1f; }\n"
+               "       { if (!P3) jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       P3 = cmp.eq(R6,#0);\n"
-               "       { if !P3 jump 1b; R6 = #1; }\n"
+               "       { if (!P3) jump 1b; R6 = #1; }\n"
                "       memw_locked(%0,P3) = R6;\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       P3 = cmp.eq(R6,#0);\n"
-               "       { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+               "       { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
index 35f29423fda808f6edd8202da932ce89ae728eae..5ed02f699479ad68c516444dceae7223452c1e0c 100644 (file)
@@ -11,8 +11,6 @@
 #include <linux/thread_info.h>
 #include <linux/module.h>
 
-register unsigned long current_frame_pointer asm("r30");
-
 struct stackframe {
        unsigned long fp;
        unsigned long rets;
@@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace)
 
        low = (unsigned long)task_stack_page(current);
        high = low + THREAD_SIZE;
-       fp = current_frame_pointer;
+       fp = (unsigned long)__builtin_frame_address(0);
 
        while (fp >= low && fp <= (high - sizeof(*frame))) {
                frame = (struct stackframe *)fp;
index 12242c27e2df59257cd60fadbd24480e81d365d8..4023fdbea4902e090e0798cf2c2f1006b1d2b259 100644 (file)
@@ -369,7 +369,7 @@ ret_from_fork:
                R26.L = #LO(do_work_pending);
                R0 = #VM_INT_DISABLE;
        }
-       if P0 jump check_work_pending
+       if (P0) jump check_work_pending
        {
                R0 = R25;
                callr R24
index 58fd67068bac5036e96514039688fc3844a635aa..b01d68a2d5d97360d0b612e465fb70635aa80ef6 100644 (file)
@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
index add388236f4ef2cdf2558b0d282605fadce6912a..ed8e28b0fb3e0375ed8967a3d1e214485db696bb 100644 (file)
@@ -47,7 +47,7 @@ config MIPS
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
        select HAVE_ASM_MODVERSIONS
-       select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
+       select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
        select HAVE_CONTEXT_TRACKING
        select HAVE_COPY_THREAD_TLS
        select HAVE_C_RECORDMCOUNT
index 172801ed35b89994f6a52e492a7c486cb517d821..d859f079b771a158208b0cef571a22a22c3f67ac 100644 (file)
@@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
        -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
        -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
 
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT                := n
+
 # decompressor objects (linked with vmlinuz)
 vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
 
index c46c59b0f1b40ce0b4b9abee2176056b1ba95253..49f0061a60514358358da251adc2a76634eb7969 100644 (file)
@@ -15,7 +15,8 @@
 static inline int __pure __get_cpu_type(const int cpu_type)
 {
        switch (cpu_type) {
-#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF)
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
+    defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
        case CPU_LOONGSON2EF:
 #endif
 
index 4993db40482c80fa17cacec1ef54f4e356b6d26d..ee26f9a4575dfc2b09568855015200043ca0ffbe 100644 (file)
@@ -49,8 +49,26 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-/* How to get the thread information struct from C.  */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
 register struct thread_info *__current_thread_info __asm__("$28");
+#endif
 
 static inline struct thread_info *current_thread_info(void)
 {
index b08825531e9f9a3ce89e926ffc3e096fbfe14fc0..0ae9b4cbc1537ad3604c4b4cbca6179e05671ed7 100644 (file)
@@ -26,8 +26,6 @@
 
 #define __VDSO_USE_SYSCALL             ULLONG_MAX
 
-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
-
 static __always_inline long gettimeofday_fallback(
                                struct __kernel_old_timeval *_tv,
                                struct timezone *_tz)
@@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback(
        return error ? -ret : ret;
 }
 
-#else
-
-static __always_inline long gettimeofday_fallback(
-                               struct __kernel_old_timeval *_tv,
-                               struct timezone *_tz)
-{
-       return -1;
-}
-
-#endif
-
 static __always_inline long clock_gettime_fallback(
                                        clockid_t _clkid,
                                        struct __kernel_timespec *_ts)
index f777e44653d5767b953483f22158d24e84e6461f..47312c5294102264700c12b9955d39b9cd60ab79 100644 (file)
@@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
        return 0;
 }
 
+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
+{
+       int cpu1;
+
+       for_each_possible_cpu(cpu1)
+               if (cpus_are_siblings(cpu, cpu1))
+                       cpumask_set_cpu(cpu1, cpu_map);
+}
+
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
+{
+       int cpu1;
+       int cluster = cpu_cluster(&cpu_data[cpu]);
+
+       for_each_possible_cpu(cpu1)
+               if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+                       cpumask_set_cpu(cpu1, cpu_map);
+}
+
 static int __populate_cache_leaves(unsigned int cpu)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
@@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
        if (c->icache.waysize) {
+               /* L1 caches are per core */
+               fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
                populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+               fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
                populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
        } else {
                populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
        }
 
-       if (c->scache.waysize)
+       if (c->scache.waysize) {
+               /* L2 cache is per cluster */
+               fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
                populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+       }
 
        if (c->tcache.waysize)
                populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
index 3ec69d9cbe88e755e55425779b2c47c761c7a304..561154cbcc401eb8e5eee381af4873f638583fc1 100644 (file)
@@ -1804,7 +1804,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
        unsigned int image_size;
        u8 *image_ptr;
 
-       if (!prog->jit_requested || MIPS_ISA_REV < 2)
+       if (!prog->jit_requested)
                return prog;
 
        tmp = bpf_jit_blind_constants(prog);
index 6ebdc37c89fc6a11a32314136c99a7f1a95bf7f3..6b83b6376a4b58d340f1c464cbbf402c588caf73 100644 (file)
@@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock,
        return __cvdso_clock_gettime32(clock, ts);
 }
 
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
                        struct timezone *tz)
 {
        return __cvdso_gettimeofday(tv, tz);
 }
 
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
 int __vdso_clock_getres(clockid_t clock_id,
                        struct old_timespec32 *res)
 {
@@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock,
        return __cvdso_clock_gettime(clock, ts);
 }
 
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
                        struct timezone *tz)
 {
        return __cvdso_gettimeofday(tv, tz);
 }
 
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
 int __vdso_clock_getres(clockid_t clock_id,
                        struct __kernel_timespec *res)
 {
index d9ac7e6408ef3196253f3c6838d111a51a1f0973..caddded56e77f3603e93bbb0496b52a2dcb3a60a 100644 (file)
@@ -9,7 +9,11 @@
 #define PG_dcache_dirty PG_arch_1
 
 void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
+
 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+#define flush_icache_page flush_icache_page
+
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
@@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size);
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 
 #else
-#include <asm-generic/cacheflush.h>
-#undef flush_icache_range
-#undef flush_icache_page
-#undef flush_icache_user_range
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len);
+#define flush_icache_user_range flush_icache_user_range
+
+#include <asm-generic/cacheflush.h>
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
index 0214e415053902c34d65ecf03d4dd581c49fb72b..6abc58ac406dcd542b3bc5497d80452cba52ff0b 100644 (file)
@@ -195,7 +195,7 @@ extern void paging_init(void);
 #define pte_unmap(pte)         do { } while (0)
 #define pte_unmap_nested(pte)  do { } while (0)
 
-#define pmd_off_k(address)     pmd_offset(pgd_offset_k(address), address)
+#define pmd_off_k(address)     pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address))
 
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 /*
index b16237c95ea33bd1fb75f50b91fbd6ae053b1687..0c29d6cb2c8dfd1667e705710475b003fcad1a85 100644 (file)
@@ -62,6 +62,7 @@ config PARISC
        select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
        select HAVE_KPROBES_ON_FTRACE
        select HAVE_DYNAMIC_FTRACE_WITH_REGS
+       select HAVE_COPY_THREAD_TLS
 
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
index a6c9f49c661287cde50d42c09a3c67bf1f9ddb31..a5f3e50fe97619eff506b92fceb75699bb530548 100644 (file)
@@ -889,8 +889,8 @@ static void print_parisc_device(struct parisc_device *dev)
        static int count;
 
        print_pa_hwpath(dev, hw_path);
-       pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
-               ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
+       pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+               ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
                dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
 
        if (dev->num_addrs) {
index ecc5c277120871efe35a6cebc6158397790d1c0e..230a6422b99f369c52a5d90d113a9a9ffd94b77c 100644 (file)
@@ -208,8 +208,8 @@ arch_initcall(parisc_idle_init);
  * Copy architecture-specific thread state
  */
 int
-copy_thread(unsigned long clone_flags, unsigned long usp,
-           unsigned long kthread_arg, struct task_struct *p)
+copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+           unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
 {
        struct pt_regs *cregs = &(p->thread.regs);
        void *stack = task_stack_page(p);
@@ -254,9 +254,9 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
                cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
                cregs->kpc = (unsigned long) &child_return;
 
-               /* Setup thread TLS area from the 4th parameter in clone */
+               /* Setup thread TLS area */
                if (clone_flags & CLONE_SETTLS)
-                       cregs->cr27 = cregs->gr[23];
+                       cregs->cr27 = tls;
        }
 
        return 0;
index ddca8287d43bac22e772854d7100975312146866..354cf060b67fc1d4964d0e24cb811b4a48c5cc32 100644 (file)
@@ -401,7 +401,7 @@ static void __init map_pages(unsigned long start_vaddr,
                        pmd = (pmd_t *) __pa(pmd);
                }
 
-               pgd_populate(NULL, pg_dir, __va(pmd));
+               pud_populate(NULL, (pud_t *)pg_dir, __va(pmd));
 #endif
                pg_dir++;
 
index 1b55fc08f8532c38b32c0349c5051e40e59be757..860228e917dce1d4e8279a4b1a1f97834e606d2c 100644 (file)
@@ -15,6 +15,7 @@
  *
  * (the type definitions are in asm/spinlock_types.h)
  */
+#include <linux/jump_label.h>
 #include <linux/irqflags.h>
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
index 617c2777926fc15344937701d9d02f2bbc09c962..f5535eae637fbcf52c91d7e6a7dea4e5c5a7e0ab 100644 (file)
@@ -151,10 +151,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
        int ret;
 
-       __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 
        /* Remove htab bolted mappings for this section of memory */
        start = (unsigned long)__va(start);
index 42bbcd47cc85ffb1f6900a3ec045db5a2561138d..dffe1a45b6ed4df131f8e18c6cbe851bd1b3fded 100644 (file)
@@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) {
 
 #endif
 
-static inline bool slice_addr_is_low(unsigned long addr)
+static inline notrace bool slice_addr_is_low(unsigned long addr)
 {
        u64 tmp = (u64)addr;
 
@@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                       mm_ctx_user_psize(&current->mm->context), 1);
 }
 
-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
+unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
 {
        unsigned char *psizes;
        int index, mask_index;
index d8efbaa78d67602bf46ff73a89cdd33f2381dd16..fa7dc03459e7f1e7168e9dbef0a3ecf14dae175e 100644 (file)
@@ -64,6 +64,8 @@ config RISCV
        select SPARSEMEM_STATIC if 32BIT
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select HAVE_ARCH_MMAP_RND_BITS if MMU
+       select ARCH_HAS_GCOV_PROFILE_ALL
+       select HAVE_COPY_THREAD_TLS
 
 config ARCH_MMAP_RND_BITS_MIN
        default 18 if 64BIT
index 70a1891e7cd07f472ede9f221e854671cbb23a3a..a2e3d54e830cc2c2de4ab0382254adf8674a11a5 100644 (file)
@@ -54,6 +54,7 @@ cpu1: cpu@1 {
                        reg = <1>;
                        riscv,isa = "rv64imafdc";
                        tlb-split;
+                       next-level-cache = <&l2cache>;
                        cpu1_intc: interrupt-controller {
                                #interrupt-cells = <1>;
                                compatible = "riscv,cpu-intc";
@@ -77,6 +78,7 @@ cpu2: cpu@2 {
                        reg = <2>;
                        riscv,isa = "rv64imafdc";
                        tlb-split;
+                       next-level-cache = <&l2cache>;
                        cpu2_intc: interrupt-controller {
                                #interrupt-cells = <1>;
                                compatible = "riscv,cpu-intc";
@@ -100,6 +102,7 @@ cpu3: cpu@3 {
                        reg = <3>;
                        riscv,isa = "rv64imafdc";
                        tlb-split;
+                       next-level-cache = <&l2cache>;
                        cpu3_intc: interrupt-controller {
                                #interrupt-cells = <1>;
                                compatible = "riscv,cpu-intc";
@@ -123,6 +126,7 @@ cpu4: cpu@4 {
                        reg = <4>;
                        riscv,isa = "rv64imafdc";
                        tlb-split;
+                       next-level-cache = <&l2cache>;
                        cpu4_intc: interrupt-controller {
                                #interrupt-cells = <1>;
                                compatible = "riscv,cpu-intc";
@@ -253,6 +257,17 @@ pwm1: pwm@10021000 {
                        #pwm-cells = <3>;
                        status = "disabled";
                };
+               l2cache: cache-controller@2010000 {
+                       compatible = "sifive,fu540-c000-ccache", "cache";
+                       cache-block-size = <64>;
+                       cache-level = <2>;
+                       cache-sets = <1024>;
+                       cache-size = <2097152>;
+                       cache-unified;
+                       interrupt-parent = <&plic0>;
+                       interrupts = <1 2 3>;
+                       reg = <0x0 0x2010000 0x0 0x1000>;
+               };
 
        };
 };
index dd62b691c443d821163e7460c4376b482c30396f..27e005fca5849e059bc920d937250bdb251ff35c 100644 (file)
@@ -5,4 +5,8 @@
 #include <linux/ftrace.h>
 #include <asm-generic/asm-prototypes.h>
 
+long long __lshrti3(long long a, int b);
+long long __ashrti3(long long a, int b);
+long long __ashlti3(long long a, int b);
+
 #endif /* _ASM_RISCV_PROTOTYPES_H */
index 0a62d2d684552da8c665d17632bc590e61b1dbc0..435b65532e2945703cc17a9e5c4f7613a0d5b6b8 100644 (file)
 # define SR_PIE                SR_MPIE
 # define SR_PP         SR_MPP
 
-# define IRQ_SOFT      IRQ_M_SOFT
-# define IRQ_TIMER     IRQ_M_TIMER
-# define IRQ_EXT       IRQ_M_EXT
+# define RV_IRQ_SOFT           IRQ_M_SOFT
+# define RV_IRQ_TIMER  IRQ_M_TIMER
+# define RV_IRQ_EXT            IRQ_M_EXT
 #else /* CONFIG_RISCV_M_MODE */
 # define CSR_STATUS    CSR_SSTATUS
 # define CSR_IE                CSR_SIE
 # define SR_PIE                SR_SPIE
 # define SR_PP         SR_SPP
 
-# define IRQ_SOFT      IRQ_S_SOFT
-# define IRQ_TIMER     IRQ_S_TIMER
-# define IRQ_EXT       IRQ_S_EXT
+# define RV_IRQ_SOFT           IRQ_S_SOFT
+# define RV_IRQ_TIMER  IRQ_S_TIMER
+# define RV_IRQ_EXT            IRQ_S_EXT
 #endif /* CONFIG_RISCV_M_MODE */
 
 /* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
-#define IE_SIE         (_AC(0x1, UL) << IRQ_SOFT)
-#define IE_TIE         (_AC(0x1, UL) << IRQ_TIMER)
-#define IE_EIE         (_AC(0x1, UL) << IRQ_EXT)
+#define IE_SIE         (_AC(0x1, UL) << RV_IRQ_SOFT)
+#define IE_TIE         (_AC(0x1, UL) << RV_IRQ_TIMER)
+#define IE_EIE         (_AC(0x1, UL) << RV_IRQ_EXT)
 
 #ifndef __ASSEMBLY__
 
index a1349ca6466961d8eef510a9d4de9391c0f7c5d1..e163b7b64c86cdac7111a53cc641e41a85fc00bb 100644 (file)
@@ -246,6 +246,7 @@ check_syscall_nr:
         */
        li t1, -1
        beq a7, t1, ret_from_syscall_rejected
+       blt a7, t1, 1f
        /* Call syscall */
        la s0, sys_call_table
        slli t0, a7, RISCV_LGPTR
index b94d8db5ddccbcac050da196c59c926a738894dc..c40fdcdeb950a59f48f1be56ff8776253883bf90 100644 (file)
@@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
         */
        old = *parent;
 
-       if (function_graph_enter(old, self_addr, frame_pointer, parent))
+       if (!function_graph_enter(old, self_addr, frame_pointer, parent))
                *parent = return_hooker;
 }
 
index 797802c73dee2ea673402745e791f3c1a7e13d21..a4242be66966b2bdbfaee559b93b905e8a0e0d45 100644 (file)
@@ -80,7 +80,9 @@ _start_kernel:
 
 #ifdef CONFIG_SMP
        li t0, CONFIG_NR_CPUS
-       bgeu a0, t0, .Lsecondary_park
+       blt a0, t0, .Lgood_cores
+       tail .Lsecondary_park
+.Lgood_cores:
 #endif
 
        /* Pick one hart to run the main boot sequence */
@@ -209,11 +211,6 @@ relocate:
        tail smp_callin
 #endif
 
-.align 2
-.Lsecondary_park:
-       /* We lack SMP support or have too many harts, so park this hart */
-       wfi
-       j .Lsecondary_park
 END(_start)
 
 #ifdef CONFIG_RISCV_M_MODE
@@ -251,7 +248,7 @@ ENTRY(reset_regs)
 #ifdef CONFIG_FPU
        csrr    t0, CSR_MISA
        andi    t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
-       bnez    t0, .Lreset_regs_done
+       beqz    t0, .Lreset_regs_done
 
        li      t1, SR_FS
        csrs    CSR_STATUS, t1
@@ -295,6 +292,13 @@ ENTRY(reset_regs)
 END(reset_regs)
 #endif /* CONFIG_RISCV_M_MODE */
 
+.section ".text", "ax",@progbits
+.align 2
+.Lsecondary_park:
+       /* We lack SMP support or have too many harts, so park this hart */
+       wfi
+       j .Lsecondary_park
+
 __PAGE_ALIGNED_BSS
        /* Empty zero page */
        .balign PAGE_SIZE
index 3f07a91d5afb4571bb30eb706bd2bb9c49dacc7e..345c4f2eba13f41a58cdf2f2193f08371863a68f 100644 (file)
@@ -23,11 +23,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
 
        irq_enter();
        switch (regs->cause & ~CAUSE_IRQ_FLAG) {
-       case IRQ_TIMER:
+       case RV_IRQ_TIMER:
                riscv_timer_interrupt();
                break;
 #ifdef CONFIG_SMP
-       case IRQ_SOFT:
+       case RV_IRQ_SOFT:
                /*
                 * We only use software interrupts to pass IPIs, so if a non-SMP
                 * system gets one, then we don't know what to do.
@@ -35,7 +35,7 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
                riscv_software_interrupt();
                break;
 #endif
-       case IRQ_EXT:
+       case RV_IRQ_EXT:
                handle_arch_irq(regs);
                break;
        default:
index 95a3031e5c7c9dcfdebaa06ba20f8da31424f905..817cf7b0974ced30d75d536b192b73b4e3c25bdf 100644 (file)
@@ -99,8 +99,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        return 0;
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-       unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+       unsigned long arg, struct task_struct *p, unsigned long tls)
 {
        struct pt_regs *childregs = task_pt_regs(p);
 
@@ -121,7 +121,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                if (usp) /* User fork */
                        childregs->sp = usp;
                if (clone_flags & CLONE_SETTLS)
-                       childregs->tp = childregs->a5;
+                       childregs->tp = tls;
                childregs->a0 = 0; /* Return value of fork() */
                p->thread.ra = (unsigned long)ret_from_fork;
        }
index 4800cf703186d389a6fd3446e3339375f332ee74..2a02b7eebee00b270f4d6b7365de0374c2beaba3 100644 (file)
@@ -9,8 +9,5 @@
 /*
  * Assembly functions that may be used (directly or indirectly) by modules
  */
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__asm_copy_to_user);
-EXPORT_SYMBOL(__asm_copy_from_user);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
index 49a5852fd07dd5a023c1899e80bffe542f6dedeb..33b16f4212f7a5fceb7feeff0a462e295bb4c0c0 100644 (file)
@@ -58,7 +58,8 @@ quiet_cmd_vdsold = VDSOLD  $@
       cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
                            -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
                    $(CROSS_COMPILE)objcopy \
-                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
+                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+                   rm $@.tmp
 
 # install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
index 15f9d54c7db63859de60f86ced505165aa7a84cb..ef90075c4b0a9c153c02cdfc0d0fbdc98e151134 100644 (file)
@@ -4,34 +4,73 @@
  */
 
 #include <linux/linkage.h>
+#include <asm-generic/export.h>
 
-ENTRY(__lshrti3)
+SYM_FUNC_START(__lshrti3)
        beqz    a2, .L1
        li      a5,64
        sub     a5,a5,a2
-       addi    sp,sp,-16
        sext.w  a4,a5
        blez    a5, .L2
        sext.w  a2,a2
-       sll     a4,a1,a4
        srl     a0,a0,a2
-       srl     a1,a1,a2
+       sll     a4,a1,a4
+       srl     a2,a1,a2
        or      a0,a0,a4
-       sd      a1,8(sp)
-       sd      a0,0(sp)
-       ld      a0,0(sp)
-       ld      a1,8(sp)
-       addi    sp,sp,16
-       ret
+       mv      a1,a2
 .L1:
        ret
 .L2:
-       negw    a4,a4
-       srl     a1,a1,a4
-       sd      a1,0(sp)
-       sd      zero,8(sp)
-       ld      a0,0(sp)
-       ld      a1,8(sp)
-       addi    sp,sp,16
+       negw    a0,a4
+       li      a2,0
+       srl     a0,a1,a0
+       mv      a1,a2
+       ret
+SYM_FUNC_END(__lshrti3)
+EXPORT_SYMBOL(__lshrti3)
+
+SYM_FUNC_START(__ashrti3)
+       beqz    a2, .L3
+       li      a5,64
+       sub     a5,a5,a2
+       sext.w  a4,a5
+       blez    a5, .L4
+       sext.w  a2,a2
+       srl     a0,a0,a2
+       sll     a4,a1,a4
+       sra     a2,a1,a2
+       or      a0,a0,a4
+       mv      a1,a2
+.L3:
+       ret
+.L4:
+       negw    a0,a4
+       srai    a2,a1,0x3f
+       sra     a0,a1,a0
+       mv      a1,a2
+       ret
+SYM_FUNC_END(__ashrti3)
+EXPORT_SYMBOL(__ashrti3)
+
+SYM_FUNC_START(__ashlti3)
+       beqz    a2, .L5
+       li      a5,64
+       sub     a5,a5,a2
+       sext.w  a4,a5
+       blez    a5, .L6
+       sext.w  a2,a2
+       sll     a1,a1,a2
+       srl     a4,a0,a4
+       sll     a2,a0,a2
+       or      a1,a1,a4
+       mv      a0,a2
+.L5:
+       ret
+.L6:
+       negw    a1,a4
+       li      a2,0
+       sll     a1,a0,a1
+       mv      a0,a2
        ret
-ENDPROC(__lshrti3)
+SYM_FUNC_END(__ashlti3)
+EXPORT_SYMBOL(__ashlti3)
index fecd65657a6fc0c3c1e4e18d281f0d2fb82c20f8..f29d2ba2c0a6ce02b80bba6032ddf9e1f897d484 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm-generic/export.h>
 #include <asm/asm.h>
 #include <asm/csr.h>
 
@@ -66,6 +67,8 @@ ENTRY(__asm_copy_from_user)
        j 3b
 ENDPROC(__asm_copy_to_user)
 ENDPROC(__asm_copy_from_user)
+EXPORT_SYMBOL(__asm_copy_to_user)
+EXPORT_SYMBOL(__asm_copy_from_user)
 
 
 ENTRY(__clear_user)
@@ -108,6 +111,7 @@ ENTRY(__clear_user)
        bltu a0, a3, 5b
        j 3b
 ENDPROC(__clear_user)
+EXPORT_SYMBOL(__clear_user)
 
        .section .fixup,"ax"
        .balign 4
index 8f190068664059d3ed2bfc5e40b27e93c1d7bb88..8930ab7278e6d51a7e14e05a765ca21a6e6e2798 100644 (file)
@@ -22,6 +22,7 @@ void flush_icache_all(void)
        else
                on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }
+EXPORT_SYMBOL(flush_icache_all);
 
 /*
  * Performs an icache flush for the given MM context.  RISC-V has no direct
index 69f6678db7f370027e00f679f2a8262362fdc185..965a8cf4829ca33bfde4754bf47db0b8b17c6611 100644 (file)
@@ -99,13 +99,13 @@ static void __init setup_initrd(void)
                pr_info("initrd not found or empty");
                goto disable;
        }
-       if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+       if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
                pr_err("initrd extends beyond end of memory");
                goto disable;
        }
 
        size = initrd_end - initrd_start;
-       memblock_reserve(__pa(initrd_start), size);
+       memblock_reserve(__pa_symbol(initrd_start), size);
        initrd_below_start_ok = 1;
 
        pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
@@ -124,8 +124,8 @@ void __init setup_bootmem(void)
 {
        struct memblock_region *reg;
        phys_addr_t mem_size = 0;
-       phys_addr_t vmlinux_end = __pa(&_end);
-       phys_addr_t vmlinux_start = __pa(&_start);
+       phys_addr_t vmlinux_end = __pa_symbol(&_end);
+       phys_addr_t vmlinux_start = __pa_symbol(&_start);
 
        /* Find the memory region containing the kernel */
        for_each_memblock(memory, reg) {
@@ -445,7 +445,7 @@ static void __init setup_vm_final(void)
 
        /* Setup swapper PGD for fixmap */
        create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
-                          __pa(fixmap_pgd_next),
+                          __pa_symbol(fixmap_pgd_next),
                           PGDIR_SIZE, PAGE_TABLE);
 
        /* Map all memory banks */
@@ -474,7 +474,7 @@ static void __init setup_vm_final(void)
        clear_fixmap(FIX_PMD);
 
        /* Move to swapper page table */
-       csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
+       csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
        local_flush_tlb_all();
 }
 #else
index 9cbf490fd162ec6b8df30a8cc0a9292cdbe90acb..d5fbd754f41a6fab29ee0ccfe9228859d2504de0 100644 (file)
@@ -1052,7 +1052,7 @@ static void __init log_component_list(void)
 
        if (!early_ipl_comp_list_addr)
                return;
-       if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR)
+       if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
                pr_info("Linux is running with Secure-IPL enabled\n");
        else
                pr_info("Linux is running with Secure-IPL disabled\n");
index f0ce2222056592fdd0941b43918f610c2ee958cb..ac44bd76db4be13443b3da63e4ef0377516d2ddb 100644 (file)
@@ -292,10 +292,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
        vmem_remove_mapping(start, size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
index dfdbaa50946ebbdfb2c1853b738eaf72ae0d734e..d1b1ff2be17aa728a84ba8e8701313664e9a76df 100644 (file)
@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
index 2a6d04fcb3e91c7e191cec43b7d1e2d6d832819b..6f0edd0c0220f40dac6fd3c89309458e9f0981e6 100644 (file)
@@ -14,6 +14,7 @@ config UML
        select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DEBUG_BUGVERBOSE
+       select HAVE_COPY_THREAD_TLS
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
        select GENERIC_CLOCKEVENTS
index 81c647ef9c6c8df98ae917178318a61647d9391e..adf91ef553ae91f215dcf1c457fcf258e8065423 100644 (file)
@@ -36,7 +36,7 @@ extern long subarch_ptrace(struct task_struct *child, long request,
 extern unsigned long getreg(struct task_struct *child, int regno);
 extern int putreg(struct task_struct *child, int regno, unsigned long value);
 
-extern int arch_copy_tls(struct task_struct *new);
+extern int arch_set_tls(struct task_struct *new, unsigned long tls);
 extern void clear_flushed_tls(struct task_struct *task);
 extern int syscall_trace_enter(struct pt_regs *regs);
 extern void syscall_trace_leave(struct pt_regs *regs);
index 263a8f06913341d1b0a9d013d89f12971e928391..17045e7211bfd461d9a055fcee9b0cb1f3ac6fed 100644 (file)
@@ -153,8 +153,8 @@ void fork_handler(void)
        userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long sp,
-               unsigned long arg, struct task_struct * p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+               unsigned long arg, struct task_struct * p, unsigned long tls)
 {
        void (*handler)(void);
        int kthread = current->flags & PF_KTHREAD;
@@ -188,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                 * Set a new TLS for the child thread?
                 */
                if (clone_flags & CLONE_SETTLS)
-                       ret = arch_copy_tls(p);
+                       ret = arch_set_tls(p, tls);
        }
 
        return ret;
index 58a512e33d8d628c60e33cfe21322d7823dbdc3e..ee60b81944a778b3d258d382361d87db8923561f 100644 (file)
@@ -244,6 +244,11 @@ SYM_FUNC_START(efi32_stub_entry)
        leal    efi32_config(%ebp), %eax
        movl    %eax, efi_config(%ebp)
 
+       /* Disable paging */
+       movl    %cr0, %eax
+       btrl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
+
        jmp     startup_32
 SYM_FUNC_END(efi32_stub_entry)
 #endif
index dbaa1b088a30e6106b680a666540016180c694ad..c37cb12d0ef68db47b1054113ce53f7e70a6486d 100644 (file)
@@ -15,6 +15,7 @@
 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC         0x1910
 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC         0x190f
 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC         0x191f
+#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC         0x1918
 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC          0x590c
 #define PCI_DEVICE_ID_INTEL_KBL_U_IMC          0x5904
 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC         0x5914
@@ -657,6 +658,10 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
@@ -826,6 +831,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
        IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
        IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
+       IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
        IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
        IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
        IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
index b10a5ec79e48aac1af8d9edb1396e22449dd8d14..ad20220af303ad8a28acb865123c4a41267c87e9 100644 (file)
 #define SNR_M2M_PCI_PMON_BOX_CTL               0x438
 #define SNR_M2M_PCI_PMON_UMASK_EXT             0xff
 
-/* SNR PCIE3 */
-#define SNR_PCIE3_PCI_PMON_CTL0                        0x508
-#define SNR_PCIE3_PCI_PMON_CTR0                        0x4e8
-#define SNR_PCIE3_PCI_PMON_BOX_CTL             0x4e4
-
 /* SNR IMC */
 #define SNR_IMC_MMIO_PMON_FIXED_CTL            0x54
 #define SNR_IMC_MMIO_PMON_FIXED_CTR            0x38
@@ -4328,27 +4323,12 @@ static struct intel_uncore_type snr_uncore_m2m = {
        .format_group   = &snr_m2m_uncore_format_group,
 };
 
-static struct intel_uncore_type snr_uncore_pcie3 = {
-       .name           = "pcie3",
-       .num_counters   = 4,
-       .num_boxes      = 1,
-       .perf_ctr_bits  = 48,
-       .perf_ctr       = SNR_PCIE3_PCI_PMON_CTR0,
-       .event_ctl      = SNR_PCIE3_PCI_PMON_CTL0,
-       .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
-       .box_ctl        = SNR_PCIE3_PCI_PMON_BOX_CTL,
-       .ops            = &ivbep_uncore_pci_ops,
-       .format_group   = &ivbep_uncore_format_group,
-};
-
 enum {
        SNR_PCI_UNCORE_M2M,
-       SNR_PCI_UNCORE_PCIE3,
 };
 
 static struct intel_uncore_type *snr_pci_uncores[] = {
        [SNR_PCI_UNCORE_M2M]            = &snr_uncore_m2m,
-       [SNR_PCI_UNCORE_PCIE3]          = &snr_uncore_pcie3,
        NULL,
 };
 
@@ -4357,10 +4337,6 @@ static const struct pci_device_id snr_uncore_pci_ids[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
        },
-       { /* PCIe3 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
-       },
        { /* end: all zeroes */ }
 };
 
@@ -4536,6 +4512,7 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
        INTEL_UNCORE_EVENT_DESC(write,          "event=0xff,umask=0x21"),
        INTEL_UNCORE_EVENT_DESC(write.scale,    "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(write.unit,     "MiB"),
+       { /* end: all zeroes */ },
 };
 
 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
index 90f75e5158769c8690f745184c1271ddc6dffd20..62c30279be777b3df46efb4908c2d35ed2e2fefc 100644 (file)
@@ -615,9 +615,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
                return;
 
 clear_all:
-               clear_cpu_cap(c, X86_FEATURE_SME);
+               setup_clear_cpu_cap(X86_FEATURE_SME);
 clear_sev:
-               clear_cpu_cap(c, X86_FEATURE_SEV);
+               setup_clear_cpu_cap(X86_FEATURE_SEV);
        }
 }
 
index b38010b541d6edff1f6b6cf885b6d76ba89847d5..6c3e1c92f1835ac30415649981ba7f411472c17d 100644 (file)
@@ -467,6 +467,7 @@ static int thermal_throttle_online(unsigned int cpu)
 {
        struct thermal_state *state = &per_cpu(thermal_state, cpu);
        struct device *dev = get_cpu_device(cpu);
+       u32 l;
 
        state->package_throttle.level = PACKAGE_LEVEL;
        state->core_throttle.level = CORE_LEVEL;
@@ -474,6 +475,10 @@ static int thermal_throttle_online(unsigned int cpu)
        INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
        INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);
 
+       /* Unmask the thermal vector after the above workqueues are initialized. */
+       l = apic_read(APIC_LVTTHMR);
+       apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+
        return thermal_throttle_add_dev(dev, cpu);
 }
 
@@ -722,10 +727,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
        wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
 
-       /* Unmask the thermal vector: */
-       l = apic_read(APIC_LVTTHMR);
-       apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
-
        pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
                      tm2 ? "TM2" : "TM1");
 
index 03eb90d00af0fb693a2298f0e956d05f7116ce04..89049b343c7a8f27b0af635253c911d15f393f2b 100644 (file)
@@ -618,7 +618,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                if (static_branch_unlikely(&rdt_mon_enable_key))
                        rmdir_mondata_subdir_allrdtgrp(r, d->id);
                list_del(&d->list);
-               if (is_mbm_enabled())
+               if (r->mon_capable && is_mbm_enabled())
                        cancel_delayed_work(&d->mbm_over);
                if (is_llc_occupancy_enabled() &&  has_busy_rmid(r, d)) {
                        /*
index 2e3b06d6bbc6df9b311bd6e9b942ea0ea4b1fc67..dac7209a0708438ebc2293d9c996b1595ea482a2 100644 (file)
@@ -1741,9 +1741,6 @@ static int set_cache_qos_cfg(int level, bool enable)
        struct rdt_domain *d;
        int cpu;
 
-       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
-               return -ENOMEM;
-
        if (level == RDT_RESOURCE_L3)
                update = l3_qos_cfg_update;
        else if (level == RDT_RESOURCE_L2)
@@ -1751,6 +1748,9 @@ static int set_cache_qos_cfg(int level, bool enable)
        else
                return -EINVAL;
 
+       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+               return -ENOMEM;
+
        r_l = &rdt_resources_all[level];
        list_for_each_entry(d, &r_l->domains, list) {
                /* Pick one CPU from each domain instance to update MSR */
index 930edeb41ec33417755d38293f85ad1717cdee90..0a74407ef92ea939aaa8425613ae8bcda2419b70 100644 (file)
@@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
 
index dcb9bc961b39c3e2928eb4d15f681c8dad29591e..bcfede46fe02b54d0b73d74de7112e351a2b4272 100644 (file)
@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
-       struct zone *zone = page_zone(page);
 
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
        kernel_physical_mapping_remove(start, start + size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
index 5bd949da7a4a5deb659f19e849dcd6036a2843ac..ac8eee093f9cd0fa112fe89e945202eb60203a84 100644 (file)
@@ -215,14 +215,12 @@ static int set_tls_entry(struct task_struct* task, struct user_desc *info,
        return 0;
 }
 
-int arch_copy_tls(struct task_struct *new)
+int arch_set_tls(struct task_struct *new, unsigned long tls)
 {
        struct user_desc info;
        int idx, ret = -EFAULT;
 
-       if (copy_from_user(&info,
-                          (void __user *) UPT_SI(&new->thread.regs.regs),
-                          sizeof(info)))
+       if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
                goto out;
 
        ret = -EINVAL;
index 3a621e0d39253aa30a7d5eadb523cd4a2e1315cb..ebd3855d9b132379b7065222316a948e06b2b72e 100644 (file)
@@ -6,14 +6,13 @@ void clear_flushed_tls(struct task_struct *task)
 {
 }
 
-int arch_copy_tls(struct task_struct *t)
+int arch_set_tls(struct task_struct *t, unsigned long tls)
 {
        /*
         * If CLONE_SETTLS is set, we need to save the thread id
-        * (which is argument 5, child_tid, of clone) so it can be set
-        * during context switches.
+        * so it can be set during context switches.
         */
-       t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
+       t->thread.arch.fs = tls;
 
        return 0;
 }
index 4a3fa295d8fed4a9427ea8c1eecf9832040d766a..296c5324dacef5cbdf3c003b151c8e34d529ad54 100644 (file)
@@ -24,6 +24,7 @@ config XTENSA
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
        select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_COPY_THREAD_TLS
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
        select HAVE_EXIT_THREAD
index 9e1c49134c07eca6a172e4b0f43e5ffeeef61f50..3edecc41ef8c36ffefb6a6b6ac1be62c1d9a296d 100644 (file)
@@ -202,8 +202,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
  * involved.  Much simpler to just not copy those live frames across.
  */
 
-int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
-               unsigned long thread_fn_arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn,
+               unsigned long thread_fn_arg, struct task_struct *p,
+               unsigned long tls)
 {
        struct pt_regs *childregs = task_pt_regs(p);
 
@@ -266,9 +267,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
 
                childregs->syscall = regs->syscall;
 
-               /* The thread pointer is passed in the '4th argument' (= a5) */
                if (clone_flags & CLONE_SETTLS)
-                       childregs->threadptr = childregs->areg[5];
+                       childregs->threadptr = tls;
        } else {
                p->thread.ra = MAKE_RA_FOR_CALL(
                                (unsigned long)ret_from_kernel_thread, 1);
index a5d75f6bf4c7eedc96454bade72dbc9fe88af74d..94d697217887aa1b9e8b214c7606a6f1e7bf8b42 100644 (file)
@@ -538,6 +538,55 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 }
 EXPORT_SYMBOL(zero_fill_bio_iter);
 
+/**
+ * bio_truncate - truncate the bio to small size of @new_size
+ * @bio:       the bio to be truncated
+ * @new_size:  new size for truncating the bio
+ *
+ * Description:
+ *   Truncate the bio to new size of @new_size. If bio_op(bio) is
+ *   REQ_OP_READ, zero the truncated part. This function should only
+ *   be used for handling corner cases, such as bio eod.
+ */
+void bio_truncate(struct bio *bio, unsigned new_size)
+{
+       struct bio_vec bv;
+       struct bvec_iter iter;
+       unsigned int done = 0;
+       bool truncated = false;
+
+       if (new_size >= bio->bi_iter.bi_size)
+               return;
+
+       if (bio_op(bio) != REQ_OP_READ)
+               goto exit;
+
+       bio_for_each_segment(bv, bio, iter) {
+               if (done + bv.bv_len > new_size) {
+                       unsigned offset;
+
+                       if (!truncated)
+                               offset = new_size - done;
+                       else
+                               offset = 0;
+                       zero_user(bv.bv_page, offset, bv.bv_len - offset);
+                       truncated = true;
+               }
+               done += bv.bv_len;
+       }
+
+ exit:
+       /*
+        * Don't touch bvec table here and make it really immutable, since
+        * fs bio user has to retrieve all pages via bio_for_each_segment_all
+        * in its .end_bio() callback.
+        *
+        * It is enough to truncate bio by updating .bi_size since we can make
+        * correct bvec with the updated .bi_size for drivers.
+        */
+       bio->bi_iter.bi_size = new_size;
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio:   bio to release reference to
index d783bdc4559b4de54b19364579326b9f618ebe15..1534ed736363fd807998525d3ddd828e566004f8 100644 (file)
@@ -157,17 +157,20 @@ static inline unsigned get_max_io_size(struct request_queue *q,
        return sectors & (lbs - 1);
 }
 
-static unsigned get_max_segment_size(const struct request_queue *q,
-                                    unsigned offset)
+static inline unsigned get_max_segment_size(const struct request_queue *q,
+                                           struct page *start_page,
+                                           unsigned long offset)
 {
        unsigned long mask = queue_segment_boundary(q);
 
-       /* default segment boundary mask means no boundary limit */
-       if (mask == BLK_SEG_BOUNDARY_MASK)
-               return queue_max_segment_size(q);
+       offset = mask & (page_to_phys(start_page) + offset);
 
-       return min_t(unsigned long, mask - (mask & offset) + 1,
-                    queue_max_segment_size(q));
+       /*
+        * overflow may be triggered in case of zero page physical address
+        * on 32bit arch, use queue's max segment size when that happens.
+        */
+       return min_not_zero(mask - offset + 1,
+                       (unsigned long)queue_max_segment_size(q));
 }
 
 /**
@@ -201,7 +204,8 @@ static bool bvec_split_segs(const struct request_queue *q,
        unsigned seg_size = 0;
 
        while (len && *nsegs < max_segs) {
-               seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+               seg_size = get_max_segment_size(q, bv->bv_page,
+                                               bv->bv_offset + total_len);
                seg_size = min(seg_size, len);
 
                (*nsegs)++;
@@ -419,7 +423,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 
        while (nbytes > 0) {
                unsigned offset = bvec->bv_offset + total;
-               unsigned len = min(get_max_segment_size(q, offset), nbytes);
+               unsigned len = min(get_max_segment_size(q, bvec->bv_page,
+                                       offset), nbytes);
                struct page *page = bvec->bv_page;
 
                /*
index 5f6dcc7a47bd92745341feee781cb2f2d3fa58b2..c8eda2e7b91e492453a9a454691865f4b8768a5d 100644 (file)
@@ -328,7 +328,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
  *   storage device can address.  The default of 512 covers most
  *   hardware.
  **/
-void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
 {
        q->limits.logical_block_size = size;
 
index 6ca015f92766e9052d444f9e81ba3c307846166c..3ed7a0f144a994b28aa2bc269cadece8c6a821d8 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/compat.h>
 #include <linux/elevator.h>
 #include <linux/hdreg.h>
+#include <linux/pr.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/types.h>
@@ -354,6 +355,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
         * but we call blkdev_ioctl, which gets the lock for us
         */
        case BLKRRPART:
+       case BLKREPORTZONE:
+       case BLKRESETZONE:
+       case BLKOPENZONE:
+       case BLKCLOSEZONE:
+       case BLKFINISHZONE:
+       case BLKGETZONESZ:
+       case BLKGETNRZONES:
                return blkdev_ioctl(bdev, mode, cmd,
                                (unsigned long)compat_ptr(arg));
        case BLKBSZSET_32:
@@ -401,6 +409,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case BLKTRACETEARDOWN: /* compatible */
                ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
                return ret;
+       case IOC_PR_REGISTER:
+       case IOC_PR_RESERVE:
+       case IOC_PR_RELEASE:
+       case IOC_PR_PREEMPT:
+       case IOC_PR_PREEMPT_ABORT:
+       case IOC_PR_CLEAR:
+               return blkdev_ioctl(bdev, mode, cmd,
+                               (unsigned long)compat_ptr(arg));
        default:
                if (disk->fops->compat_ioctl)
                        ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
index 33f71983e001758ab0c4be5c2caf40f24c9b293d..9f0a9f9339f9e617ed1cdc5eadfa1cfc96715047 100644 (file)
@@ -11,6 +11,7 @@
 #define pr_fmt(fmt)    "ACPI: IORT: " fmt
 
 #include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
 #include <linux/iommu.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -850,9 +851,9 @@ static inline bool iort_iommu_driver_enabled(u8 type)
 {
        switch (type) {
        case ACPI_IORT_NODE_SMMU_V3:
-               return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
+               return IS_ENABLED(CONFIG_ARM_SMMU_V3);
        case ACPI_IORT_NODE_SMMU:
-               return IS_BUILTIN(CONFIG_ARM_SMMU);
+               return IS_ENABLED(CONFIG_ARM_SMMU);
        default:
                pr_warn("IORT node type %u does not describe an SMMU\n", type);
                return false;
@@ -924,6 +925,20 @@ static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
        return iort_iommu_xlate(info->dev, parent, streamid);
 }
 
+static void iort_named_component_init(struct device *dev,
+                                     struct acpi_iort_node *node)
+{
+       struct acpi_iort_named_component *nc;
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+       if (!fwspec)
+               return;
+
+       nc = (struct acpi_iort_named_component *)node->node_data;
+       fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
+                                          nc->node_flags);
+}
+
 /**
  * iort_iommu_configure - Set-up IOMMU configuration for a device.
  *
@@ -978,6 +993,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
                        if (parent)
                                err = iort_iommu_xlate(dev, parent, streamid);
                } while (parent && !err);
+
+               if (!err)
+                       iort_named_component_init(dev, node);
        }
 
        /*
index f41744b9b38a6d5b47e11f5b98777a6b195be3e2..66a570d0da83743caf73b26d0a4c9212ef5e4f26 100644 (file)
@@ -76,8 +76,7 @@ enum brcm_ahci_version {
 };
 
 enum brcm_ahci_quirks {
-       BRCM_AHCI_QUIRK_NO_NCQ          = BIT(0),
-       BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1),
+       BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0),
 };
 
 struct brcm_ahci_priv {
@@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
                        brcm_sata_phy_disable(priv, i);
 }
 
-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
                                  struct brcm_ahci_priv *priv)
 {
-       void __iomem *ahci;
-       struct resource *res;
        u32 impl;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
-       ahci = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(ahci))
-               return 0;
-
-       impl = readl(ahci + HOST_PORTS_IMPL);
+       impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
 
        if (fls(impl) > SATA_TOP_MAX_PHYS)
                dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
@@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
        else if (!impl)
                dev_info(priv->dev, "no ports found\n");
 
-       devm_iounmap(&pdev->dev, ahci);
-       devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
-
        return impl;
 }
 
@@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
        /* Perform the SATA PHY reset sequence */
        brcm_sata_phy_disable(priv, ap->port_no);
 
+       /* Reset the SATA clock */
+       ahci_platform_disable_clks(hpriv);
+       msleep(10);
+
+       ahci_platform_enable_clks(hpriv);
+       msleep(10);
+
        /* Bring the PHY back on */
        brcm_sata_phy_enable(priv, ap->port_no);
 
@@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev)
        struct ata_host *host = dev_get_drvdata(dev);
        struct ahci_host_priv *hpriv = host->private_data;
        struct brcm_ahci_priv *priv = hpriv->plat_data;
-       int ret;
 
-       ret = ahci_platform_suspend(dev);
        brcm_sata_phys_disable(priv);
-       return ret;
+
+       return ahci_platform_suspend(dev);
 }
 
 static int brcm_ahci_resume(struct device *dev)
@@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev)
        struct ata_host *host = dev_get_drvdata(dev);
        struct ahci_host_priv *hpriv = host->private_data;
        struct brcm_ahci_priv *priv = hpriv->plat_data;
+       int ret;
+
+       /* Make sure clocks are turned on before re-configuration */
+       ret = ahci_platform_enable_clks(hpriv);
+       if (ret)
+               return ret;
 
        brcm_sata_init(priv);
        brcm_sata_phys_enable(priv);
        brcm_sata_alpm_init(hpriv);
-       return ahci_platform_resume(dev);
+
+       /* Since we had to enable clocks earlier on, we cannot use
+        * ahci_platform_resume() as-is since a second call to
+        * ahci_platform_enable_resources() would bump up the resources
+        * (regulators, clocks, PHYs) count artificially so we copy the part
+        * after ahci_platform_enable_resources().
+        */
+       ret = ahci_platform_enable_phys(hpriv);
+       if (ret)
+               goto out_disable_phys;
+
+       ret = ahci_platform_resume_host(dev);
+       if (ret)
+               goto out_disable_platform_phys;
+
+       /* We resumed so update PM runtime state */
+       pm_runtime_disable(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       return 0;
+
+out_disable_platform_phys:
+       ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+       brcm_sata_phys_disable(priv);
+       ahci_platform_disable_clks(hpriv);
+       return ret;
 }
 #endif
 
@@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev)
        if (!IS_ERR_OR_NULL(priv->rcdev))
                reset_control_deassert(priv->rcdev);
 
-       if ((priv->version == BRCM_SATA_BCM7425) ||
-               (priv->version == BRCM_SATA_NSP)) {
-               priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
+       hpriv = ahci_platform_get_resources(pdev, 0);
+       if (IS_ERR(hpriv)) {
+               ret = PTR_ERR(hpriv);
+               goto out_reset;
+       }
+
+       hpriv->plat_data = priv;
+       hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
+
+       switch (priv->version) {
+       case BRCM_SATA_BCM7425:
+               hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
+               /* fall through */
+       case BRCM_SATA_NSP:
+               hpriv->flags |= AHCI_HFLAG_NO_NCQ;
                priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+               break;
+       default:
+               break;
        }
 
+       ret = ahci_platform_enable_clks(hpriv);
+       if (ret)
+               goto out_reset;
+
+       /* Must be first so as to configure endianness including that
+        * of the standard AHCI register space.
+        */
        brcm_sata_init(priv);
 
-       priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
-       if (!priv->port_mask)
-               return -ENODEV;
+       /* Initializes priv->port_mask which is used below */
+       priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+       if (!priv->port_mask) {
+               ret = -ENODEV;
+               goto out_disable_clks;
+       }
 
+       /* Must be done before ahci_platform_enable_phys() */
        brcm_sata_phys_enable(priv);
 
-       hpriv = ahci_platform_get_resources(pdev, 0);
-       if (IS_ERR(hpriv))
-               return PTR_ERR(hpriv);
-       hpriv->plat_data = priv;
-       hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
-
        brcm_sata_alpm_init(hpriv);
 
-       ret = ahci_platform_enable_resources(hpriv);
+       ret = ahci_platform_enable_phys(hpriv);
        if (ret)
-               return ret;
-
-       if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
-               hpriv->flags |= AHCI_HFLAG_NO_NCQ;
-       hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
+               goto out_disable_phys;
 
        ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
                                      &ahci_platform_sht);
        if (ret)
-               return ret;
+               goto out_disable_platform_phys;
 
        dev_info(dev, "Broadcom AHCI SATA3 registered\n");
 
        return 0;
+
+out_disable_platform_phys:
+       ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+       brcm_sata_phys_disable(priv);
+out_disable_clks:
+       ahci_platform_disable_clks(hpriv);
+out_reset:
+       if (!IS_ERR_OR_NULL(priv->rcdev))
+               reset_control_assert(priv->rcdev);
+       return ret;
 }
 
 static int brcm_ahci_remove(struct platform_device *pdev)
@@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
        struct brcm_ahci_priv *priv = hpriv->plat_data;
        int ret;
 
+       brcm_sata_phys_disable(priv);
+
        ret = ata_platform_remove_one(pdev);
        if (ret)
                return ret;
 
-       brcm_sata_phys_disable(priv);
-
        return 0;
 }
 
index 8befce036af84810b0a3c815829b87f6fd904d8c..129556fcf6be760ce48834873ea3ea057c4bd8a5 100644 (file)
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
  * RETURNS:
  * 0 on success otherwise a negative error code
  */
-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
 {
        int rc, i;
 
@@ -74,6 +74,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
        }
        return rc;
 }
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
 
 /**
  * ahci_platform_disable_phys - Disable PHYs
@@ -81,7 +82,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
  *
  * This function disables all PHYs found in hpriv->phys.
  */
-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
 {
        int i;
 
@@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
                phy_exit(hpriv->phys[i]);
        }
 }
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
 
 /**
  * ahci_platform_enable_clks - Enable platform clocks
index e9017c570bc51667c127847eb13561f09333794d..6f4ab5c5b52dde6f072a9eea90427d7fd8a7a791 100644 (file)
@@ -5328,6 +5328,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
        }
 }
 
+/**
+ *     ata_qc_get_active - get bitmask of active qcs
+ *     @ap: port in question
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ *
+ *     RETURNS:
+ *     Bitmask of active qcs
+ */
+u64 ata_qc_get_active(struct ata_port *ap)
+{
+       u64 qc_active = ap->qc_active;
+
+       /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
+       if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+               qc_active |= (1 << 0);
+               qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
+       }
+
+       return qc_active;
+}
+EXPORT_SYMBOL_GPL(ata_qc_get_active);
+
 /**
  *     ata_qc_complete_multiple - Complete multiple qcs successfully
  *     @ap: port in question
index 9239615d8a0472967fa534f0dcd8523da944ea1a..d55ee244d6931fcae2089dce4e3a07409685276d 100644 (file)
@@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
                                     i, ioread32(hcr_base + CC),
                                     ioread32(hcr_base + CA));
                }
-               ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+               ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
                return;
 
        } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
index 277f11909fc1ab37338f3390781db00814104a6a..d7228f8e9297c001e730c7f62c8901ebad20715f 100644 (file)
@@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
        }
 
        if (work_done) {
-               ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+               ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 
                /* Update the software queue position index in hardware */
                writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
index f3e62f5528bdb17735f28d847f8cc485bc2e5164..eb9dc14e5147aaebbc210c1481ce146a6d9c57a4 100644 (file)
@@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
                                        check_commands = 0;
                                check_commands &= ~(1 << pos);
                        }
-                       ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+                       ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
                }
        }
 
index b23d1e4bad33b20653b213047d815f388dfd0d47..9d0d65efcd94e97da049e48dd7e9f5aedfccc8f3 100644 (file)
@@ -374,7 +374,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
                here = (eni_vcc->descr+skip) & (eni_vcc->words-1);
                dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci
                    << MID_DMA_VCI_SHIFT) | MID_DT_JK;
-               j++;
+               dma[j++] = 0;
        }
        here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1);
        if (!eff) size += skip;
@@ -447,7 +447,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
        if (size != eff) {
                dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
                    (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
-               j++;
+               dma[j++] = 0;
        }
        if (!j || j > 2*RX_DMA_BUF) {
                printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");
index 4a66888e7253d34d00674824d7fab6593dd794fd..5fa7ce3745a0d63e7c7736de89386e06ee38fe0c 100644 (file)
@@ -17,7 +17,7 @@ PROGBITS  = $(if $(CONFIG_ARM),%,@)progbits
 filechk_fwbin = \
        echo "/* Generated by $(src)/Makefile */"               ;\
        echo "    .section .rodata"                             ;\
-       echo "    .p2align $(ASM_ALIGN)"                        ;\
+       echo "    .p2align 4"                                   ;\
        echo "_fw_$(FWSTR)_bin:"                                ;\
        echo "    .incbin \"$(fwdir)/$(FWNAME)\""               ;\
        echo "_fw_end:"                                         ;\
index d4d88b5818225b6e3fe441b9190c4e7d82a0e04a..ed34785dd64bd7b913d8003993e4b741fd58eb41 100644 (file)
@@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                return BLK_STS_IOERR;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_IMP_OPEN:
+       case BLK_ZONE_COND_EXP_OPEN:
+       case BLK_ZONE_COND_CLOSED:
                /* Writes must be at the write pointer position */
                if (sector != zone->wp)
                        return BLK_STS_IOERR;
 
-               if (zone->cond == BLK_ZONE_COND_EMPTY)
+               if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
                        zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
                zone->wp += nr_sectors;
@@ -186,7 +188,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                if (zone->cond == BLK_ZONE_COND_FULL)
                        return BLK_STS_IOERR;
 
-               zone->cond = BLK_ZONE_COND_CLOSED;
+               if (zone->wp == zone->start)
+                       zone->cond = BLK_ZONE_COND_EMPTY;
+               else
+                       zone->cond = BLK_ZONE_COND_CLOSED;
                break;
        case REQ_OP_ZONE_FINISH:
                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
index ee67bf929fac8d57e5750c60dacf3ca58a73891d..861fc65a1b751a5a19ae8a5be8c1ede985df26ad 100644 (file)
@@ -2707,7 +2707,7 @@ static const struct block_device_operations pktcdvd_ops = {
        .release =              pkt_close,
        .ioctl =                pkt_ioctl,
 #ifdef CONFIG_COMPAT
-       .ioctl =                pkt_compat_ioctl,
+       .compat_ioctl =         pkt_compat_ioctl,
 #endif
        .check_events =         pkt_check_events,
 };
index f4d1597df0a27152dc80bee29e7d2820abebcdc2..ccb44fe790a71ebbc5ccfa07216cbffaad29d6bd 100644 (file)
@@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata)
                return -EINVAL;
        }
 
+       /* Always add a slot for main clocks fck and ick even if unused */
+       if (!nr_fck)
+               ddata->nr_clocks++;
+       if (!nr_ick)
+               ddata->nr_clocks++;
+
        ddata->clocks = devm_kcalloc(ddata->dev,
                                     ddata->nr_clocks, sizeof(*ddata->clocks),
                                     GFP_KERNEL);
@@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata)
        struct clk *clock;
        int i, error;
 
-       if (!ddata->clocks)
+       if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
                return 0;
 
        for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
@@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata)
        struct clk *clock;
        int i;
 
-       if (!ddata->clocks)
+       if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
                return;
 
        for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
index 31c374b1b91b038827254824b48bb91b8b577d68..7ecf20a6d19cb62564f9b17878717483c009755a 100644 (file)
@@ -84,7 +84,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
        unsigned int cdev = 0;
        u32 mnistat, tnistat, tstatus, mcmd;
        u16 tnicmd, mnicmd;
-       u8 mcapndx;
        u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
        u32 step, rem, rem_isoch, rem_async;
        int ret = 0;
@@ -138,8 +137,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
                cur = list_entry(pos, struct agp_3_5_dev, list);
                dev = cur->dev;
 
-               mcapndx = cur->capndx;
-
                pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
 
                master[cdev].maxbw = (mnistat >> 16) & 0xff;
@@ -251,8 +248,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
                cur = master[cdev].dev;
                dev = cur->dev;
 
-               mcapndx = cur->capndx;
-
                master[cdev].rq += (cdev == ndevs - 1)
                              ? (rem_async + rem_isoch) : step;
 
@@ -319,7 +314,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
 {
        struct pci_dev *td = bridge->dev, *dev = NULL;
        u8 mcapndx;
-       u32 isoch, arqsz;
+       u32 isoch;
        u32 tstatus, mstatus, ncapid;
        u32 mmajor;
        u16 mpstat;
@@ -334,8 +329,6 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
        if (isoch == 0) /* isoch xfers not available, bail out. */
                return -ENODEV;
 
-       arqsz     = (tstatus >> 13) & 0x7;
-
        /*
         * Allocate a head for our AGP 3.5 device list
         * (multiple AGP v3 devices are allowed behind a single bridge).
index b23b0b9992322419d709eca2d1711b55f0f35658..87f4493402021b6af277881cbdb928cddbdf10db 100644 (file)
@@ -130,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
                priv->response_read = true;
 
                ret_size = min_t(ssize_t, size, priv->response_length);
-               if (!ret_size) {
+               if (ret_size <= 0) {
                        priv->response_length = 0;
                        goto out;
                }
index 1089fc0bb290cb59098321fbf6605872167f7a15..f3742bcc73e377ce8b1bbc35fdbe1c4a2978d7af 100644 (file)
@@ -14,7 +14,7 @@ struct file_priv {
        struct work_struct timeout_work;
        struct work_struct async_work;
        wait_queue_head_t async_wait;
-       size_t response_length;
+       ssize_t response_length;
        bool response_read;
        bool command_enqueued;
 
index bb0343ffd2357fa7ac1c7490b3d39a4a152b944e..27c6ca031e23ec8a26401dc281271404e8e3a008 100644 (file)
@@ -978,13 +978,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 
        if (wait_startup(chip, 0) != 0) {
                rc = -ENODEV;
-               goto err_start;
+               goto out_err;
        }
 
        /* Take control of the TPM's interrupt hardware and shut it off */
        rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
        if (rc < 0)
-               goto err_start;
+               goto out_err;
 
        intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
                   TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
@@ -993,21 +993,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 
        rc = tpm_chip_start(chip);
        if (rc)
-               goto err_start;
-
+               goto out_err;
        rc = tpm2_probe(chip);
+       tpm_chip_stop(chip);
        if (rc)
-               goto err_probe;
+               goto out_err;
 
        rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
        if (rc < 0)
-               goto err_probe;
+               goto out_err;
 
        priv->manufacturer_id = vendor;
 
        rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
        if (rc < 0)
-               goto err_probe;
+               goto out_err;
 
        dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
                 (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
@@ -1016,13 +1016,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
        probe = probe_itpm(chip);
        if (probe < 0) {
                rc = -ENODEV;
-               goto err_probe;
+               goto out_err;
        }
 
        /* Figure out the capabilities */
        rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
        if (rc < 0)
-               goto err_probe;
+               goto out_err;
 
        dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
                intfcaps);
@@ -1056,10 +1056,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
                if (tpm_get_timeouts(chip)) {
                        dev_err(dev, "Could not get TPM timeouts and durations\n");
                        rc = -ENODEV;
-                       goto err_probe;
+                       goto out_err;
                }
 
-               chip->flags |= TPM_CHIP_FLAG_IRQ;
                if (irq) {
                        tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
                                                 irq);
@@ -1071,18 +1070,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
                }
        }
 
-       tpm_chip_stop(chip);
-
        rc = tpm_chip_register(chip);
        if (rc)
-               goto err_start;
-
-       return 0;
+               goto out_err;
 
-err_probe:
-       tpm_chip_stop(chip);
+       if (chip->ops->clk_enable != NULL)
+               chip->ops->clk_enable(chip, false);
 
-err_start:
+       return 0;
+out_err:
        if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
                chip->ops->clk_enable(chip, false);
 
index 6a11239ccde311e3839824c7377bb5fb7d20ebe7..772258de2d1f3c270e0c5747fb848be5e7c506e4 100644 (file)
@@ -3426,11 +3426,17 @@ static int __clk_core_init(struct clk_core *core)
        if (core->flags & CLK_IS_CRITICAL) {
                unsigned long flags;
 
-               clk_core_prepare(core);
+               ret = clk_core_prepare(core);
+               if (ret)
+                       goto out;
 
                flags = clk_enable_lock();
-               clk_core_enable(core);
+               ret = clk_core_enable(core);
                clk_enable_unlock(flags);
+               if (ret) {
+                       clk_core_unprepare(core);
+                       goto out;
+               }
        }
 
        clk_core_reparent_orphans_nolock();
index a60a1be937ad6566a50270fef46b458ba8edf954..b4a95cbbda989fae6dbc2aca8bd90f4ea945ee77 100644 (file)
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock);
 static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
 
 static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
+static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
 
 static DEFINE_SPINLOCK(reset_lock);
 
index f7b370f3acef6f15de4887e95aa22f448e88d863..f6ce888098be9b3fbeab9852e84359614ac45d54 100644 (file)
@@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
                .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
@@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
                .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
@@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
                .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
@@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
                .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
@@ -3287,6 +3291,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
                .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
@@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
                .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
@@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
                .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct clk_regmap *gcc_sdm845_clocks[] = {
index 3a991ca1ee3605af87831a603914db9ee3fe5deb..c9e5a1fb66539eed0f078d01849aa947752c2d5b 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/clk.h>
 
 #include "clk.h"
 #include "clk-cpu.h"
@@ -1646,6 +1647,13 @@ static void __init exynos5x_clk_init(struct device_node *np,
                                     exynos5x_subcmus);
        }
 
+       /*
+        * Keep top part of G3D clock path enabled permanently to ensure
+        * that the internal busses get their clock regardless of the
+        * main G3D clock enablement status.
+        */
+       clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d"));
+
        samsung_clk_of_add_provider(np, ctx);
 }
 
index 45a1ed3fe6742fd7ed9edc8f797a0f80e63a220b..50f8d1bc7046552804a1bad0576be558e9b89cb6 100644 (file)
@@ -23,9 +23,9 @@
  */
 
 static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k",
-                                            "pll-periph0", "iosc" };
+                                                    "iosc", "pll-periph0" };
 static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = {
-       { .index = 2, .shift = 0, .width = 5 },
+       { .index = 3, .shift = 0, .width = 5 },
 };
 
 static struct ccu_div ar100_clk = {
@@ -51,17 +51,7 @@ static struct ccu_div ar100_clk = {
 
 static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0);
 
-static struct ccu_div r_apb1_clk = {
-       .div            = _SUNXI_CCU_DIV(0, 2),
-
-       .common         = {
-               .reg            = 0x00c,
-               .hw.init        = CLK_HW_INIT("r-apb1",
-                                             "r-ahb",
-                                             &ccu_div_ops,
-                                             0),
-       },
-};
+static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0);
 
 static struct ccu_div r_apb2_clk = {
        .div            = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
index 4646fdc61053b0c65d857a8424c71e5e517a969d..4c8c491b87c2777d636cdd15b603e0c45c72cc71 100644 (file)
@@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = {
 
 static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0);
 
-static struct ccu_div apb0_clk = {
-       .div            = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO),
-
-       .common         = {
-               .reg            = 0x0c,
-               .hw.init        = CLK_HW_INIT_HW("apb0",
-                                                &ahb0_clk.hw,
-                                                &ccu_div_ops,
-                                                0),
-       },
-};
-
-static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
+static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
 
 /*
  * Define the parent as an array that can be reused to save space
@@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = {
 
 static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
        &ar100_clk.common,
-       &a83t_apb0_clk.common,
+       &apb0_clk.common,
        &apb0_pio_clk.common,
        &apb0_ir_clk.common,
        &apb0_timer_clk.common,
@@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
        .hws    = {
                [CLK_AR100]             = &ar100_clk.common.hw,
                [CLK_AHB0]              = &ahb0_clk.hw,
-               [CLK_APB0]              = &a83t_apb0_clk.common.hw,
+               [CLK_APB0]              = &apb0_clk.common.hw,
                [CLK_APB0_PIO]          = &apb0_pio_clk.common.hw,
                [CLK_APB0_IR]           = &apb0_ir_clk.common.hw,
                [CLK_APB0_TIMER]        = &apb0_timer_clk.common.hw,
@@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
 
 static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
 {
-       /* Fix apb0 bus gate parents here */
-       apb0_gate_parent[0] = &a83t_apb0_clk.common.hw;
-
        sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
 }
 CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",
index 897490800102f3612b79d9c6e37efed0e74ef08e..23bfe1d12f217efdde824e3288d0727d29064e73 100644 (file)
@@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = {
                .reg            = 0x1f0,
                .features       = CCU_FEATURE_FIXED_PREDIV,
                .hw.init        = CLK_HW_INIT_PARENTS("outa", out_parents,
-                                                     &ccu_mp_ops, 0),
+                                                     &ccu_mp_ops,
+                                                     CLK_SET_RATE_PARENT),
        }
 };
 
@@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = {
                .reg            = 0x1f4,
                .features       = CCU_FEATURE_FIXED_PREDIV,
                .hw.init        = CLK_HW_INIT_PARENTS("outb", out_parents,
-                                                     &ccu_mp_ops, 0),
+                                                     &ccu_mp_ops,
+                                                     CLK_SET_RATE_PARENT),
        }
 };
 
index 5c779eec454b6edc734ced3a9fd7d5b1a020f4de..0e36ca3bf3d528d1788c195f84bdeea59ef5897b 100644 (file)
@@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
                [CLK_MBUS]              = &mbus_clk.common.hw,
                [CLK_MIPI_CSI]          = &mipi_csi_clk.common.hw,
        },
-       .num    = CLK_NUMBER,
+       .num    = CLK_PLL_DDR1 + 1,
 };
 
 static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
@@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
                [CLK_MBUS]              = &mbus_clk.common.hw,
                [CLK_MIPI_CSI]          = &mipi_csi_clk.common.hw,
        },
-       .num    = CLK_NUMBER,
+       .num    = CLK_I2S0 + 1,
 };
 
 static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
index b0160d305a6775a3721a5438b4223e8a77251367..108eeeedcbf7602dfbe55b40624ccae9786b27d0 100644 (file)
@@ -51,6 +51,4 @@
 
 #define CLK_PLL_DDR1           74
 
-#define CLK_NUMBER             (CLK_I2S0 + 1)
-
 #endif /* _CCU_SUN8I_H3_H_ */
index e6bd6d1ea0128525f76fb4b07f12e7a31054f133..f6cdce441cf7ac4e45eb9bb26576187c47d546e1 100644 (file)
@@ -231,8 +231,10 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
        periph_banks = banks;
 
        clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
-       if (!clks)
+       if (!clks) {
                kfree(periph_clk_enb_refcnt);
+               return NULL;
+       }
 
        clk_num = num;
 
index f65e16c4f3c4b8585ea56cc97633730a6882a01f..8d4c08b034bdde50e77b60bd15cacba7860698b2 100644 (file)
@@ -233,7 +233,6 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
        cinfo->iobase = of_iomap(node, 0);
        cinfo->dev = &pdev->dev;
        pm_runtime_enable(cinfo->dev);
-       pm_runtime_irq_safe(cinfo->dev);
 
        pm_runtime_get_sync(cinfo->dev);
        atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
index 4e54856ce2a5f3c79d36d7d650b5c538128f66d8..c4f15c4068c02b0cc37831543a33a2c87fe9aa51 100644 (file)
@@ -56,7 +56,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
        return get_cycles64();
 }
 
-static u64 riscv_sched_clock(void)
+static u64 notrace riscv_sched_clock(void)
 {
        return get_cycles64();
 }
index f1d170dcf4d310cfa7c936ab1638b93b67f109ca..aba591d57c67468bf323f0a9d70c53a830b0240f 100644 (file)
@@ -121,6 +121,8 @@ static const struct of_device_id blacklist[] __initconst = {
        { .compatible = "mediatek,mt8176", },
        { .compatible = "mediatek,mt8183", },
 
+       { .compatible = "nvidia,tegra20", },
+       { .compatible = "nvidia,tegra30", },
        { .compatible = "nvidia,tegra124", },
        { .compatible = "nvidia,tegra210", },
 
index de7e706efd460e86dea44c7b21fc5d078d6bf8f4..6deaaf5f05b5765598115132492edc4f654df9da 100644 (file)
@@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * pattern detection.
         */
        cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
-       if (cpu_data->interval_idx > INTERVALS)
+       if (cpu_data->interval_idx >= INTERVALS)
                cpu_data->interval_idx = 0;
 }
 
index 26754d0570ba9cffcf829b873ec2ecab69c2abe6..b846d73d9a855ceafb74129377c581460760821c 100644 (file)
@@ -40,7 +40,7 @@ struct sec_req {
        int req_id;
 
        /* Status of the SEC request */
-       int fake_busy;
+       atomic_t fake_busy;
 };
 
 /**
@@ -132,8 +132,8 @@ struct sec_debug_file {
 };
 
 struct sec_dfx {
-       u64 send_cnt;
-       u64 recv_cnt;
+       atomic64_t send_cnt;
+       atomic64_t recv_cnt;
 };
 
 struct sec_debug {
index 62b04e19067c3cfb5fb1856ab15885f9e7197237..0a5391fff485c43447498f837fb05f4f6803f4a9 100644 (file)
@@ -120,7 +120,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
                return;
        }
 
-       __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+       atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt);
 
        req->ctx->req_op->buf_unmap(req->ctx, req);
 
@@ -135,13 +135,13 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
        mutex_lock(&qp_ctx->req_lock);
        ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
        mutex_unlock(&qp_ctx->req_lock);
-       __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+       atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
 
        if (ret == -EBUSY)
                return -ENOBUFS;
 
        if (!ret) {
-               if (req->fake_busy)
+               if (atomic_read(&req->fake_busy))
                        ret = -EBUSY;
                else
                        ret = -EINPROGRESS;
@@ -641,7 +641,7 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
        if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
                sec_update_iv(req);
 
-       if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+       if (atomic_cmpxchg(&req->fake_busy, 1, 0) != 1)
                sk_req->base.complete(&sk_req->base, -EINPROGRESS);
 
        sk_req->base.complete(&sk_req->base, req->err_type);
@@ -672,9 +672,9 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
        }
 
        if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
-               req->fake_busy = 1;
+               atomic_set(&req->fake_busy, 1);
        else
-               req->fake_busy = 0;
+               atomic_set(&req->fake_busy, 0);
 
        ret = ctx->req_op->get_res(ctx, req);
        if (ret) {
index 74f0654028c968c7582ddb133ea30a74e13b2b74..ab742dfbab997562b9a6a9b55a2b1fe700797258 100644 (file)
@@ -608,6 +608,14 @@ static const struct file_operations sec_dbg_fops = {
        .write = sec_debug_write,
 };
 
+static int debugfs_atomic64_t_get(void *data, u64 *val)
+{
+        *val = atomic64_read((atomic64_t *)data);
+        return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL,
+                        "%lld\n");
+
 static int sec_core_debug_init(struct sec_dev *sec)
 {
        struct hisi_qm *qm = &sec->qm;
@@ -628,9 +636,11 @@ static int sec_core_debug_init(struct sec_dev *sec)
 
        debugfs_create_regset32("regs", 0444, tmp_d, regset);
 
-       debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+       debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt,
+                           &fops_atomic64_t_ro);
 
-       debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+       debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt,
+                           &fops_atomic64_t_ro);
 
        return 0;
 }
index defe1d4387105e4a4ff368b57d8a0fbcadb9fd61..35535833b6f78e314eea7dea48f31093b6790845 100644 (file)
@@ -83,7 +83,6 @@ config ARM_EXYNOS_BUS_DEVFREQ
        select DEVFREQ_GOV_PASSIVE
        select DEVFREQ_EVENT_EXYNOS_PPMU
        select PM_DEVFREQ_EVENT
-       select PM_OPP
        help
          This adds the common DEVFREQ driver for Exynos Memory bus. Exynos
          Memory bus has one more group of memory bus (e.g, MIF and INT block).
@@ -98,7 +97,7 @@ config ARM_TEGRA_DEVFREQ
                ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \
                ARCH_TEGRA_210_SOC || \
                COMPILE_TEST
-       select PM_OPP
+       depends on COMMON_CLK
        help
          This adds the DEVFREQ driver for the Tegra family of SoCs.
          It reads ACTMON counters of memory controllers and adjusts the
@@ -109,7 +108,6 @@ config ARM_TEGRA20_DEVFREQ
        depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST
        depends on COMMON_CLK
        select DEVFREQ_GOV_SIMPLE_ONDEMAND
-       select PM_OPP
        help
          This adds the DEVFREQ driver for the Tegra20 family of SoCs.
          It reads Memory Controller counters and adjusts the operating
@@ -121,7 +119,6 @@ config ARM_RK3399_DMC_DEVFREQ
        select DEVFREQ_EVENT_ROCKCHIP_DFI
        select DEVFREQ_GOV_SIMPLE_ONDEMAND
        select PM_DEVFREQ_EVENT
-       select PM_OPP
        help
           This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
           It sets the frequency for the memory controller and reads the usage counts
index fa626acdc9b961919208a316d719ad9d2f25118b..44af435628f805ec856769386625be62d437e1fb 100644 (file)
@@ -999,7 +999,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
 static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
        .nb_channels = 6,
        .transfer_ord_max = 5,
-       .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+       .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
+                JZ_SOC_DATA_BREAK_LINKS,
 };
 
 static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
index 1a422a8b43cfb29806026b0be2996dc1dee4aaa5..18c011e57592ebc838a30d118b7465d8c8a09a46 100644 (file)
@@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 
                descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
                                                 SZ_2M, &descs->hw, flags);
-               if (!descs->virt && (i > 0)) {
+               if (!descs->virt) {
                        int idx;
 
                        for (idx = 0; idx < i; idx++) {
+                               descs = &ioat_chan->descs[idx];
                                dma_free_coherent(to_dev(ioat_chan), SZ_2M,
                                                  descs->virt, descs->hw);
                                descs->virt = NULL;
index adecea51814f0a2cb6a26f8593bbf86e9c931e3d..c5c1aa0dcaeddc9c52a5a7f6004e2c20c58dd515 100644 (file)
@@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
                        c = p->vchan;
                        if (c && (tc1 & BIT(i))) {
                                spin_lock_irqsave(&c->vc.lock, flags);
-                               vchan_cookie_complete(&p->ds_run->vd);
-                               p->ds_done = p->ds_run;
-                               p->ds_run = NULL;
+                               if (p->ds_run != NULL) {
+                                       vchan_cookie_complete(&p->ds_run->vd);
+                                       p->ds_done = p->ds_run;
+                                       p->ds_run = NULL;
+                               }
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                        if (c && (tc2 & BIT(i))) {
@@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
        if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
                return -EAGAIN;
 
+       /* Avoid losing track of  ds_run if a transaction is in flight */
+       if (c->phy->ds_run)
+               return -EAGAIN;
+
        if (vd) {
                struct k3_dma_desc_sw *ds =
                        container_of(vd, struct k3_dma_desc_sw, vd);
index ec4adf4260a098488fff4471e82c2f1a89b90c54..256fc662c500f389fc898f2e5bc4da7a708695ae 100644 (file)
@@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg)
                dmaengine_desc_get_callback(&vd->tx, &cb);
 
                list_del(&vd->node);
-               vchan_vdesc_fini(vd);
-
                dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+               vchan_vdesc_fini(vd);
        }
 }
 
index 413cdb4a591db1758d799277646bcce6601c31f1..c0cc72a3b2be93e3dd3fad4f13b7cf532e37c7ac 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/edac.h>
 #include <linux/platform_device.h>
 #include "edac_module.h"
-#include <asm/sifive_l2_cache.h>
+#include <soc/sifive/sifive_l2_cache.h>
 
 #define DRVNAME "sifive_edac"
 
index 5b7ef89eb70143875ad4dd6a1d07da0bfc67c973..ed10da5313e8652b3ff72bc16283a9315eb9370e 100644 (file)
@@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev)
        fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
                                    TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
        if (IS_ERR(fw_shm_pool)) {
-               tee_client_close_context(pvt_data.ctx);
                dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
                err = PTR_ERR(fw_shm_pool);
                goto out_sess;
index d4077db6dc97fe4dac16a7446f2d37cf060c22f0..5d4f84781aa036bf83d7b842442d24e438111649 100644 (file)
@@ -17,7 +17,7 @@ static const struct console *earlycon_console __initdata;
 static const struct font_desc *font;
 static u32 efi_x, efi_y;
 static u64 fb_base;
-static pgprot_t fb_prot;
+static bool fb_wb;
 static void *efi_fb;
 
 /*
@@ -33,10 +33,8 @@ static int __init efi_earlycon_remap_fb(void)
        if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED))
                return 0;
 
-       if (pgprot_val(fb_prot) == pgprot_val(PAGE_KERNEL))
-               efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WB);
-       else
-               efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WC);
+       efi_fb = memremap(fb_base, screen_info.lfb_size,
+                         fb_wb ? MEMREMAP_WB : MEMREMAP_WC);
 
        return efi_fb ? 0 : -ENOMEM;
 }
@@ -53,9 +51,12 @@ late_initcall(efi_earlycon_unmap_fb);
 
 static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
 {
+       pgprot_t fb_prot;
+
        if (efi_fb)
                return efi_fb + start;
 
+       fb_prot = fb_wb ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
        return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
 }
 
@@ -215,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
        if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
                fb_base |= (u64)screen_info.ext_lfb_base << 32;
 
-       if (opt && !strcmp(opt, "ram"))
-               fb_prot = PAGE_KERNEL;
-       else
-               fb_prot = pgprot_writecombine(PAGE_KERNEL);
+       fb_wb = opt && !strcmp(opt, "ram");
 
        si = &screen_info;
        xres = si->lfb_width;
index 35edd7cfb6a13d2002101d3e2b6874180d52c3da..97378cf96a2ee29198c9e8d8f9c70d46f0a27c6c 100644 (file)
@@ -33,7 +33,7 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
 {
        efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
        efi_status_t status;
-       struct efi_rng_protocol *rng;
+       struct efi_rng_protocol *rng = NULL;
 
        status = efi_call_early(locate_protocol, &rng_proto, NULL,
                                (void **)&rng);
@@ -162,8 +162,8 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
        efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
        efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
        efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
-       struct efi_rng_protocol *rng;
-       struct linux_efi_random_seed *seed;
+       struct efi_rng_protocol *rng = NULL;
+       struct linux_efi_random_seed *seed = NULL;
        efi_status_t status;
 
        status = efi_call_early(locate_protocol, &rng_proto, NULL,
index 8adffd42f8cb0559031837899b25c7f6ba23c313..4b6d2ef15c39dde4fab406bfe68167cd8f71ec0b 100644 (file)
@@ -553,8 +553,8 @@ config GPIO_TEGRA
 
 config GPIO_TEGRA186
        tristate "NVIDIA Tegra186 GPIO support"
-       default ARCH_TEGRA_186_SOC
-       depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
+       default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC
+       depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST
        depends on OF_GPIO
        select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
@@ -573,7 +573,6 @@ config GPIO_THUNDERX
        tristate "Cavium ThunderX/OCTEON-TX GPIO"
        depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
        depends on PCI_MSI
-       select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
        select IRQ_FASTEOI_HIERARCHY_HANDLERS
        help
@@ -1148,6 +1147,7 @@ config GPIO_MADERA
 config GPIO_MAX77620
        tristate "GPIO support for PMIC MAX77620 and MAX20024"
        depends on MFD_MAX77620
+       select GPIOLIB_IRQCHIP
        help
          GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor.
          MAX77620 PMIC has 8 pins that can be configured as GPIOs. The
index 7e99860ca447ed5f5a345d5cb070955d5cbb3c60..8319812593e31295c40638e3667cfa5a1fda2234 100644 (file)
@@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
                return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
        default:
                /* acturally if code runs to here, it's an error case */
-               BUG_ON(1);
+               BUG();
        }
 }
 
index 56d647a30e3eafee0178b6d416952e697635a745..94b8d3ae27bc306f13d981f0027f43a75a403e81 100644 (file)
@@ -156,7 +156,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
        mutex_lock(&chip->lock);
 
        if (test_bit(FLAG_REQUESTED, &desc->flags) &&
-               !test_bit(FLAG_IS_OUT, &desc->flags)) {
+           !test_bit(FLAG_IS_OUT, &desc->flags)) {
                curr = __gpio_mockup_get(chip, offset);
                if (curr == value)
                        goto out;
@@ -165,7 +165,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
                irq_type = irq_get_trigger_type(irq);
 
                if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
-                       (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
+                   (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
                        irq_sim_fire(sim, offset);
        }
 
@@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
        int direction;
 
        mutex_lock(&chip->lock);
-       direction = !chip->lines[offset].dir;
+       direction = chip->lines[offset].dir;
        mutex_unlock(&chip->lock);
 
        return direction;
@@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
        struct gpio_chip *gc;
        struct device *dev;
        const char *name;
-       int rv, base;
+       int rv, base, i;
        u16 ngpio;
 
        dev = &pdev->dev;
@@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
        if (!chip->lines)
                return -ENOMEM;
 
+       for (i = 0; i < gc->ngpio; i++)
+               chip->lines[i].dir = GPIO_LINE_DIRECTION_IN;
+
        if (device_property_read_bool(dev, "named-gpio-lines")) {
                rv = gpio_mockup_name_lines(dev, chip);
                if (rv)
index f1e164cecff80375ddcfa89017d15fe2af277886..5ae30de3490ac84118da2da0b169f8f24b9da4b9 100644 (file)
@@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        gc = &mpc8xxx_gc->gc;
+       gc->parent = &pdev->dev;
 
        if (of_property_read_bool(np, "little-endian")) {
                ret = bgpio_init(gc, &pdev->dev, 4,
index 6652bee01966dc1e2c883c0909820b374599f293..9853547e72766678e35f2f4fbab9e1fb908675bb 100644 (file)
@@ -568,16 +568,18 @@ static void pca953x_irq_mask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ);
+       clear_bit(hwirq, chip->irq_mask);
 }
 
 static void pca953x_irq_unmask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ);
+       set_bit(hwirq, chip->irq_mask);
 }
 
 static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -635,8 +637,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
-       int bank_nb = d->hwirq / BANK_SZ;
-       u8 mask = BIT(d->hwirq % BANK_SZ);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
        if (!(type & IRQ_TYPE_EDGE_BOTH)) {
                dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -644,15 +645,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
                return -EINVAL;
        }
 
-       if (type & IRQ_TYPE_EDGE_FALLING)
-               chip->irq_trig_fall[bank_nb] |= mask;
-       else
-               chip->irq_trig_fall[bank_nb] &= ~mask;
-
-       if (type & IRQ_TYPE_EDGE_RISING)
-               chip->irq_trig_raise[bank_nb] |= mask;
-       else
-               chip->irq_trig_raise[bank_nb] &= ~mask;
+       assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
+       assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
 
        return 0;
 }
@@ -661,10 +655,10 @@ static void pca953x_irq_shutdown(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
-       u8 mask = BIT(d->hwirq % BANK_SZ);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
-       chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
+       clear_bit(hwirq, chip->irq_trig_raise);
+       clear_bit(hwirq, chip->irq_trig_fall);
 }
 
 static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending)
index d08d86a22b1f531854b85d571870a6b99b4ab359..46277047904571d2bd12f34d7a66b79bb349db4c 100644 (file)
@@ -53,6 +53,7 @@ struct thunderx_line {
 struct thunderx_gpio {
        struct gpio_chip        chip;
        u8 __iomem              *register_base;
+       struct irq_domain       *irqd;
        struct msix_entry       *msix_entries;  /* per line MSI-X */
        struct thunderx_line    *line_entries;  /* per line irq info */
        raw_spinlock_t          lock;
@@ -285,60 +286,54 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
        }
 }
 
-static void thunderx_gpio_irq_ack(struct irq_data *d)
+static void thunderx_gpio_irq_ack(struct irq_data *data)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
 
        writeq(GPIO_INTR_INTR,
-              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+              txline->txgpio->register_base + intr_reg(txline->line));
 }
 
-static void thunderx_gpio_irq_mask(struct irq_data *d)
+static void thunderx_gpio_irq_mask(struct irq_data *data)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
 
        writeq(GPIO_INTR_ENA_W1C,
-              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+              txline->txgpio->register_base + intr_reg(txline->line));
 }
 
-static void thunderx_gpio_irq_mask_ack(struct irq_data *d)
+static void thunderx_gpio_irq_mask_ack(struct irq_data *data)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
 
        writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
-              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+              txline->txgpio->register_base + intr_reg(txline->line));
 }
 
-static void thunderx_gpio_irq_unmask(struct irq_data *d)
+static void thunderx_gpio_irq_unmask(struct irq_data *data)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
 
        writeq(GPIO_INTR_ENA_W1S,
-              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+              txline->txgpio->register_base + intr_reg(txline->line));
 }
 
-static int thunderx_gpio_irq_set_type(struct irq_data *d,
+static int thunderx_gpio_irq_set_type(struct irq_data *data,
                                      unsigned int flow_type)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
-       struct thunderx_line *txline =
-               &txgpio->line_entries[irqd_to_hwirq(d)];
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct thunderx_gpio *txgpio = txline->txgpio;
        u64 bit_cfg;
 
-       irqd_set_trigger_type(d, flow_type);
+       irqd_set_trigger_type(data, flow_type);
 
        bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;
 
        if (flow_type & IRQ_TYPE_EDGE_BOTH) {
-               irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+               irq_set_handler_locked(data, handle_fasteoi_ack_irq);
                bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
        } else {
-               irq_set_handler_locked(d, handle_fasteoi_mask_irq);
+               irq_set_handler_locked(data, handle_fasteoi_mask_irq);
        }
 
        raw_spin_lock(&txgpio->lock);
@@ -367,6 +362,33 @@ static void thunderx_gpio_irq_disable(struct irq_data *data)
        irq_chip_disable_parent(data);
 }
 
+static int thunderx_gpio_irq_request_resources(struct irq_data *data)
+{
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct thunderx_gpio *txgpio = txline->txgpio;
+       int r;
+
+       r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
+       if (r)
+               return r;
+
+       r = irq_chip_request_resources_parent(data);
+       if (r)
+               gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+
+       return r;
+}
+
+static void thunderx_gpio_irq_release_resources(struct irq_data *data)
+{
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct thunderx_gpio *txgpio = txline->txgpio;
+
+       irq_chip_release_resources_parent(data);
+
+       gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+}
+
 /*
  * Interrupts are chained from underlying MSI-X vectors.  We have
  * these irq_chip functions to be able to handle level triggering
@@ -383,24 +405,50 @@ static struct irq_chip thunderx_gpio_irq_chip = {
        .irq_unmask             = thunderx_gpio_irq_unmask,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
+       .irq_request_resources  = thunderx_gpio_irq_request_resources,
+       .irq_release_resources  = thunderx_gpio_irq_release_resources,
        .irq_set_type           = thunderx_gpio_irq_set_type,
 
        .flags                  = IRQCHIP_SET_TYPE_MASKED
 };
 
-static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
-                                              unsigned int child,
-                                              unsigned int child_type,
-                                              unsigned int *parent,
-                                              unsigned int *parent_type)
+static int thunderx_gpio_irq_translate(struct irq_domain *d,
+                                      struct irq_fwspec *fwspec,
+                                      irq_hw_number_t *hwirq,
+                                      unsigned int *type)
 {
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
-
-       *parent = txgpio->base_msi + (2 * child);
-       *parent_type = IRQ_TYPE_LEVEL_HIGH;
+       struct thunderx_gpio *txgpio = d->host_data;
+
+       if (WARN_ON(fwspec->param_count < 2))
+               return -EINVAL;
+       if (fwspec->param[0] >= txgpio->chip.ngpio)
+               return -EINVAL;
+       *hwirq = fwspec->param[0];
+       *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
        return 0;
 }
 
+static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
+                                  unsigned int nr_irqs, void *arg)
+{
+       struct thunderx_line *txline = arg;
+
+       return irq_domain_set_hwirq_and_chip(d, virq, txline->line,
+                                            &thunderx_gpio_irq_chip, txline);
+}
+
+static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
+       .alloc          = thunderx_gpio_irq_alloc,
+       .translate      = thunderx_gpio_irq_translate
+};
+
+static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+       struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+       return irq_find_mapping(txgpio->irqd, offset);
+}
+
 static int thunderx_gpio_probe(struct pci_dev *pdev,
                               const struct pci_device_id *id)
 {
@@ -408,7 +456,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        struct device *dev = &pdev->dev;
        struct thunderx_gpio *txgpio;
        struct gpio_chip *chip;
-       struct gpio_irq_chip *girq;
        int ngpio, i;
        int err = 0;
 
@@ -453,8 +500,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        }
 
        txgpio->msix_entries = devm_kcalloc(dev,
-                                           ngpio, sizeof(struct msix_entry),
-                                           GFP_KERNEL);
+                                         ngpio, sizeof(struct msix_entry),
+                                         GFP_KERNEL);
        if (!txgpio->msix_entries) {
                err = -ENOMEM;
                goto out;
@@ -495,6 +542,27 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        if (err < 0)
                goto out;
 
+       /*
+        * Push GPIO specific irqdomain on hierarchy created as a side
+        * effect of the pci_enable_msix()
+        */
+       txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
+                                                  0, 0, of_node_to_fwnode(dev->of_node),
+                                                  &thunderx_gpio_irqd_ops, txgpio);
+       if (!txgpio->irqd) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       /* Push on irq_data and the domain for each line. */
+       for (i = 0; i < ngpio; i++) {
+               err = irq_domain_push_irq(txgpio->irqd,
+                                         txgpio->msix_entries[i].vector,
+                                         &txgpio->line_entries[i]);
+               if (err < 0)
+                       dev_err(dev, "irq_domain_push_irq: %d\n", err);
+       }
+
        chip->label = KBUILD_MODNAME;
        chip->parent = dev;
        chip->owner = THIS_MODULE;
@@ -509,28 +577,11 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        chip->set = thunderx_gpio_set;
        chip->set_multiple = thunderx_gpio_set_multiple;
        chip->set_config = thunderx_gpio_set_config;
-       girq = &chip->irq;
-       girq->chip = &thunderx_gpio_irq_chip;
-       girq->fwnode = of_node_to_fwnode(dev->of_node);
-       girq->parent_domain =
-               irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
-       girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
-       girq->handler = handle_bad_irq;
-       girq->default_type = IRQ_TYPE_NONE;
-
+       chip->to_irq = thunderx_gpio_to_irq;
        err = devm_gpiochip_add_data(dev, chip, txgpio);
        if (err)
                goto out;
 
-       /* Push on irq_data and the domain for each line. */
-       for (i = 0; i < ngpio; i++) {
-               err = irq_domain_push_irq(chip->irq.domain,
-                                         txgpio->msix_entries[i].vector,
-                                         chip);
-               if (err < 0)
-                       dev_err(dev, "irq_domain_push_irq: %d\n", err);
-       }
-
        dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
                 ngpio, chip->base);
        return 0;
@@ -545,10 +596,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev)
        struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);
 
        for (i = 0; i < txgpio->chip.ngpio; i++)
-               irq_domain_pop_irq(txgpio->chip.irq.domain,
+               irq_domain_pop_irq(txgpio->irqd,
                                   txgpio->msix_entries[i].vector);
 
-       irq_domain_remove(txgpio->chip.irq.domain);
+       irq_domain_remove(txgpio->irqd);
 
        pci_set_drvdata(pdev, NULL);
 }
index 773e5c24309e49b3f989a453df40373311afc15b..b21c2e436b61023ca38a83a1e5b690359f2191b7 100644 (file)
@@ -280,7 +280,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit iproc_gpio_remove(struct platform_device *pdev)
+static int iproc_gpio_remove(struct platform_device *pdev)
 {
        struct iproc_gpio_chip *chip;
 
index 08d7c3b3203869b5dd75cc42a4355746ea19ec48..c8af34a6368f4f69f9960fff7f8c27a2c8ddea8b 100644 (file)
@@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable)
        unsigned long flags;
 
        local_irq_save(flags);
-       RSR_CPENABLE(*cpenable);
-       WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP));
-
+       *cpenable = xtensa_get_sr(cpenable);
+       xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable);
        return flags;
 }
 
 static inline void disable_cp(unsigned long flags, unsigned long cpenable)
 {
-       WSR_CPENABLE(cpenable);
+       xtensa_set_sr(cpenable, cpenable);
        local_irq_restore(flags);
 }
 
index 4c3f6370eab4c104a8975356ea0f652e3f92372e..05ba16fffdad0e7e285a6cf520f87ff7630a917e 100644 (file)
@@ -684,6 +684,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
        unsigned int bank_num;
 
        for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
+               writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
+                               ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
                writel_relaxed(gpio->context.datalsw[bank_num],
                               gpio->base_addr +
                               ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
@@ -693,9 +695,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
                writel_relaxed(gpio->context.dirm[bank_num],
                               gpio->base_addr +
                               ZYNQ_GPIO_DIRM_OFFSET(bank_num));
-               writel_relaxed(gpio->context.int_en[bank_num],
-                              gpio->base_addr +
-                              ZYNQ_GPIO_INTEN_OFFSET(bank_num));
                writel_relaxed(gpio->context.int_type[bank_num],
                               gpio->base_addr +
                               ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
@@ -705,6 +704,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
                writel_relaxed(gpio->context.int_any[bank_num],
                               gpio->base_addr +
                               ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+               writel_relaxed(~(gpio->context.int_en[bank_num]),
+                              gpio->base_addr +
+                              ZYNQ_GPIO_INTEN_OFFSET(bank_num));
        }
 }
 
index d30e57dc755cf6114d984b65cf56175e78757bb9..31fee5e918b7d14fafde0477231316fee14dbb53 100644 (file)
 #include "gpiolib.h"
 #include "gpiolib-acpi.h"
 
+#define QUIRK_NO_EDGE_EVENTS_ON_BOOT           0x01l
+#define QUIRK_NO_WAKEUP                                0x02l
+
 static int run_edge_events_on_boot = -1;
 module_param(run_edge_events_on_boot, int, 0444);
 MODULE_PARM_DESC(run_edge_events_on_boot,
                 "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
 
+static int honor_wakeup = -1;
+module_param(honor_wakeup, int, 0444);
+MODULE_PARM_DESC(honor_wakeup,
+                "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
+
 /**
  * struct acpi_gpio_event - ACPI GPIO event handler data
  *
@@ -281,7 +289,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        event->handle = evt_handle;
        event->handler = handler;
        event->irq = irq;
-       event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+       event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
        event->pin = pin;
        event->desc = desc;
 
@@ -1309,7 +1317,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
 /* We must use _sync so that this runs after the first deferred_probe run */
 late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
 
-static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+static const struct dmi_system_id gpiolib_acpi_quirks[] = {
        {
                /*
                 * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
@@ -1319,7 +1327,8 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
-               }
+               },
+               .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
        },
        {
                /*
@@ -1331,20 +1340,52 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
-               }
+               },
+               .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+       },
+       {
+               /*
+                * Various HP X2 10 Cherry Trail models use an external
+                * embedded-controller connected via I2C + an ACPI GPIO
+                * event handler. The embedded controller generates various
+                * spurious wakeup events when suspended. So disable wakeup
+                * for its handler (it uses the only ACPI GPIO event handler).
+                * This breaks wakeup when opening the lid, the user needs
+                * to press the power-button to wakeup the system. The
+                * alternative is suspend simply not working, which is worse.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+               },
+               .driver_data = (void *)QUIRK_NO_WAKEUP,
        },
        {} /* Terminating entry */
 };
 
 static int acpi_gpio_setup_params(void)
 {
+       const struct dmi_system_id *id;
+       long quirks = 0;
+
+       id = dmi_first_match(gpiolib_acpi_quirks);
+       if (id)
+               quirks = (long)id->driver_data;
+
        if (run_edge_events_on_boot < 0) {
-               if (dmi_check_system(run_edge_events_on_boot_blacklist))
+               if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
                        run_edge_events_on_boot = 0;
                else
                        run_edge_events_on_boot = 1;
        }
 
+       if (honor_wakeup < 0) {
+               if (quirks & QUIRK_NO_WAKEUP)
+                       honor_wakeup = 0;
+               else
+                       honor_wakeup = 1;
+       }
+
        return 0;
 }
 
index 9913886ede904bb09e40cca32792a01b39bb2822..78a16e42f222ebd81dc38f80da606f10daaef57e 100644 (file)
@@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
        chip = gpiod_to_chip(desc);
        offset = gpio_chip_hwgpio(desc);
 
+       /*
+        * Open drain emulation using input mode may incorrectly report
+        * input here, fix that up.
+        */
+       if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
+           test_bit(FLAG_IS_OUT, &desc->flags))
+               return 0;
+
        if (!chip->get_direction)
                return -ENOTSUPP;
 
@@ -4472,8 +4480,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
 
                if (chip->ngpio <= p->chip_hwnum) {
                        dev_err(dev,
-                               "requested GPIO %d is out of range [0..%d] for chip %s\n",
-                               idx, chip->ngpio, chip->label);
+                               "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
+                               idx, p->chip_hwnum, chip->ngpio - 1,
+                               chip->label);
                        return ERR_PTR(-EINVAL);
                }
 
index a97fb759e2f42a94b004743219881f47e79a20ee..3e35a8f2c5e553e4c714deff5e838ac1510a2f59 100644 (file)
@@ -613,7 +613,17 @@ static bool amdgpu_atpx_detect(void)
        bool d3_supported = false;
        struct pci_dev *parent_pdev;
 
-       while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+               vga_count++;
+
+               has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+               parent_pdev = pci_upstream_bridge(pdev);
+               d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+               amdgpu_atpx_get_quirks(pdev);
+       }
+
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
                vga_count++;
 
                has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
index 0ffc9447b573a3f532d1fb37337d240052bdaa18..01a793a0cbf7dbf36169d556715340307620381d 100644 (file)
@@ -142,7 +142,7 @@ int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = 0;
 int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
-int amdgpu_noretry = 1;
+int amdgpu_noretry;
 int amdgpu_force_asic_type = -1;
 
 struct amdgpu_mgpu_info mgpu_info = {
@@ -588,7 +588,7 @@ MODULE_PARM_DESC(mes,
 module_param_named(mes, amdgpu_mes, int, 0444);
 
 MODULE_PARM_DESC(noretry,
-       "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
+       "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
 module_param_named(noretry, amdgpu_noretry, int, 0644);
 
 /**
@@ -1359,7 +1359,8 @@ static struct drm_driver kms_driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_ATOMIC |
            DRIVER_GEM |
-           DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+           DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
+           DRIVER_SYNCOBJ_TIMELINE,
        .load = amdgpu_driver_load_kms,
        .open = amdgpu_driver_open_kms,
        .postclose = amdgpu_driver_postclose_kms,
index 44be3a45b25eaf2453a649d244365f2f5a727eb5..e1b8d8daeafcb6716e2854e2785c2221eeec115f 100644 (file)
@@ -1488,7 +1488,7 @@ static int psp_np_fw_load(struct psp_context *psp)
 
                /* Start rlc autoload after psp recieved all the gfx firmware */
                if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
-                   AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
+                   AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
                        ret = psp_rlc_autoload(psp);
                        if (ret) {
                                DRM_ERROR("Failed to start rlc autoload\n");
index 410587b950f3c8490c881c982281e1e45aae2b20..914acecda5cfab806943ff242e5d229d231cd3c0 100644 (file)
@@ -292,10 +292,10 @@ enum AMDGPU_UCODE_ID {
        AMDGPU_UCODE_ID_CP_MEC2_JT,
        AMDGPU_UCODE_ID_CP_MES,
        AMDGPU_UCODE_ID_CP_MES_DATA,
-       AMDGPU_UCODE_ID_RLC_G,
        AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
        AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
        AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+       AMDGPU_UCODE_ID_RLC_G,
        AMDGPU_UCODE_ID_STORAGE,
        AMDGPU_UCODE_ID_SMC,
        AMDGPU_UCODE_ID_UVD,
index 66328ffa395af240cb6f069206a4a0217de69aeb..97105a5bb246c34d1306819d123d61d1e615fe77 100644 (file)
@@ -1052,17 +1052,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
        case CHIP_VEGA20:
                break;
        case CHIP_RAVEN:
-               /* Disable GFXOFF on original raven.  There are combinations
-                * of sbios and platforms that are not stable.
-                */
-               if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
-                       adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-               else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
-                        &&((adev->gfx.rlc_fw_version != 106 &&
-                            adev->gfx.rlc_fw_version < 531) ||
-                           (adev->gfx.rlc_fw_version == 53815) ||
-                           (adev->gfx.rlc_feature_version < 1) ||
-                           !adev->gfx.rlc.is_rlc_v2_1))
+               if (!(adev->rev_id >= 0x8 ||
+                     adev->pdev->device == 0x15d8) &&
+                   (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
+                    !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
                        adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
                if (adev->pm.pp_feature & PP_GFXOFF_MASK)
index 4ef4d31f52318086961e6637e9a32ece34778f0b..2f52b7f4d25c828acf162ba572d680ecc355d070 100644 (file)
@@ -254,7 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
-       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
 };
 
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
index 7aac9568d3bec2a95e5ef422a567a27f970ab83d..803e59d9741119b9790bb471fb395ac575f1cb49 100644 (file)
@@ -3356,27 +3356,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
        return color_space;
 }
 
-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
-{
-       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-               return;
-
-       timing_out->display_color_depth--;
-}
-
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
-                                               const struct drm_display_info *info)
+static bool adjust_colour_depth_from_display_info(
+       struct dc_crtc_timing *timing_out,
+       const struct drm_display_info *info)
 {
+       enum dc_color_depth depth = timing_out->display_color_depth;
        int normalized_clk;
-       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-               return;
        do {
                normalized_clk = timing_out->pix_clk_100hz / 10;
                /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
                if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
                        normalized_clk /= 2;
                /* Adjusting pix clock following on HDMI spec based on colour depth */
-               switch (timing_out->display_color_depth) {
+               switch (depth) {
+               case COLOR_DEPTH_888:
+                       break;
                case COLOR_DEPTH_101010:
                        normalized_clk = (normalized_clk * 30) / 24;
                        break;
@@ -3387,14 +3381,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
                        normalized_clk = (normalized_clk * 48) / 24;
                        break;
                default:
-                       return;
+                       /* The above depths are the only ones valid for HDMI. */
+                       return false;
                }
-               if (normalized_clk <= info->max_tmds_clock)
-                       return;
-               reduce_mode_colour_depth(timing_out);
-
-       } while (timing_out->display_color_depth > COLOR_DEPTH_888);
-
+               if (normalized_clk <= info->max_tmds_clock) {
+                       timing_out->display_color_depth = depth;
+                       return true;
+               }
+       } while (--depth > COLOR_DEPTH_666);
+       return false;
 }
 
 static void fill_stream_properties_from_drm_display_mode(
@@ -3474,8 +3469,14 @@ static void fill_stream_properties_from_drm_display_mode(
 
        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-       if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
-               adjust_colour_depth_from_display_info(timing_out, info);
+       if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+               if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+                   drm_mode_is_420_also(info, mode_in) &&
+                   timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+                       timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+                       adjust_colour_depth_from_display_info(timing_out, info);
+               }
+       }
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
index 62d8289abb4efb2f0654d33dc8270abf2bff7dc0..4619f94f0ac78e138503345c54aa216b21b0c33c 100644 (file)
@@ -817,8 +817,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
                }
 
                case SIGNAL_TYPE_EDP: {
-                       read_current_link_settings_on_detect(link);
                        detect_edp_sink_caps(link);
+                       read_current_link_settings_on_detect(link);
                        sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                        sink_caps.signal = SIGNAL_TYPE_EDP;
                        break;
index 5ff7ccedfbed45212865c75bad0289f42688d8e9..a23729d3174b4e205c3bb67007879b0f526e7e61 100644 (file)
@@ -866,6 +866,7 @@ static int smu_sw_init(void *handle)
        smu->smu_baco.platform_support = false;
 
        mutex_init(&smu->sensor_lock);
+       mutex_init(&smu->metrics_lock);
 
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
index cc71a1078a7a69204d6f2ba295a425c7f4fb9777..472e9fed411a4d087a1fab5add0f6031a9123e21 100644 (file)
@@ -862,18 +862,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu,
        struct smu_table_context *smu_table= &smu->smu_table;
        int ret = 0;
 
+       mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time ||
             time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
                ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                (void *)smu_table->metrics_table, false);
                if (ret) {
                        pr_info("Failed to export SMU metrics table!\n");
+                       mutex_unlock(&smu->metrics_lock);
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }
 
        memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+       mutex_unlock(&smu->metrics_lock);
 
        return ret;
 }
index ac9758305ab3be1063025e67b126e87f81aeab79..41fce75b263f7b7d6d5c665f93a72e6259672c73 100644 (file)
@@ -349,6 +349,7 @@ struct smu_context
        const struct pptable_funcs      *ppt_funcs;
        struct mutex                    mutex;
        struct mutex                    sensor_lock;
+       struct mutex                    metrics_lock;
        uint64_t pool_size;
 
        struct smu_table_context        smu_table;
index 4a14fd1f9fd59ad7db9cd4727c91ef44ccffd297..ca62e92e5a4f303ace3810a7ad134f11281ec795 100644 (file)
@@ -562,17 +562,20 @@ static int navi10_get_metrics_table(struct smu_context *smu,
        struct smu_table_context *smu_table= &smu->smu_table;
        int ret = 0;
 
+       mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
                ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                (void *)smu_table->metrics_table, false);
                if (ret) {
                        pr_info("Failed to export SMU metrics table!\n");
+                       mutex_unlock(&smu->metrics_lock);
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }
 
        memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+       mutex_unlock(&smu->metrics_lock);
 
        return ret;
 }
index 60b9ff097142639d18fbc037f6cb2cfcfa2db3d8..0d3a3b0a934e9a767ebe98fa105c8ac80f39e863 100644 (file)
@@ -1678,17 +1678,20 @@ static int vega20_get_metrics_table(struct smu_context *smu,
        struct smu_table_context *smu_table= &smu->smu_table;
        int ret = 0;
 
+       mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
                ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                (void *)smu_table->metrics_table, false);
                if (ret) {
                        pr_info("Failed to export SMU metrics table!\n");
+                       mutex_unlock(&smu->metrics_lock);
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }
 
        memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+       mutex_unlock(&smu->metrics_lock);
 
        return ret;
 }
index 875a3a9eabfa1e08068dd265c5dbfc7cf3f36411..7d0e7b031e447f24a83e9197d92879754eab4169 100644 (file)
@@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
        .get_modes = malidp_mw_connector_get_modes,
        .mode_valid = malidp_mw_connector_mode_valid,
 };
index 273dd80fabf3cadb53d30be128fbd29c69f969aa..5a61a5596912c4e83dc8396cc369dc39f416bc5d 100644 (file)
@@ -393,7 +393,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
                        memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
                        idx += req->u.i2c_read.transactions[i].num_bytes;
 
-                       buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+                       buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
                        buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
                        idx++;
                }
@@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
                    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
                        mstb->tx_slots[txmsg->seqno] = NULL;
                }
+               mgr->is_waiting_for_dwn_reply = false;
+
        }
 out:
        if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1199,6 +1201,7 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
        }
        mutex_unlock(&mgr->qlock);
 
+       drm_dp_mst_kick_tx(mgr);
        return ret;
 }
 
@@ -2318,7 +2321,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 {
        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
        struct drm_dp_mst_port *port;
-       int old_ddps, ret;
+       int old_ddps, old_input, ret, i;
        u8 new_pdt;
        bool dowork = false, create_connector = false;
 
@@ -2349,6 +2352,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        }
 
        old_ddps = port->ddps;
+       old_input = port->input;
        port->input = conn_stat->input_port;
        port->mcs = conn_stat->message_capability_status;
        port->ldps = conn_stat->legacy_device_plug_status;
@@ -2373,6 +2377,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
                dowork = false;
        }
 
+       if (!old_input && old_ddps != port->ddps && !port->ddps) {
+               for (i = 0; i < mgr->max_payloads; i++) {
+                       struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+                       struct drm_dp_mst_port *port_validated;
+
+                       if (!vcpi)
+                               continue;
+
+                       port_validated =
+                               container_of(vcpi, struct drm_dp_mst_port, vcpi);
+                       port_validated =
+                               drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+                       if (!port_validated) {
+                               mutex_lock(&mgr->payload_lock);
+                               vcpi->num_slots = 0;
+                               mutex_unlock(&mgr->payload_lock);
+                       } else {
+                               drm_dp_mst_topology_put_port(port_validated);
+                       }
+               }
+       }
+
        if (port->connector)
                drm_modeset_unlock(&mgr->base.lock);
        else if (create_connector)
@@ -2718,9 +2744,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
        ret = process_single_tx_qlock(mgr, txmsg, false);
        if (ret == 1) {
                /* txmsg is sent it should be in the slots now */
+               mgr->is_waiting_for_dwn_reply = true;
                list_del(&txmsg->next);
        } else if (ret) {
                DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+               mgr->is_waiting_for_dwn_reply = false;
                list_del(&txmsg->next);
                if (txmsg->seqno != -1)
                        txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2760,7 +2788,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
        }
 
-       if (list_is_singular(&mgr->tx_msg_downq))
+       if (list_is_singular(&mgr->tx_msg_downq) &&
+           !mgr->is_waiting_for_dwn_reply)
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
@@ -3678,6 +3707,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
        mutex_lock(&mgr->qlock);
        txmsg->state = DRM_DP_SIDEBAND_TX_RX;
        mstb->tx_slots[slot] = NULL;
+       mgr->is_waiting_for_dwn_reply = false;
        mutex_unlock(&mgr->qlock);
 
        wake_up_all(&mgr->tx_waitq);
@@ -3687,6 +3717,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 no_msg:
        drm_dp_mst_topology_put_mstb(mstb);
 clear_down_rep_recv:
+       mutex_lock(&mgr->qlock);
+       mgr->is_waiting_for_dwn_reply = false;
+       mutex_unlock(&mgr->qlock);
        memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 
        return 0;
@@ -4497,7 +4530,7 @@ static void drm_dp_tx_work(struct work_struct *work)
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
        mutex_lock(&mgr->qlock);
-       if (!list_empty(&mgr->tx_msg_downq))
+       if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
index 8ebeccdeed2334ebb06f9562668705074c7342b1..d8e8f3960f4dbd4f86139957fbb11351ceffb6f2 100644 (file)
@@ -1283,7 +1283,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
         * Changes struct fb_var_screeninfo are currently not pushed back
         * to KMS, hence fail if different settings are requested.
         */
-       if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
+       if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
            var->xres > fb->width || var->yres > fb->height ||
            var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
                DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
@@ -1308,6 +1308,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
                drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
        }
 
+       /*
+        * Likewise, bits_per_pixel should be rounded up to a supported value.
+        */
+       var->bits_per_pixel = fb->format->cpp[0] * 8;
+
        /*
         * drm fbdev emulation doesn't support changing the pixel format at all,
         * so reject all pixel format changing requests.
index 85e6b2bbb34fc1132716544254d53d2a6b3dc17e..3a5ac13d58018c7a629c3816ae014214cf37e659 100644 (file)
@@ -856,7 +856,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
                }
 
                /* Force CDCLK to 2*BCLK as long as we need audio powered. */
-               if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               if (IS_GEMINILAKE(dev_priv))
                        glk_force_audio_cdclk(dev_priv, true);
 
                if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -875,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev,
 
        /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
        if (--dev_priv->audio_power_refcount == 0)
-               if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               if (IS_GEMINILAKE(dev_priv))
                        glk_force_audio_cdclk(dev_priv, false);
 
        intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
index 6f5e3bd13ad186fabdb451113949959a2699aa5c..301897791627e8c8f602193b51b345b623c692a2 100644 (file)
@@ -4515,8 +4515,6 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
 {
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       i915_reg_t reg;
-       u32 trans_ddi_func_ctl2_val;
 
        if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
                return;
@@ -4524,10 +4522,7 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
        DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
                      transcoder_name(old_crtc_state->cpu_transcoder));
 
-       reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
-       trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
-                                   PORT_SYNC_MODE_MASTER_SELECT_MASK);
-       I915_WRITE(reg, trans_ddi_func_ctl2_val);
+       I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
 }
 
 static void intel_fdi_normal_train(struct intel_crtc *crtc)
@@ -15112,7 +15107,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                return ret;
 
        fb_obj_bump_render_priority(obj);
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
 
        if (!new_plane_state->base.fence) { /* implicit fencing */
                struct dma_fence *fence;
index 84b164f31895b79ebb59ab1000ec79af699e7acd..6cb02c912accf079529cba521718c7c8492ba76c 100644 (file)
@@ -229,11 +229,11 @@ static void frontbuffer_release(struct kref *ref)
                vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
        spin_unlock(&obj->vma.lock);
 
-       obj->frontbuffer = NULL;
+       RCU_INIT_POINTER(obj->frontbuffer, NULL);
        spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
 
        i915_gem_object_put(obj);
-       kfree(front);
+       kfree_rcu(front, rcu);
 }
 
 struct intel_frontbuffer *
@@ -242,11 +242,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_frontbuffer *front;
 
-       spin_lock(&i915->fb_tracking.lock);
-       front = obj->frontbuffer;
-       if (front)
-               kref_get(&front->ref);
-       spin_unlock(&i915->fb_tracking.lock);
+       front = __intel_frontbuffer_get(obj);
        if (front)
                return front;
 
@@ -262,13 +258,13 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
                         i915_active_may_sleep(frontbuffer_retire));
 
        spin_lock(&i915->fb_tracking.lock);
-       if (obj->frontbuffer) {
+       if (rcu_access_pointer(obj->frontbuffer)) {
                kfree(front);
-               front = obj->frontbuffer;
+               front = rcu_dereference_protected(obj->frontbuffer, true);
                kref_get(&front->ref);
        } else {
                i915_gem_object_get(obj);
-               obj->frontbuffer = front;
+               rcu_assign_pointer(obj->frontbuffer, front);
        }
        spin_unlock(&i915->fb_tracking.lock);
 
index adc64d61a4a5c424275d1b2eb71f0101531624d2..6d41f539442508b373bed76d584ca08657cddcec 100644 (file)
 #include <linux/atomic.h>
 #include <linux/kref.h>
 
+#include "gem/i915_gem_object_types.h"
 #include "i915_active.h"
 
 struct drm_i915_private;
-struct drm_i915_gem_object;
 
 enum fb_op_origin {
        ORIGIN_GTT,
@@ -45,6 +45,7 @@ struct intel_frontbuffer {
        atomic_t bits;
        struct i915_active write;
        struct drm_i915_gem_object *obj;
+       struct rcu_head rcu;
 };
 
 void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
@@ -54,6 +55,35 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
 void intel_frontbuffer_flip(struct drm_i915_private *i915,
                            unsigned frontbuffer_bits);
 
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
+static inline struct intel_frontbuffer *
+__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
+{
+       struct intel_frontbuffer *front;
+
+       if (likely(!rcu_access_pointer(obj->frontbuffer)))
+               return NULL;
+
+       rcu_read_lock();
+       do {
+               front = rcu_dereference(obj->frontbuffer);
+               if (!front)
+                       break;
+
+               if (unlikely(!kref_get_unless_zero(&front->ref)))
+                       continue;
+
+               if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+                       break;
+
+               intel_frontbuffer_put(front);
+       } while (1);
+       rcu_read_unlock();
+
+       return front;
+}
+
 struct intel_frontbuffer *
 intel_frontbuffer_get(struct drm_i915_gem_object *obj);
 
@@ -119,6 +149,4 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
                             struct intel_frontbuffer *new,
                             unsigned int frontbuffer_bits);
 
-void intel_frontbuffer_put(struct intel_frontbuffer *front);
-
 #endif /* __INTEL_FRONTBUFFER_H__ */
index 848ce07a8ec2e505a4d561d2d27cf96785e359e6..8a98a1aa7adcd455a04a9f809869408aa0b19103 100644 (file)
@@ -279,12 +279,21 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
                                       struct i915_vma *vma)
 {
        enum pipe pipe = overlay->crtc->pipe;
+       struct intel_frontbuffer *from = NULL, *to = NULL;
 
        WARN_ON(overlay->old_vma);
 
-       intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
-                               vma ? vma->obj->frontbuffer : NULL,
-                               INTEL_FRONTBUFFER_OVERLAY(pipe));
+       if (overlay->vma)
+               from = intel_frontbuffer_get(overlay->vma->obj);
+       if (vma)
+               to = intel_frontbuffer_get(vma->obj);
+
+       intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+       if (to)
+               intel_frontbuffer_put(to);
+       if (from)
+               intel_frontbuffer_put(from);
 
        intel_frontbuffer_flip_prepare(overlay->i915,
                                       INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -766,7 +775,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                ret = PTR_ERR(vma);
                goto out_pin_section;
        }
-       intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
+       i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
 
        if (!overlay->active) {
                u32 oconfig;
index b9f504ba3b32056e97fca71b652d34780f2d6593..18ee708585a94073cd51fa54a78f3aac93d7e149 100644 (file)
@@ -20,7 +20,8 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
 {
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 }
 
 static int clflush_work(struct dma_fence_work *base)
index 9937b4c341f1a37c8e22fee8ad87e9ac7dfcf218..f86400a191b040813956fd6e00af31a42742962b 100644 (file)
@@ -664,7 +664,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        i915_gem_object_unlock(obj);
 
        if (write_domain)
-               intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+               i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
 out_unpin:
        i915_gem_object_unpin_pages(obj);
@@ -784,7 +784,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
        }
 
 out:
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
        obj->mm.dirty = true;
        /* return with the pages pinned */
        return 0;
index a50296cce0d88efb56a07b349ff56b88f7b689ba..a596548c07bf818e397a953210934da5c19b78d3 100644 (file)
@@ -280,7 +280,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                for_each_ggtt_vma(vma, obj)
                        intel_gt_flush_ggtt_writes(vma->vm->gt);
 
-               intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+               i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
                for_each_ggtt_vma(vma, obj) {
                        if (vma->iomap)
@@ -308,6 +308,30 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
        obj->write_domain = 0;
 }
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                        enum fb_op_origin origin)
+{
+       struct intel_frontbuffer *front;
+
+       front = __intel_frontbuffer_get(obj);
+       if (front) {
+               intel_frontbuffer_flush(front, origin);
+               intel_frontbuffer_put(front);
+       }
+}
+
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                             enum fb_op_origin origin)
+{
+       struct intel_frontbuffer *front;
+
+       front = __intel_frontbuffer_get(obj);
+       if (front) {
+               intel_frontbuffer_invalidate(front, origin);
+               intel_frontbuffer_put(front);
+       }
+}
+
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
        INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
index 458cd51331f1826d0a479c0356b1c85c0ca959d0..4b93591fd5c7a4f586f5d6e3d6606db3c89c66a7 100644 (file)
@@ -13,8 +13,8 @@
 
 #include <drm/i915_drm.h>
 
+#include "display/intel_frontbuffer.h"
 #include "i915_gem_object_types.h"
-
 #include "i915_gem_gtt.h"
 
 void i915_gem_init__objects(struct drm_i915_private *i915);
@@ -463,4 +463,25 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                        enum fb_op_origin origin);
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                             enum fb_op_origin origin);
+
+static inline void
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                 enum fb_op_origin origin)
+{
+       if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+               __i915_gem_object_flush_frontbuffer(obj, origin);
+}
+
+static inline void
+i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                      enum fb_op_origin origin)
+{
+       if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+               __i915_gem_object_invalidate_frontbuffer(obj, origin);
+}
+
 #endif
index 96008374a4120480baee67ad656d7eb9472c1e07..e3f3944fbd90d1d849f37aee4d109cf95cc73e17 100644 (file)
@@ -150,7 +150,7 @@ struct drm_i915_gem_object {
         */
        u16 write_domain;
 
-       struct intel_frontbuffer *frontbuffer;
+       struct intel_frontbuffer __rcu *frontbuffer;
 
        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
index ef7bc41ffffad537b981181129bb9772d3dc36fb..5b7ff3ccfa8ea8329657cdaea3555615e77de852 100644 (file)
@@ -123,6 +123,10 @@ static int __context_pin_state(struct i915_vma *vma)
        if (err)
                return err;
 
+       err = i915_active_acquire(&vma->active);
+       if (err)
+               goto err_unpin;
+
        /*
         * And mark it as a globally pinned object to let the shrinker know
         * it cannot reclaim the object until we release it.
@@ -131,14 +135,44 @@ static int __context_pin_state(struct i915_vma *vma)
        vma->obj->mm.dirty = true;
 
        return 0;
+
+err_unpin:
+       i915_vma_unpin(vma);
+       return err;
 }
 
 static void __context_unpin_state(struct i915_vma *vma)
 {
        i915_vma_make_shrinkable(vma);
+       i915_active_release(&vma->active);
        __i915_vma_unpin(vma);
 }
 
+static int __ring_active(struct intel_ring *ring)
+{
+       int err;
+
+       err = i915_active_acquire(&ring->vma->active);
+       if (err)
+               return err;
+
+       err = intel_ring_pin(ring);
+       if (err)
+               goto err_active;
+
+       return 0;
+
+err_active:
+       i915_active_release(&ring->vma->active);
+       return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+       intel_ring_unpin(ring);
+       i915_active_release(&ring->vma->active);
+}
+
 __i915_active_call
 static void __intel_context_retire(struct i915_active *active)
 {
@@ -151,7 +185,7 @@ static void __intel_context_retire(struct i915_active *active)
                __context_unpin_state(ce->state);
 
        intel_timeline_unpin(ce->timeline);
-       intel_ring_unpin(ce->ring);
+       __ring_retire(ce->ring);
 
        intel_context_put(ce);
 }
@@ -163,7 +197,7 @@ static int __intel_context_active(struct i915_active *active)
 
        intel_context_get(ce);
 
-       err = intel_ring_pin(ce->ring);
+       err = __ring_active(ce->ring);
        if (err)
                goto err_put;
 
@@ -183,7 +217,7 @@ static int __intel_context_active(struct i915_active *active)
 err_timeline:
        intel_timeline_unpin(ce->timeline);
 err_ring:
-       intel_ring_unpin(ce->ring);
+       __ring_retire(ce->ring);
 err_put:
        intel_context_put(ce);
        return err;
index a459a42ad5c22a3e8e5b9491fd5c8df8fec07045..7e64b7d7d33015526324d1bceed8104c453a3363 100644 (file)
@@ -94,8 +94,9 @@ static int __gt_park(struct intel_wakeref *wf)
                intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
        }
 
+       /* Defer dropping the display power well for 100ms, it's slow! */
        GEM_BUG_ON(!wakeref);
-       intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+       intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
        i915_globals_park();
 
index 75dd0e0367b7a5c467bd5dee3313feae6941da1b..d925a1035c9d2f0ab0dd242a5218d79b61579a66 100644 (file)
@@ -2664,6 +2664,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
        batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
+       /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+       batch = gen8_emit_pipe_control(batch,
+                                      PIPE_CONTROL_FLUSH_L3 |
+                                      PIPE_CONTROL_STORE_DATA_INDEX |
+                                      PIPE_CONTROL_CS_STALL |
+                                      PIPE_CONTROL_QW_WRITE,
+                                      LRC_PPHWSP_SCRATCH_ADDR);
+
        batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
 
        /* WaMediaPoolStateCmdInWABB:bxt,glk */
@@ -4416,9 +4424,11 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
        ve->base.gt = siblings[0]->gt;
        ve->base.uncore = siblings[0]->uncore;
        ve->base.id = -1;
+
        ve->base.class = OTHER_CLASS;
        ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
        ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+       ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
 
        /*
         * The decision on whether to submit a request using semaphores
index a47d5a7c32c9633f5e1a839b1ee68a5107579055..93026217c121cc58022c5b4cf35af68ac3e318a1 100644 (file)
@@ -1413,14 +1413,6 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
        int len;
        u32 *cs;
 
-       flags |= MI_MM_SPACE_GTT;
-       if (IS_HASWELL(i915))
-               /* These flags are for resource streamer on HSW+ */
-               flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
-       else
-               /* We need to save the extended state for powersaving modes */
-               flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
        len = 4;
        if (IS_GEN(i915, 7))
                len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
@@ -1589,22 +1581,21 @@ static int switch_context(struct i915_request *rq)
        }
 
        if (ce->state) {
-               u32 hw_flags;
+               u32 flags;
 
                GEM_BUG_ON(rq->engine->id != RCS0);
 
-               /*
-                * The kernel context(s) is treated as pure scratch and is not
-                * expected to retain any state (as we sacrifice it during
-                * suspend and on resume it may be corrupted). This is ok,
-                * as nothing actually executes using the kernel context; it
-                * is purely used for flushing user contexts.
-                */
-               hw_flags = 0;
-               if (i915_gem_context_is_kernel(rq->gem_context))
-                       hw_flags = MI_RESTORE_INHIBIT;
+               /* For resource streamer on HSW+ and power context elsewhere */
+               BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
+               BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
+
+               flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
+               if (!i915_gem_context_is_kernel(rq->gem_context))
+                       flags |= MI_RESTORE_EXT_STATE_EN;
+               else
+                       flags |= MI_RESTORE_INHIBIT;
 
-               ret = mi_set_context(rq, hw_flags);
+               ret = mi_set_context(rq, flags);
                if (ret)
                        return ret;
        }
index e29bc137e7bad722f6b6b7d9bfe7a763ceefec4c..21aa08f558110e842267acc24b87a2da0ecae6cd 100644 (file)
@@ -1660,8 +1660,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
        (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
 
 /* WaRsDisableCoarsePowerGating:skl,cnl */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
-       (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)                   \
+       (IS_CANNONLAKE(dev_priv) ||                                     \
+        IS_SKL_GT3(dev_priv) ||                                        \
+        IS_SKL_GT4(dev_priv))
 
 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
 #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
index d034fa413164bfeb3f68bdb9bf18a17ce131405b..905890e3ac24139a470dfc2ea47529b4a08317eb 100644 (file)
@@ -161,7 +161,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
         * We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;
@@ -169,7 +169,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        drm_clflush_virt_range(vaddr, args->size);
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        return 0;
 }
 
@@ -589,7 +589,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                goto out_unpin;
        }
 
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
@@ -631,7 +631,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                user_data += page_length;
                offset += page_length;
        }
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
        i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
@@ -721,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                offset = 0;
        }
 
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        i915_gem_object_unlock_fence(obj, fence);
 
        return ret;
index 6239a9adbf14e4cf0e1973039d58303af1737101..c083f516fd3598c8384f1c46c29063f77dfef8ea 100644 (file)
@@ -3304,7 +3304,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
 
 static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 {
-       struct i915_vma *vma, *vn;
+       struct i915_vma *vma;
        bool flush = false;
        int open;
 
@@ -3319,15 +3319,12 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
        open = atomic_xchg(&ggtt->vm.open, 0);
 
        /* clflush objects bound into the GGTT and rebind them. */
-       list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+       list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
                        continue;
 
-               if (!__i915_vma_unbind(vma))
-                       continue;
-
                clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
                WARN_ON(i915_vma_bind(vma,
                                      obj ? obj->cache_level : 0,
index 2814218c5ba18773f053d05e2733f5e67b38de17..d6d2e6fb867432a035b9df0e60a63823ef94e72f 100644 (file)
@@ -144,61 +144,40 @@ static inline s64 ktime_since(const ktime_t kt)
        return ktime_to_ns(ktime_sub(ktime_get(), kt));
 }
 
-static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
-{
-       u64 val;
-
-       /*
-        * We think we are runtime suspended.
-        *
-        * Report the delta from when the device was suspended to now,
-        * on top of the last known real value, as the approximated RC6
-        * counter value.
-        */
-       val = ktime_since(pmu->sleep_last);
-       val += pmu->sample[__I915_SAMPLE_RC6].cur;
-
-       pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-
-       return val;
-}
-
-static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
-{
-       /*
-        * If we are coming back from being runtime suspended we must
-        * be careful not to report a larger value than returned
-        * previously.
-        */
-       if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
-               pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
-               pmu->sample[__I915_SAMPLE_RC6].cur = val;
-       } else {
-               val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
-       }
-
-       return val;
-}
-
 static u64 get_rc6(struct intel_gt *gt)
 {
        struct drm_i915_private *i915 = gt->i915;
        struct i915_pmu *pmu = &i915->pmu;
        unsigned long flags;
+       bool awake = false;
        u64 val;
 
-       val = 0;
        if (intel_gt_pm_get_if_awake(gt)) {
                val = __get_rc6(gt);
                intel_gt_pm_put_async(gt);
+               awake = true;
        }
 
        spin_lock_irqsave(&pmu->lock, flags);
 
-       if (val)
-               val = __pmu_update_rc6(pmu, val);
+       if (awake) {
+               pmu->sample[__I915_SAMPLE_RC6].cur = val;
+       } else {
+               /*
+                * We think we are runtime suspended.
+                *
+                * Report the delta from when the device was suspended to now,
+                * on top of the last known real value, as the approximated RC6
+                * counter value.
+                */
+               val = ktime_since(pmu->sleep_last);
+               val += pmu->sample[__I915_SAMPLE_RC6].cur;
+       }
+
+       if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
+               val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
        else
-               val = __pmu_estimate_rc6(pmu);
+               pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
 
        spin_unlock_irqrestore(&pmu->lock, flags);
 
@@ -210,20 +189,11 @@ static void park_rc6(struct drm_i915_private *i915)
        struct i915_pmu *pmu = &i915->pmu;
 
        if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-               __pmu_update_rc6(pmu, __get_rc6(&i915->gt));
+               pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
 
        pmu->sleep_last = ktime_get();
 }
 
-static void unpark_rc6(struct drm_i915_private *i915)
-{
-       struct i915_pmu *pmu = &i915->pmu;
-
-       /* Estimate how long we slept and accumulate that into rc6 counters */
-       if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-               __pmu_estimate_rc6(pmu);
-}
-
 #else
 
 static u64 get_rc6(struct intel_gt *gt)
@@ -232,7 +202,6 @@ static u64 get_rc6(struct intel_gt *gt)
 }
 
 static void park_rc6(struct drm_i915_private *i915) {}
-static void unpark_rc6(struct drm_i915_private *i915) {}
 
 #endif
 
@@ -281,8 +250,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
         */
        __i915_pmu_maybe_start_timer(pmu);
 
-       unpark_rc6(i915);
-
        spin_unlock_irq(&pmu->lock);
 }
 
@@ -1107,12 +1074,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
        hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pmu->timer.function = i915_sample;
 
-       if (!is_igp(i915))
+       if (!is_igp(i915)) {
                pmu->name = kasprintf(GFP_KERNEL,
-                                     "i915-%s",
+                                     "i915_%s",
                                      dev_name(i915->drm.dev));
-       else
+               if (pmu->name) {
+                       /* tools/perf reserves colons as special. */
+                       strreplace((char *)pmu->name, ':', '_');
+               }
+       } else {
                pmu->name = "i915";
+       }
        if (!pmu->name)
                goto err;
 
index bf52e3983631b7278d4930b5c752190e01954e56..6c1647c5daf255ff4d27fb538dd2ec358ed782e2 100644 (file)
@@ -18,7 +18,7 @@ enum {
        __I915_SAMPLE_FREQ_ACT = 0,
        __I915_SAMPLE_FREQ_REQ,
        __I915_SAMPLE_RC6,
-       __I915_SAMPLE_RC6_ESTIMATED,
+       __I915_SAMPLE_RC6_LAST_REPORTED,
        __I915_NUM_PMU_SAMPLERS
 };
 
index 4fd3d76db346c1023b5f584b0fc0e5b6cac8222a..094011b8f64d6ad6339a0168a781b0e6d26ecdc9 100644 (file)
@@ -4177,7 +4177,13 @@ enum {
 #define  CPSSUNIT_CLKGATE_DIS          REG_BIT(9)
 
 #define UNSLICE_UNIT_LEVEL_CLKGATE     _MMIO(0x9434)
-#define  VFUNIT_CLKGATE_DIS            (1 << 20)
+#define   VFUNIT_CLKGATE_DIS           REG_BIT(20)
+#define   HSUNIT_CLKGATE_DIS           REG_BIT(8)
+#define   VSUNIT_CLKGATE_DIS           REG_BIT(3)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2    _MMIO(0x94e4)
+#define   VSUNIT_CLKGATE_DIS_TGL       REG_BIT(19)
+#define   PSDUNIT_CLKGATE_DIS          REG_BIT(5)
 
 #define INF_UNIT_LEVEL_CLKGATE         _MMIO(0x9560)
 #define   CGPSF_CLKGATE_DIS            (1 << 3)
index e5512f26e20a69f22ae0f8797d736d41fdfe1550..01c822256b395d5edd7b0ee6e2cf59cab5ad6e46 100644 (file)
@@ -1104,8 +1104,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
                return err;
 
        if (flags & EXEC_OBJECT_WRITE) {
-               if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
-                       i915_active_add_request(&obj->frontbuffer->write, rq);
+               struct intel_frontbuffer *front;
+
+               front = __intel_frontbuffer_get(obj);
+               if (unlikely(front)) {
+                       if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+                               i915_active_add_request(&front->write, rq);
+                       intel_frontbuffer_put(front);
+               }
 
                dma_resv_add_excl_fence(vma->resv, &rq->fence);
                obj->write_domain = I915_GEM_DOMAIN_RENDER;
index 75ae6f4951616785378267b2b4a3824c5409b52a..86379eddc908301741c0c83bc4f882865adb074d 100644 (file)
@@ -6565,6 +6565,17 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
        /* WaEnable32PlaneMode:icl */
        I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
                   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
+
+       /*
+        * Wa_1408615072:icl,ehl  (vsunit)
+        * Wa_1407596294:icl,ehl  (hsunit)
+        */
+       intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
+                        0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+       /* Wa_1407352427:icl,ehl */
+       intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+                        0, PSDUNIT_CLKGATE_DIS);
 }
 
 static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
index 35cc69a3a1b99abcb7e7fa181bac46abec8fcd74..05364eca20f7537d6ae6f651ba3c065b4305af72 100644 (file)
@@ -25,6 +25,7 @@
 #ifndef __I915_SELFTESTS_RANDOM_H__
 #define __I915_SELFTESTS_RANDOM_H__
 
+#include <linux/math64.h>
 #include <linux/random.h>
 
 #include "../i915_selftest.h"
index f80a8ba759770f82fa435cc97f5b63649477c52e..3305a94fc9305e1ead84aa1a8d359940e76a0151 100644 (file)
@@ -215,11 +215,12 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_ddp_comp *comp;
        int i, count = 0;
+       unsigned int local_index = plane - mtk_crtc->planes;
 
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                comp = mtk_crtc->ddp_comp[i];
-               if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) {
-                       *local_layer = plane->index - count;
+               if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
+                       *local_layer = local_index - count;
                        return comp;
                }
                count += mtk_ddp_comp_layer_nr(comp);
@@ -310,7 +311,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 
                plane_state = to_mtk_plane_state(plane->state);
                comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
-               mtk_ddp_comp_layer_config(comp, local_layer, plane_state);
+               if (comp)
+                       mtk_ddp_comp_layer_config(comp, local_layer,
+                                                 plane_state);
        }
 
        return 0;
@@ -386,8 +389,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
                        comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
                                                          &local_layer);
 
-                       mtk_ddp_comp_layer_config(comp, local_layer,
-                                                 plane_state);
+                       if (comp)
+                               mtk_ddp_comp_layer_config(comp, local_layer,
+                                                         plane_state);
                        plane_state->pending.config = false;
                }
                mtk_crtc->pending_planes = false;
@@ -401,7 +405,9 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
        struct mtk_ddp_comp *comp;
 
        comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
-       return mtk_ddp_comp_layer_check(comp, local_layer, state);
+       if (comp)
+               return mtk_ddp_comp_layer_check(comp, local_layer, state);
+       return 0;
 }
 
 static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
index e9931bbbe846478efce7b294e001d1b916ed4493..d77c9f484ce3d04b9d8e0088cd2c01802b8c0881 100644 (file)
@@ -230,28 +230,25 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
 static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
 {
        u32 timcon0, timcon1, timcon2, timcon3;
-       u32 ui, cycle_time;
+       u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
        struct mtk_phy_timing *timing = &dsi->phy_timing;
 
-       ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
-       cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+       timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+       timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+       timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+                            timing->da_hs_prepare;
+       timing->da_hs_trail = timing->da_hs_prepare + 1;
 
-       timing->lpx = NS_TO_CYCLE(60, cycle_time);
-       timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
-       timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
-       timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+       timing->ta_go = 4 * timing->lpx - 2;
+       timing->ta_sure = timing->lpx + 2;
+       timing->ta_get = 4 * timing->lpx;
+       timing->da_hs_exit = 2 * timing->lpx + 1;
 
-       timing->ta_go = 4 * timing->lpx;
-       timing->ta_sure = 3 * timing->lpx / 2;
-       timing->ta_get = 5 * timing->lpx;
-       timing->da_hs_exit = 2 * timing->lpx;
-
-       timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
-       timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
-
-       timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
-       timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
-       timing->clk_hs_exit = 2 * timing->lpx;
+       timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+       timing->clk_hs_post = timing->clk_hs_prepare + 8;
+       timing->clk_hs_trail = timing->clk_hs_prepare;
+       timing->clk_hs_zero = timing->clk_hs_trail * 4;
+       timing->clk_hs_exit = 2 * timing->clk_hs_trail;
 
        timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
                  timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
@@ -482,27 +479,39 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
                        dsi_tmp_buf_bpp - 10);
 
        data_phy_cycles = timing->lpx + timing->da_hs_prepare +
-                                 timing->da_hs_zero + timing->da_hs_exit + 2;
+                         timing->da_hs_zero + timing->da_hs_exit + 3;
 
        if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
-               if (vm->hfront_porch * dsi_tmp_buf_bpp >
+               if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
                    data_phy_cycles * dsi->lanes + 18) {
-                       horizontal_frontporch_byte = vm->hfront_porch *
-                                                    dsi_tmp_buf_bpp -
-                                                    data_phy_cycles *
-                                                    dsi->lanes - 18;
+                       horizontal_frontporch_byte =
+                               vm->hfront_porch * dsi_tmp_buf_bpp -
+                               (data_phy_cycles * dsi->lanes + 18) *
+                               vm->hfront_porch /
+                               (vm->hfront_porch + vm->hback_porch);
+
+                       horizontal_backporch_byte =
+                               horizontal_backporch_byte -
+                               (data_phy_cycles * dsi->lanes + 18) *
+                               vm->hback_porch /
+                               (vm->hfront_porch + vm->hback_porch);
                } else {
                        DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
                        horizontal_frontporch_byte = vm->hfront_porch *
                                                     dsi_tmp_buf_bpp;
                }
        } else {
-               if (vm->hfront_porch * dsi_tmp_buf_bpp >
+               if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
                    data_phy_cycles * dsi->lanes + 12) {
-                       horizontal_frontporch_byte = vm->hfront_porch *
-                                                    dsi_tmp_buf_bpp -
-                                                    data_phy_cycles *
-                                                    dsi->lanes - 12;
+                       horizontal_frontporch_byte =
+                               vm->hfront_porch * dsi_tmp_buf_bpp -
+                               (data_phy_cycles * dsi->lanes + 12) *
+                               vm->hfront_porch /
+                               (vm->hfront_porch + vm->hback_porch);
+                       horizontal_backporch_byte = horizontal_backporch_byte -
+                               (data_phy_cycles * dsi->lanes + 12) *
+                               vm->hback_porch /
+                               (vm->hfront_porch + vm->hback_porch);
                } else {
                        DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
                        horizontal_frontporch_byte = vm->hfront_porch *
index 83c4586665b4f3258f58fea49f162baf4ec642e9..81ac9b658a70a469661becf4272ba7ae1cc9ea3c 100644 (file)
@@ -95,7 +95,7 @@ struct cdn_dp_device {
        struct cdn_dp_port *port[MAX_PHY];
        u8 ports;
        u8 max_lanes;
-       u8 max_rate;
+       unsigned int max_rate;
        u8 lanes;
        int active_port;
 
index a7c4654445c75b652a8b3d8462f2d52023994ed9..68d4644ac2dcc32a2d4118d824f13eba5d284921 100644 (file)
@@ -685,8 +685,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
        struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
 
        cec_unregister_adapter(hdmi->cec_adap);
-       drm_connector_cleanup(&hdmi->connector);
-       drm_encoder_cleanup(&hdmi->encoder);
        i2c_del_adapter(hdmi->i2c);
        i2c_put_adapter(hdmi->ddc_i2c);
        clk_disable_unprepare(hdmi->mod_clk);
index 42651d737c55b3bb1285a0008608e66861588c1d..c81cdce6ed559b0e0b67b30d2d13b5ba5d020c22 100644 (file)
@@ -489,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 
        WARN_ON(!tcon->quirks->has_channel_0);
 
-       tcon->dclk_min_div = 1;
+       tcon->dclk_min_div = tcon->quirks->dclk_min_div;
        tcon->dclk_max_div = 127;
        sun4i_tcon0_mode_set_common(tcon, mode);
 
@@ -1426,12 +1426,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
 static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
        .has_channel_0          = true,
        .has_channel_1          = true,
+       .dclk_min_div           = 4,
        .set_mux                = sun4i_a10_tcon_set_mux,
 };
 
 static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
        .has_channel_0          = true,
        .has_channel_1          = true,
+       .dclk_min_div           = 4,
        .set_mux                = sun5i_a13_tcon_set_mux,
 };
 
@@ -1440,6 +1442,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
        .has_channel_1          = true,
        .has_lvds_alt           = true,
        .needs_de_be_mux        = true,
+       .dclk_min_div           = 1,
        .set_mux                = sun6i_tcon_set_mux,
 };
 
@@ -1447,11 +1450,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
        .has_channel_0          = true,
        .has_channel_1          = true,
        .needs_de_be_mux        = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
        .has_channel_0          = true,
        .has_channel_1          = true,
+       .dclk_min_div           = 4,
        /* Same display pipeline structure as A10 */
        .set_mux                = sun4i_a10_tcon_set_mux,
 };
@@ -1459,11 +1464,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
 static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
        .has_channel_0          = true,
        .has_lvds_alt           = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
        .supports_lvds          = true,
        .has_channel_0          = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1477,11 +1484,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
 
 static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
        .has_channel_0          = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
-       .has_channel_0  = true,
-       .needs_edp_reset = true,
+       .has_channel_0          = true,
+       .needs_edp_reset        = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
index f9f1fe80b206c36772021d88beb54b2f8551eea6..a62ec826ae71e0d429d334c97f0c9e4f0a3a2d8f 100644 (file)
@@ -224,6 +224,7 @@ struct sun4i_tcon_quirks {
        bool    needs_de_be_mux; /* sun6i needs mux to select backend */
        bool    needs_edp_reset; /* a80 edp reset needed for tcon0 access */
        bool    supports_lvds;   /* Does the TCON support an LVDS output? */
+       u8      dclk_min_div;   /* minimum divider for TCON0 DCLK */
 
        /* callback to handle tcon muxing options */
        int     (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
index 390524143139b6999a05f3413726780558114e2c..1635a9ff47943a06e9eea053571eb96f301678c9 100644 (file)
@@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
                if (!objs)
                        return;
                virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+               virtio_gpu_array_lock_resv(objs);
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, 0,
                         plane->state->crtc_w,
index 8063b1d567b1dd2fda9a0e9ed150c9d258f3d810..e6e4c841fb06f4c0dd1c73bf086b466c609fd23f 100644 (file)
@@ -261,7 +261,8 @@ static int asus_event(struct hid_device *hdev, struct hid_field *field,
                      struct hid_usage *usage, __s32 value)
 {
        if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 &&
-           (usage->hid & HID_USAGE) != 0x00 && !usage->type) {
+           (usage->hid & HID_USAGE) != 0x00 &&
+           (usage->hid & HID_USAGE) != 0xff && !usage->type) {
                hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n",
                         usage->hid & HID_USAGE);
        }
index e0b241bd3070c5a15e9ac90ecc81573aad6452ec..851fe54ea59e7c2636ab204372c49b749a136014 100644 (file)
@@ -288,6 +288,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
        offset = report->size;
        report->size += parser->global.report_size * parser->global.report_count;
 
+       /* Total size check: Allow for possible report index byte */
+       if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+               hid_err(parser->device, "report is too long\n");
+               return -1;
+       }
+
        if (!parser->local.usage_index) /* Ignore padding fields */
                return 0;
 
index 7e1689ef35f5de36ff681bc931539971e39a8b39..3a400ce603c4f39cd3e21e8210f6f06a7d4366bd 100644 (file)
 #define USB_VENDOR_ID_ITE               0x048d
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA   0x8386
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2  0x8350
+#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720   0x837a
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA900       0x8396
 #define USB_DEVICE_ID_ITE8595          0x8595
 
 #define USB_DEVICE_ID_LG_MULTITOUCH    0x0064
 #define USB_DEVICE_ID_LG_MELFAS_MT     0x6007
 #define I2C_DEVICE_ID_LG_8001          0x8001
+#define I2C_DEVICE_ID_LG_7010          0x7010
 
 #define USB_VENDOR_ID_LOGITECH         0x046d
 #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
 #define USB_DEVICE_ID_SYNAPTICS_LTS2   0x1d10
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012       0x2968
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
 
index 63855f275a38c3f39eaa917744909a2ec97b9c8f..dea9cc65bf8007bc101c26b308c28e32b7714abb 100644 (file)
@@ -1132,9 +1132,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
        }
 
 mapped:
-       if (device->driver->input_mapped && device->driver->input_mapped(device,
-                               hidinput, field, usage, &bit, &max) < 0)
-               goto ignore;
+       if (device->driver->input_mapped &&
+           device->driver->input_mapped(device, hidinput, field, usage,
+                                        &bit, &max) < 0) {
+               /*
+                * The driver indicated that no further generic handling
+                * of the usage is desired.
+                */
+               return;
+       }
 
        set_bit(usage->type, input->evbit);
 
@@ -1215,9 +1221,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                set_bit(MSC_SCAN, input->mscbit);
        }
 
-ignore:
        return;
 
+ignore:
+       usage->type = 0;
+       usage->code = 0;
 }
 
 static void hidinput_handle_scroll(struct hid_usage *usage,
index a45f2352618d390575f34795219e1d211c938be0..c436e12feb23315f141362dccc38c2c549c8d9f9 100644 (file)
@@ -40,6 +40,9 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
 static const struct hid_device_id ite_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
        { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+       /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+       { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
+                        USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);
index 3cfeb1629f79fec23e0ceadc32be2081c29b1f87..362805ddf377736927fa95493186698a8d5fad0f 100644 (file)
@@ -1019,7 +1019,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
                tool = MT_TOOL_DIAL;
        else if (unlikely(!confidence_state)) {
                tool = MT_TOOL_PALM;
-               if (!active &&
+               if (!active && mt &&
                    input_mt_is_active(&mt->slots[slotnum])) {
                        /*
                         * The non-confidence was reported for
@@ -1985,6 +1985,9 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_LG,
                HID_USB_DEVICE(USB_VENDOR_ID_LG,
                        USB_DEVICE_ID_LG_MELFAS_MT) },
+       { .driver_data = MT_CLS_LG,
+               HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+                       USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
 
        /* MosArt panels */
        { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
index d1b39c29e3535a0aa40cf34dc52101c1ace87046..0e7b2d998395a91f5d5f9109443501707fe964fc 100644 (file)
@@ -174,6 +174,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
 
        { 0 }
 };
index 8dae0f9b819e011d6695462fea7e88e85cd16669..6286204d4c56029656f6cf68758e75781d9ff0ff 100644 (file)
@@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev,
 
        if (steam->quirks & STEAM_QUIRK_WIRELESS) {
                hid_info(hdev, "Steam wireless receiver connected");
+               /* If using a wireless adaptor ask for connection status */
+               steam->connected = false;
                steam_request_conn_status(steam);
        } else {
+               /* A wired connection is always present */
+               steam->connected = true;
                ret = steam_register(steam);
                if (ret) {
                        hid_err(hdev,
index c3fc0ceb80963c331f1515bacdfa51883708d1ae..7a75aff78388289f0ac4164b2acb25b96ed78dd5 100644 (file)
@@ -249,13 +249,14 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
 static __poll_t hidraw_poll(struct file *file, poll_table *wait)
 {
        struct hidraw_list *list = file->private_data;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */
 
        poll_wait(file, &list->hidraw->wait, wait);
        if (list->head != list->tail)
-               return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (!list->hidraw->exist)
-               return EPOLLERR | EPOLLHUP;
-       return 0;
+               mask |= EPOLLERR | EPOLLHUP;
+       return mask;
 }
 
 static int hidraw_open(struct inode *inode, struct file *file)
index a358e61fbc8272af922b27f789cc544b0bf56e00..009000c5d55cddfaad101f1b33d5b37497e0f176 100644 (file)
@@ -49,6 +49,8 @@
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
 #define I2C_HID_QUIRK_BOGUS_IRQ                        BIT(4)
 #define I2C_HID_QUIRK_RESET_ON_RESUME          BIT(5)
+#define I2C_HID_QUIRK_BAD_INPUT_SIZE           BIT(6)
+
 
 /* flags */
 #define I2C_HID_STARTED                0
@@ -175,6 +177,8 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_BOGUS_IRQ },
        { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
                 I2C_HID_QUIRK_RESET_ON_RESUME },
+       { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+               I2C_HID_QUIRK_BAD_INPUT_SIZE },
        { 0, 0 }
 };
 
@@ -496,9 +500,15 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
        }
 
        if ((ret_size > size) || (ret_size < 2)) {
-               dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
-                       __func__, size, ret_size);
-               return;
+               if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) {
+                       ihid->inbuf[0] = size & 0xff;
+                       ihid->inbuf[1] = size >> 8;
+                       ret_size = size;
+               } else {
+                       dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+                               __func__, size, ret_size);
+                       return;
+               }
        }
 
        i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
index 6c1e6110867f0d71ad279d41bc61c3d81f384dcc..1fb294ca463e5b566b577b26eefe5e38c40496d0 100644 (file)
@@ -24,7 +24,9 @@
 #define ICL_MOBILE_DEVICE_ID   0x34FC
 #define SPT_H_DEVICE_ID                0xA135
 #define CML_LP_DEVICE_ID       0x02FC
+#define CMP_H_DEVICE_ID                0x06FC
 #define EHL_Ax_DEVICE_ID       0x4BB3
+#define TGL_LP_DEVICE_ID       0xA0FC
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 784dcc8c702280800704abb903fc3ec3a79e6687..f491d8b4e24c7b39087bfc0a71db3b18d7cc596c 100644 (file)
@@ -34,7 +34,9 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index fa0cc089982739e9d70da131c7b6235571f0acfa..8fe3efcb832715c4a978625387dc0bb0ea5b00a2 100644 (file)
@@ -766,13 +766,14 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
 static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
 {
        struct uhid_device *uhid = file->private_data;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */
 
        poll_wait(file, &uhid->waitq, wait);
 
        if (uhid->head != uhid->tail)
-               return EPOLLIN | EPOLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
-       return 0;
+       return mask;
 }
 
 static const struct file_operations uhid_fops = {
index e421cdf2d1a4ec5554535e8b241592d6c8f3afa7..a970b809d778c3c9a74702abe70c9f1f976a06d9 100644 (file)
@@ -241,12 +241,51 @@ static int hiddev_release(struct inode * inode, struct file * file)
        return 0;
 }
 
+static int __hiddev_open(struct hiddev *hiddev, struct file *file)
+{
+       struct hiddev_list *list;
+       int error;
+
+       lockdep_assert_held(&hiddev->existancelock);
+
+       list = vzalloc(sizeof(*list));
+       if (!list)
+               return -ENOMEM;
+
+       mutex_init(&list->thread_lock);
+       list->hiddev = hiddev;
+
+       if (!hiddev->open++) {
+               error = hid_hw_power(hiddev->hid, PM_HINT_FULLON);
+               if (error < 0)
+                       goto err_drop_count;
+
+               error = hid_hw_open(hiddev->hid);
+               if (error < 0)
+                       goto err_normal_power;
+       }
+
+       spin_lock_irq(&hiddev->list_lock);
+       list_add_tail(&list->node, &hiddev->list);
+       spin_unlock_irq(&hiddev->list_lock);
+
+       file->private_data = list;
+
+       return 0;
+
+err_normal_power:
+       hid_hw_power(hiddev->hid, PM_HINT_NORMAL);
+err_drop_count:
+       hiddev->open--;
+       vfree(list);
+       return error;
+}
+
 /*
  * open file op
  */
 static int hiddev_open(struct inode *inode, struct file *file)
 {
-       struct hiddev_list *list;
        struct usb_interface *intf;
        struct hid_device *hid;
        struct hiddev *hiddev;
@@ -255,66 +294,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
        intf = usbhid_find_interface(iminor(inode));
        if (!intf)
                return -ENODEV;
+
        hid = usb_get_intfdata(intf);
        hiddev = hid->hiddev;
 
-       if (!(list = vzalloc(sizeof(struct hiddev_list))))
-               return -ENOMEM;
-       mutex_init(&list->thread_lock);
-       list->hiddev = hiddev;
-       file->private_data = list;
-
-       /*
-        * no need for locking because the USB major number
-        * is shared which usbcore guards against disconnect
-        */
-       if (list->hiddev->exist) {
-               if (!list->hiddev->open++) {
-                       res = hid_hw_open(hiddev->hid);
-                       if (res < 0)
-                               goto bail;
-               }
-       } else {
-               res = -ENODEV;
-               goto bail;
-       }
-
-       spin_lock_irq(&list->hiddev->list_lock);
-       list_add_tail(&list->node, &hiddev->list);
-       spin_unlock_irq(&list->hiddev->list_lock);
-
        mutex_lock(&hiddev->existancelock);
-       /*
-        * recheck exist with existance lock held to
-        * avoid opening a disconnected device
-        */
-       if (!list->hiddev->exist) {
-               res = -ENODEV;
-               goto bail_unlock;
-       }
-       if (!list->hiddev->open++)
-               if (list->hiddev->exist) {
-                       struct hid_device *hid = hiddev->hid;
-                       res = hid_hw_power(hid, PM_HINT_FULLON);
-                       if (res < 0)
-                               goto bail_unlock;
-                       res = hid_hw_open(hid);
-                       if (res < 0)
-                               goto bail_normal_power;
-               }
-       mutex_unlock(&hiddev->existancelock);
-       return 0;
-bail_normal_power:
-       hid_hw_power(hid, PM_HINT_NORMAL);
-bail_unlock:
+       res = hiddev->exist ? __hiddev_open(hiddev, file) : -ENODEV;
        mutex_unlock(&hiddev->existancelock);
 
-       spin_lock_irq(&list->hiddev->list_lock);
-       list_del(&list->node);
-       spin_unlock_irq(&list->hiddev->list_lock);
-bail:
-       file->private_data = NULL;
-       vfree(list);
        return res;
 }
 
index ccb74529bc78243689d0720bbe2d81cfd8a52004..d99a9d407671c88ea96decdd0e561c6f8784685a 100644 (file)
@@ -2096,14 +2096,16 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
                    (hdev->product == 0x34d || hdev->product == 0x34e ||  /* MobileStudio Pro */
                     hdev->product == 0x357 || hdev->product == 0x358 ||  /* Intuos Pro 2 */
                     hdev->product == 0x392 ||                            /* Intuos Pro 2 */
-                    hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */
+                    hdev->product == 0x398 || hdev->product == 0x399 ||  /* MobileStudio Pro */
+                    hdev->product == 0x3AA)) {                           /* MobileStudio Pro */
                        value = (field->logical_maximum - value);
 
                        if (hdev->product == 0x357 || hdev->product == 0x358 ||
                            hdev->product == 0x392)
                                value = wacom_offset_rotation(input, usage, value, 3, 16);
                        else if (hdev->product == 0x34d || hdev->product == 0x34e ||
-                                hdev->product == 0x398 || hdev->product == 0x399)
+                                hdev->product == 0x398 || hdev->product == 0x399 ||
+                                hdev->product == 0x3AA)
                                value = wacom_offset_rotation(input, usage, value, 1, 2);
                }
                else {
index dc3f507e7562d016c4e27a5d78f7ffecb8ab6542..a90d757f704319e36832e8cafa270e7967ebabcd 100644 (file)
@@ -1132,7 +1132,6 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
        drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
 }
 
-#ifdef CONFIG_CPU_PM
 static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
 {
        int i, ret = 0;
@@ -1402,17 +1401,17 @@ static struct notifier_block etm4_cpu_pm_nb = {
 
 static int etm4_cpu_pm_register(void)
 {
-       return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+       if (IS_ENABLED(CONFIG_CPU_PM))
+               return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+
+       return 0;
 }
 
 static void etm4_cpu_pm_unregister(void)
 {
-       cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+       if (IS_ENABLED(CONFIG_CPU_PM))
+               cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
 }
-#else
-static int etm4_cpu_pm_register(void) { return 0; }
-static void etm4_cpu_pm_unregister(void) { }
-#endif
 
 static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 {
index e13af4874976ef1ae493844dee464d3e0c3b6fb6..5137e62970221e1751412480fc94aed2cf2d94f4 100644 (file)
@@ -174,7 +174,7 @@ static struct at91_twi_pdata sama5d2_config = {
 
 static struct at91_twi_pdata sam9x60_config = {
        .clk_max_div = 7,
-       .clk_offset = 4,
+       .clk_offset = 3,
        .has_unre_flag = true,
        .has_alt_cmd = true,
        .has_hold_field = true,
index e01b2b57e724741c3c64853bae0a1bd094f0ab94..5ab901ad615dd0d3ed1ee369acbdcdfd413ea10f 100644 (file)
@@ -58,6 +58,7 @@ struct bcm2835_i2c_dev {
        struct i2c_adapter adapter;
        struct completion completion;
        struct i2c_msg *curr_msg;
+       struct clk *bus_clk;
        int num_msgs;
        u32 msg_err;
        u8 *msg_buf;
@@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
        struct resource *mem, *irq;
        int ret;
        struct i2c_adapter *adap;
-       struct clk *bus_clk;
        struct clk *mclk;
        u32 bus_clk_rate;
 
@@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
                return PTR_ERR(mclk);
        }
 
-       bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
+       i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
 
-       if (IS_ERR(bus_clk)) {
+       if (IS_ERR(i2c_dev->bus_clk)) {
                dev_err(&pdev->dev, "Could not register clock\n");
-               return PTR_ERR(bus_clk);
+               return PTR_ERR(i2c_dev->bus_clk);
        }
 
        ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
@@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
                bus_clk_rate = 100000;
        }
 
-       ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate);
+       ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
        if (ret < 0) {
                dev_err(&pdev->dev, "Could not set clock frequency\n");
                return ret;
        }
 
-       ret = clk_prepare_enable(bus_clk);
+       ret = clk_prepare_enable(i2c_dev->bus_clk);
        if (ret) {
                dev_err(&pdev->dev, "Couldn't prepare clock");
                return ret;
@@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
 static int bcm2835_i2c_remove(struct platform_device *pdev)
 {
        struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
-       struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div");
 
-       clk_rate_exclusive_put(bus_clk);
-       clk_disable_unprepare(bus_clk);
+       clk_rate_exclusive_put(i2c_dev->bus_clk);
+       clk_disable_unprepare(i2c_dev->bus_clk);
 
        free_irq(i2c_dev->irq, i2c_dev);
        i2c_del_adapter(&i2c_dev->adapter);
index 38556381f4cadb999025adfd7b97f0f791abdb4f..2f8b8050a223375b8fec23999032df2a2bd6b137 100644 (file)
@@ -433,13 +433,17 @@ iop3xx_i2c_probe(struct platform_device *pdev)
        adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev,
                                                         "scl",
                                                         GPIOD_ASIS);
-       if (IS_ERR(adapter_data->gpio_scl))
-               return PTR_ERR(adapter_data->gpio_scl);
+       if (IS_ERR(adapter_data->gpio_scl)) {
+               ret = PTR_ERR(adapter_data->gpio_scl);
+               goto free_both;
+       }
        adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev,
                                                         "sda",
                                                         GPIOD_ASIS);
-       if (IS_ERR(adapter_data->gpio_sda))
-               return PTR_ERR(adapter_data->gpio_sda);
+       if (IS_ERR(adapter_data->gpio_sda)) {
+               ret = PTR_ERR(adapter_data->gpio_sda);
+               goto free_both;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
index a98bf31d0e5c1269552fa1e45bee391ab035af91..61339c665ebdc6eb21c7938ad366583f37fad356 100644 (file)
@@ -1608,14 +1608,18 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(&pdev->dev);
-       if (!pm_runtime_enabled(&pdev->dev))
+       if (!pm_runtime_enabled(&pdev->dev)) {
                ret = tegra_i2c_runtime_resume(&pdev->dev);
-       else
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "runtime resume failed\n");
+                       goto unprepare_div_clk;
+               }
+       } else {
                ret = pm_runtime_get_sync(i2c_dev->dev);
-
-       if (ret < 0) {
-               dev_err(&pdev->dev, "runtime resume failed\n");
-               goto unprepare_div_clk;
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "runtime resume failed\n");
+                       goto disable_rpm;
+               }
        }
 
        if (i2c_dev->is_multimaster_mode) {
@@ -1623,7 +1627,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
                if (ret < 0) {
                        dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
                                ret);
-                       goto disable_rpm;
+                       goto put_rpm;
                }
        }
 
@@ -1671,11 +1675,16 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        if (i2c_dev->is_multimaster_mode)
                clk_disable(i2c_dev->div_clk);
 
-disable_rpm:
-       pm_runtime_disable(&pdev->dev);
-       if (!pm_runtime_status_suspended(&pdev->dev))
+put_rpm:
+       if (pm_runtime_enabled(&pdev->dev))
+               pm_runtime_put_sync(&pdev->dev);
+       else
                tegra_i2c_runtime_suspend(&pdev->dev);
 
+disable_rpm:
+       if (pm_runtime_enabled(&pdev->dev))
+               pm_runtime_disable(&pdev->dev);
+
 unprepare_div_clk:
        clk_unprepare(i2c_dev->div_clk);
 
@@ -1710,9 +1719,14 @@ static int tegra_i2c_remove(struct platform_device *pdev)
 static int __maybe_unused tegra_i2c_suspend(struct device *dev)
 {
        struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+       int err;
 
        i2c_mark_adapter_suspended(&i2c_dev->adapter);
 
+       err = pm_runtime_force_suspend(dev);
+       if (err < 0)
+               return err;
+
        return 0;
 }
 
@@ -1733,6 +1747,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev)
        if (err)
                return err;
 
+       err = pm_runtime_force_resume(dev);
+       if (err < 0)
+               return err;
+
        i2c_mark_adapter_resumed(&i2c_dev->adapter);
 
        return 0;
index 9f8dcd3f83850149bbc6e75bfc11d86d8ef7e3df..35b209797d7b759e4f274d28c7d29c173d83cd39 100644 (file)
@@ -186,10 +186,11 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
         * If we can set SDA, we will always create a STOP to ensure additional
         * pulses will do no harm. This is achieved by letting SDA follow SCL
         * half a cycle later. Check the 'incomplete_write_byte' fault injector
-        * for details.
+        * for details. Note that we must honour tsu:sto, 4us, but lets use 5us
+        * here for simplicity.
         */
        bri->set_scl(adap, scl);
-       ndelay(RECOVERY_NDELAY / 2);
+       ndelay(RECOVERY_NDELAY);
        if (bri->set_sda)
                bri->set_sda(adap, scl);
        ndelay(RECOVERY_NDELAY / 2);
@@ -211,7 +212,13 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
                scl = !scl;
                bri->set_scl(adap, scl);
                /* Creating STOP again, see above */
-               ndelay(RECOVERY_NDELAY / 2);
+               if (scl)  {
+                       /* Honour minimum tsu:sto */
+                       ndelay(RECOVERY_NDELAY);
+               } else {
+                       /* Honour minimum tf and thd:dat */
+                       ndelay(RECOVERY_NDELAY / 2);
+               }
                if (bri->set_sda)
                        bri->set_sda(adap, scl);
                ndelay(RECOVERY_NDELAY / 2);
index 3f03abf100b599d666dbae8bf7b0d5c7ca06d506..306bf15023a7878cd5a6e987d63e378bf455162c 100644 (file)
@@ -494,13 +494,11 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
                st->channel_config[channel].buf_negative =
                        of_property_read_bool(child, "adi,buffered-negative");
 
-               *chan = ad7124_channel_template;
-               chan->address = channel;
-               chan->scan_index = channel;
-               chan->channel = ain[0];
-               chan->channel2 = ain[1];
-
-               chan++;
+               chan[channel] = ad7124_channel_template;
+               chan[channel].address = channel;
+               chan[channel].scan_index = channel;
+               chan[channel].channel = ain[0];
+               chan[channel].channel2 = ain[1];
        }
 
        return 0;
index fa4586037bb8ebcfaf0d978619e5d2475bbf85f1..0b91de4df8f4609daf0ad14d80db9f3665d38d34 100644 (file)
@@ -65,6 +65,7 @@ config IAQCORE
 config PMS7003
        tristate "Plantower PMS7003 particulate matter sensor"
        depends on SERIAL_DEV_BUS
+       select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
          Say Y here to build support for the Plantower PMS7003 particulate
index a7d40c02ce6b53a7a2606b454acb283738f7f33a..b921dd9e108faa5bfd017a1a7550c5095ff0c924 100644 (file)
@@ -1301,7 +1301,8 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id,
 
        for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
                for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
-                       if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
+                       if (st_lsm6dsx_sensor_settings[i].id[j].name &&
+                           id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
                                break;
                }
                if (j < ST_LSM6DSX_MAX_ID)
index c193d64e52179f296f8c68c1af09fdd77b16b425..112225c0e4868b0fbf4d6d31b366b2c610b53f2d 100644 (file)
@@ -566,7 +566,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                const unsigned long *mask, bool timestamp)
 {
        unsigned bytes = 0;
-       int length, i;
+       int length, i, largest = 0;
 
        /* How much space will the demuxed element take? */
        for_each_set_bit(i, mask,
@@ -574,13 +574,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                length = iio_storage_bytes_for_si(indio_dev, i);
                bytes = ALIGN(bytes, length);
                bytes += length;
+               largest = max(largest, length);
        }
 
        if (timestamp) {
                length = iio_storage_bytes_for_timestamp(indio_dev);
                bytes = ALIGN(bytes, length);
                bytes += length;
+               largest = max(largest, length);
        }
+
+       bytes = ALIGN(bytes, largest);
        return bytes;
 }
 
index 16dacea9eadfa540afd57c7889339fc03384f639..b0e241aaefb48863c28f06235bdab3b3ac221fa7 100644 (file)
@@ -163,7 +163,6 @@ static int vcnl4200_init(struct vcnl4000_data *data)
        if (ret < 0)
                return ret;
 
-       data->al_scale = 24000;
        data->vcnl4200_al.reg = VCNL4200_AL_DATA;
        data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
        switch (id) {
@@ -172,11 +171,13 @@ static int vcnl4200_init(struct vcnl4000_data *data)
                /* show 54ms in total. */
                data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
                data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+               data->al_scale = 24000;
                break;
        case VCNL4040_PROD_ID:
                /* Integration time is 80ms, add 10ms. */
                data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
                data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+               data->al_scale = 120000;
                break;
        }
        data->vcnl4200_al.last_measurement = ktime_set(0, 0);
index 9b6ca15a183cd880a77574c3d473c4df47203412..ad5112a2325f9f2b51d8407a538de99d2d6473bd 100644 (file)
@@ -3305,8 +3305,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
        int rc;
 
        rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-       if (rc)
+       if (rc) {
                dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+               return rc;
+       }
 
        if (mr->pages) {
                rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
index 958c1ff9c515c46767f2a3a1d03a1687671ebf61..4d07d22bfa7b1523e5985258a4d8a20894b27dc9 100644 (file)
@@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
                        /* Add qp to flush list of the CQ */
                        bnxt_qplib_add_flush_qp(qp);
                } else {
+                       /* Before we complete, do WA 9060 */
+                       if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+                                     cqe_sq_cons)) {
+                               *lib_qp = qp;
+                               goto out;
+                       }
                        if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
-                               /* Before we complete, do WA 9060 */
-                               if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
-                                             cqe_sq_cons)) {
-                                       *lib_qp = qp;
-                                       goto out;
-                               }
                                cqe->status = CQ_REQ_STATUS_OK;
                                cqe++;
                                (*budget)--;
index adb4a1ba921b8cb4d593568f9445674bb0f7a3e4..5836fe7b28177b2ea2caea38d5742061e15dac8d 100644 (file)
@@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
 void iowait_cancel_work(struct iowait *w)
 {
        cancel_work_sync(&iowait_get_ib_work(w)->iowork);
-       cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+       /* Make sure that the iowork for TID RDMA is used */
+       if (iowait_get_tid_work(w)->iowork.func)
+               cancel_work_sync(&iowait_get_tid_work(w)->iowork);
 }
 
 /**
index e53f542b60af8a4da8201ae00286aa3765ecfdab..8a2e0d9351e91e569da79b011f2deb2619365a27 100644 (file)
@@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
                         */
                        fpsn = full_flow_psn(flow, flow->flow_state.spsn);
                        req->r_ack_psn = psn;
+                       /*
+                        * If resync_psn points to the last flow PSN for a
+                        * segment and the new segment (likely from a new
+                        * request) starts with a new generation number, we
+                        * need to adjust resync_psn accordingly.
+                        */
+                       if (flow->flow_state.generation !=
+                           (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+                               resync_psn = mask_psn(fpsn - 1);
                        flow->resync_npkts +=
                                delta_psn(mask_psn(resync_psn + 1), fpsn);
                        /*
index 86375947bc67a9550cb26658999ee0908d798254..dbd96d029d8bd073c88229ba14ffc15d6feb7094 100644 (file)
@@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
        struct i40iw_ucontext *ucontext;
-       u64 db_addr_offset;
-       u64 push_offset;
+       u64 db_addr_offset, push_offset, pfn;
 
        ucontext = to_ucontext(context);
        if (ucontext->iwdev->sc_dev.is_pf) {
@@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
        if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-               vma->vm_private_data = ucontext;
        } else {
                if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        }
 
-       if (io_remap_pfn_range(vma, vma->vm_start,
-                              vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
-                              PAGE_SIZE, vma->vm_page_prot))
-               return -EAGAIN;
+       pfn = vma->vm_pgoff +
+             (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+              PAGE_SHIFT);
 
-       return 0;
+       return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+                                vma->vm_page_prot, NULL);
 }
 
 /**
index d7dd6fcf2db05a8841e60a4089dd71212d15c476..f918fca9ada3206b45dfef02a0a69b834107f071 100644 (file)
@@ -224,13 +224,13 @@ static void __pass_event(struct evdev_client *client,
                 */
                client->tail = (client->head - 2) & (client->bufsize - 1);
 
-               client->buffer[client->tail].input_event_sec =
-                                               event->input_event_sec;
-               client->buffer[client->tail].input_event_usec =
-                                               event->input_event_usec;
-               client->buffer[client->tail].type = EV_SYN;
-               client->buffer[client->tail].code = SYN_DROPPED;
-               client->buffer[client->tail].value = 0;
+               client->buffer[client->tail] = (struct input_event) {
+                       .input_event_sec = event->input_event_sec,
+                       .input_event_usec = event->input_event_usec,
+                       .type = EV_SYN,
+                       .code = SYN_DROPPED,
+                       .value = 0,
+               };
 
                client->packet_head = client->tail;
        }
index 55086279d044e752e2e009639d36bb89e2462f4d..ee6c3234df3634800f257207b3970517d5e68384 100644 (file)
@@ -878,16 +878,18 @@ static int input_default_setkeycode(struct input_dev *dev,
                }
        }
 
-       __clear_bit(*old_keycode, dev->keybit);
-       __set_bit(ke->keycode, dev->keybit);
-
-       for (i = 0; i < dev->keycodemax; i++) {
-               if (input_fetch_keycode(dev, i) == *old_keycode) {
-                       __set_bit(*old_keycode, dev->keybit);
-                       break; /* Setting the bit twice is useless, so break */
+       if (*old_keycode <= KEY_MAX) {
+               __clear_bit(*old_keycode, dev->keybit);
+               for (i = 0; i < dev->keycodemax; i++) {
+                       if (input_fetch_keycode(dev, i) == *old_keycode) {
+                               __set_bit(*old_keycode, dev->keybit);
+                               /* Setting the bit twice is useless, so break */
+                               break;
+                       }
                }
        }
 
+       __set_bit(ke->keycode, dev->keybit);
        return 0;
 }
 
@@ -943,9 +945,13 @@ int input_set_keycode(struct input_dev *dev,
         * Simulate keyup event if keycode is not present
         * in the keymap anymore
         */
-       if (test_bit(EV_KEY, dev->evbit) &&
-           !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
-           __test_and_clear_bit(old_keycode, dev->key)) {
+       if (old_keycode > KEY_MAX) {
+               dev_warn(dev->dev.parent ?: &dev->dev,
+                        "%s: got too big old keycode %#x\n",
+                        __func__, old_keycode);
+       } else if (test_bit(EV_KEY, dev->evbit) &&
+                  !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
+                  __test_and_clear_bit(old_keycode, dev->key)) {
                struct input_value vals[] =  {
                        { EV_KEY, old_keycode, 0 },
                        input_value_sync
index 53799527dc75b27b57a48639af7a8d71af134ffa..9f809aeb785c4927d60009d233610d313fc07e9d 100644 (file)
@@ -78,7 +78,13 @@ static void imx_sc_check_for_events(struct work_struct *work)
                return;
        }
 
-       state = (bool)msg.state;
+       /*
+        * The response data from SCU firmware is 4 bytes,
+        * but ONLY the first byte is the key state, other
+        * 3 bytes could be some dirty data, so we should
+        * ONLY take the first byte as key state.
+        */
+       state = (bool)(msg.state & 0xff);
 
        if (state ^ priv->keystate) {
                priv->keystate = state;
index fd253781be711d306a4f0f41ffa59bac1a284000..f2593133e5247a8fbc6cebfc296a2aedeb0c87ff 100644 (file)
@@ -74,12 +74,16 @@ static int uinput_dev_event(struct input_dev *dev,
        struct uinput_device    *udev = input_get_drvdata(dev);
        struct timespec64       ts;
 
-       udev->buff[udev->head].type = type;
-       udev->buff[udev->head].code = code;
-       udev->buff[udev->head].value = value;
        ktime_get_ts64(&ts);
-       udev->buff[udev->head].input_event_sec = ts.tv_sec;
-       udev->buff[udev->head].input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+       udev->buff[udev->head] = (struct input_event) {
+               .input_event_sec = ts.tv_sec,
+               .input_event_usec = ts.tv_nsec / NSEC_PER_USEC,
+               .type = type,
+               .code = code,
+               .value = value,
+       };
+
        udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE;
 
        wake_up_interruptible(&udev->waitq);
@@ -689,13 +693,14 @@ static ssize_t uinput_read(struct file *file, char __user *buffer,
 static __poll_t uinput_poll(struct file *file, poll_table *wait)
 {
        struct uinput_device *udev = file->private_data;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uinput is always writable */
 
        poll_wait(file, &udev->waitq, wait);
 
        if (udev->head != udev->tail)
-               return EPOLLIN | EPOLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
-       return EPOLLOUT | EPOLLWRNORM;
+       return mask;
 }
 
 static int uinput_release(struct inode *inode, struct file *file)
index c4486db105af01bfb5175dea3e84492254d86870..d2fade984999776edbfe361ff457fce862abb97a 100644 (file)
@@ -214,6 +214,7 @@ config INTEL_IOMMU_SVM
        select PCI_PASID
        select PCI_PRI
        select MMU_NOTIFIER
+       select IOASID
        help
          Shared Virtual Memory (SVM) provides a facility for devices
          to access DMA resources through process address space by
@@ -248,6 +249,18 @@ config INTEL_IOMMU_FLOPPY_WA
          workaround will setup a 1:1 mapping for the first
          16MiB to make floppy (an ISA device) work.
 
+config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+       bool "Enable Intel IOMMU scalable mode by default"
+       depends on INTEL_IOMMU
+       help
+         Selecting this option will enable by default the scalable mode if
+         hardware presents the capability. The scalable mode is defined in
+         VT-d 3.0. The scalable mode capability could be checked by reading
+         /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
+         is not selected, scalable mode support could also be enabled by
+         passing intel_iommu=sm_on to the kernel. If not sure, please use
+         the default value.
+
 config IRQ_REMAP
        bool "Support for Interrupt Remapping"
        depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
@@ -356,7 +369,7 @@ config SPAPR_TCE_IOMMU
 
 # ARM IOMMU support
 config ARM_SMMU
-       bool "ARM Ltd. System MMU (SMMU) Support"
+       tristate "ARM Ltd. System MMU (SMMU) Support"
        depends on (ARM64 || ARM) && MMU
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
@@ -368,6 +381,18 @@ config ARM_SMMU
          Say Y here if your SoC includes an IOMMU device implementing
          the ARM SMMU architecture.
 
+config ARM_SMMU_LEGACY_DT_BINDINGS
+       bool "Support the legacy \"mmu-masters\" devicetree bindings"
+       depends on ARM_SMMU=y && OF
+       help
+         Support for the badly designed and deprecated "mmu-masters"
+         devicetree bindings. This allows some DMA masters to attach
+         to the SMMU but does not provide any support via the DMA API.
+         If you're lucky, you might be able to get VFIO up and running.
+
+         If you say Y here then you'll make me very sad. Instead, say N
+         and move your firmware to the utopian future that was 2016.
+
 config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
        bool "Default to disabling bypass on ARM SMMU v1 and v2"
        depends on ARM_SMMU
@@ -394,7 +419,7 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
          config.
 
 config ARM_SMMU_V3
-       bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+       tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
        depends on ARM64
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
index 97814cc861eaa90f58e46128c8ac26e2ef1c7489..2104fb8afc0660c2516df9dd5a5d4b6c31d21b8b 100644 (file)
@@ -14,7 +14,8 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o
+arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
index 2c336b87f34076e6a45a815ac1d8debcaa611d77..60538e9df8a63d0057b46e5dabcb234e53658c15 100644 (file)
@@ -2297,7 +2297,6 @@ int __init amd_iommu_init_api(void)
 int __init amd_iommu_init_dma_ops(void)
 {
        swiotlb        = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
-       iommu_detected = 1;
 
        if (amd_iommu_unmap_flush)
                pr_info("IO/TLB flush on unmap enabled\n");
index 568c52317757ca2924598b69c7329dcfad940356..587ed1323c8628c0b6747218b721b1f8a8321b5e 100644 (file)
@@ -71,6 +71,8 @@
 #define IVHD_FLAG_ISOC_EN_MASK          0x08
 
 #define IVMD_FLAG_EXCL_RANGE            0x08
+#define IVMD_FLAG_IW                    0x04
+#define IVMD_FLAG_IR                    0x02
 #define IVMD_FLAG_UNITY_MAP             0x01
 
 #define ACPI_DEVFLAG_INITPASS           0x01
@@ -147,7 +149,7 @@ bool amd_iommu_dump;
 bool amd_iommu_irq_remap __read_mostly;
 
 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
-static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
 
 static bool amd_iommu_detected;
 static bool __initdata amd_iommu_disabled;
@@ -714,7 +716,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
        writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
-       iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+       iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
        iommu_feature_enable(iommu, CONTROL_PPR_EN);
 }
 
@@ -1116,21 +1118,17 @@ static int __init add_early_maps(void)
  */
 static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 {
-       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;
 
-       if (iommu) {
-               /*
-                * We only can configure exclusion ranges per IOMMU, not
-                * per device. But we can enable the exclusion range per
-                * device. This is done here
-                */
-               set_dev_entry_bit(devid, DEV_ENTRY_EX);
-               iommu->exclusion_start = m->range_start;
-               iommu->exclusion_length = m->range_length;
-       }
+       /*
+        * Treat per-device exclusion ranges as r/w unity-mapped regions
+        * since some buggy BIOSes might lead to the overwritten exclusion
+        * range (exclusion_start and exclusion_length members). This
+        * happens when there are multiple exclusion ranges (IVMD entries)
+        * defined in ACPI table.
+        */
+       m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
 }
 
 /*
@@ -1523,8 +1521,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
                if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-               if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
-                       amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
                break;
        case 0x11:
        case 0x40:
@@ -1534,8 +1530,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
                        iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
                if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
                        amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
-               if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
-                       amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+               /*
+                * Note: Since iommu_update_intcapxt() leverages
+                * the IOMMU MMIO access to MSI capability block registers
+                * for MSI address lo/hi/data, we need to check both
+                * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
+                */
+               if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
+                   (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
+                       amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
                break;
        default:
                return -EINVAL;
@@ -1655,27 +1658,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 {
        struct pci_dev *pdev = iommu->dev;
-       u64 val = 0xabcd, val2 = 0;
+       u64 val = 0xabcd, val2 = 0, save_reg = 0;
 
        if (!iommu_feature(iommu, FEATURE_PC))
                return;
 
        amd_iommu_pc_present = true;
 
+       /* save the value to restore, if writable */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+               goto pc_false;
+
        /* Check if the performance counters can be written to */
        if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
            (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
-           (val != val2)) {
-               pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
-               amd_iommu_pc_present = false;
-               return;
-       }
+           (val != val2))
+               goto pc_false;
+
+       /* restore */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+               goto pc_false;
 
        pci_info(pdev, "IOMMU performance counters supported\n");
 
        val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
        iommu->max_banks = (u8) ((val >> 12) & 0x3f);
        iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+       return;
+
+pc_false:
+       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+       amd_iommu_pc_present = false;
+       return;
 }
 
 static ssize_t amd_iommu_show_cap(struct device *dev,
@@ -1715,7 +1730,6 @@ static const struct attribute_group *amd_iommu_groups[] = {
 static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
        int cap_ptr = iommu->cap_ptr;
-       u32 range, misc, low, high;
        int ret;
 
        iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
@@ -1728,19 +1742,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
-       pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
-                             &range);
-       pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
-                             &misc);
 
        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;
 
        /* read extended feature bits */
-       low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
-       high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
-       iommu->features = ((u64)high << 32) | low;
+       iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
 
        if (iommu_feature(iommu, FEATURE_GT)) {
                int glxval;
@@ -1984,8 +1991,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu)
        struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
 
        /**
-        * IntCapXT requires XTSup=1, which can be inferred
-        * amd_iommu_xt_mode.
+        * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
+        * which can be inferred from amd_iommu_xt_mode.
         */
        if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
                return 0;
@@ -2032,7 +2039,7 @@ static int iommu_init_msi(struct amd_iommu *iommu)
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
        if (iommu->ppr_log != NULL)
-               iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+               iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
 
        iommu_ga_log_enable(iommu);
 
index f52f59d5c6bd44bd8e08657698b476c9d9c3a847..f8d01d6b00da7d9f1e94f7819888e7200019ad06 100644 (file)
 #define CONTROL_COHERENT_EN     0x0aULL
 #define CONTROL_ISOC_EN         0x0bULL
 #define CONTROL_CMDBUF_EN       0x0cULL
-#define CONTROL_PPFLOG_EN       0x0dULL
-#define CONTROL_PPFINT_EN       0x0eULL
+#define CONTROL_PPRLOG_EN       0x0dULL
+#define CONTROL_PPRINT_EN       0x0eULL
 #define CONTROL_PPR_EN          0x0fULL
 #define CONTROL_GT_EN           0x10ULL
 #define CONTROL_GA_EN           0x11ULL
 #define IOMMU_CAP_EFR     27
 
 /* IOMMU Feature Reporting Field (for IVHD type 10h */
-#define IOMMU_FEAT_XTSUP_SHIFT 0
 #define IOMMU_FEAT_GASUP_SHIFT 6
 
 /* IOMMU Extended Feature Register (EFR) */
 #define IOMMU_EFR_XTSUP_SHIFT  2
 #define IOMMU_EFR_GASUP_SHIFT  7
+#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT  46
 
 #define MAX_DOMAIN_ID 65536
 
@@ -463,7 +463,6 @@ struct amd_irte_ops;
  * independent of their use.
  */
 struct protection_domain {
-       struct list_head list;  /* for list of all protection domains */
        struct list_head dev_list; /* List of all devices in this domain */
        struct iommu_domain domain; /* generic domain handle used by
                                       iommu core code */
index b2fe72a8f019324fc61c155f7a308e8c23c91d04..74d97a886e9315dfc6418dd9870d214864c15077 100644 (file)
@@ -119,7 +119,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
         * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
         */
        reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
-       major = FIELD_GET(ID7_MAJOR, reg);
+       major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
        reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
        if (major >= 2)
                reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
index 7f5b74a418de62d5ac65afa0d4765e5a8ddb8973..aa3ac2a03807f6db12f9546a3eb65a90547d962b 100644 (file)
@@ -21,8 +21,7 @@
 #include <linux/io-pgtable.h>
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 
 #define STRTAB_STE_0_S1FMT             GENMASK_ULL(5, 4)
 #define STRTAB_STE_0_S1FMT_LINEAR      0
+#define STRTAB_STE_0_S1FMT_64K_L2      2
 #define STRTAB_STE_0_S1CTXPTR_MASK     GENMASK_ULL(51, 6)
 #define STRTAB_STE_0_S1CDMAX           GENMASK_ULL(63, 59)
 
+#define STRTAB_STE_1_S1DSS             GENMASK_ULL(1, 0)
+#define STRTAB_STE_1_S1DSS_TERMINATE   0x0
+#define STRTAB_STE_1_S1DSS_BYPASS      0x1
+#define STRTAB_STE_1_S1DSS_SSID0       0x2
+
 #define STRTAB_STE_1_S1C_CACHE_NC      0UL
 #define STRTAB_STE_1_S1C_CACHE_WBRA    1UL
 #define STRTAB_STE_1_S1C_CACHE_WT      2UL
 
 #define STRTAB_STE_2_S2VMID            GENMASK_ULL(15, 0)
 #define STRTAB_STE_2_VTCR              GENMASK_ULL(50, 32)
+#define STRTAB_STE_2_VTCR_S2T0SZ       GENMASK_ULL(5, 0)
+#define STRTAB_STE_2_VTCR_S2SL0                GENMASK_ULL(7, 6)
+#define STRTAB_STE_2_VTCR_S2IR0                GENMASK_ULL(9, 8)
+#define STRTAB_STE_2_VTCR_S2OR0                GENMASK_ULL(11, 10)
+#define STRTAB_STE_2_VTCR_S2SH0                GENMASK_ULL(13, 12)
+#define STRTAB_STE_2_VTCR_S2TG         GENMASK_ULL(15, 14)
+#define STRTAB_STE_2_VTCR_S2PS         GENMASK_ULL(18, 16)
 #define STRTAB_STE_2_S2AA64            (1UL << 51)
 #define STRTAB_STE_2_S2ENDI            (1UL << 52)
 #define STRTAB_STE_2_S2PTW             (1UL << 54)
 
 #define STRTAB_STE_3_S2TTB_MASK                GENMASK_ULL(51, 4)
 
-/* Context descriptor (stage-1 only) */
+/*
+ * Context descriptors.
+ *
+ * Linear: when less than 1024 SSIDs are supported
+ * 2lvl: at most 1024 L1 entries,
+ *       1024 lazy entries per table.
+ */
+#define CTXDESC_SPLIT                  10
+#define CTXDESC_L2_ENTRIES             (1 << CTXDESC_SPLIT)
+
+#define CTXDESC_L1_DESC_DWORDS         1
+#define CTXDESC_L1_DESC_V              (1UL << 0)
+#define CTXDESC_L1_DESC_L2PTR_MASK     GENMASK_ULL(51, 12)
+
 #define CTXDESC_CD_DWORDS              8
 #define CTXDESC_CD_0_TCR_T0SZ          GENMASK_ULL(5, 0)
-#define ARM64_TCR_T0SZ                 GENMASK_ULL(5, 0)
 #define CTXDESC_CD_0_TCR_TG0           GENMASK_ULL(7, 6)
-#define ARM64_TCR_TG0                  GENMASK_ULL(15, 14)
 #define CTXDESC_CD_0_TCR_IRGN0         GENMASK_ULL(9, 8)
-#define ARM64_TCR_IRGN0                        GENMASK_ULL(9, 8)
 #define CTXDESC_CD_0_TCR_ORGN0         GENMASK_ULL(11, 10)
-#define ARM64_TCR_ORGN0                        GENMASK_ULL(11, 10)
 #define CTXDESC_CD_0_TCR_SH0           GENMASK_ULL(13, 12)
-#define ARM64_TCR_SH0                  GENMASK_ULL(13, 12)
 #define CTXDESC_CD_0_TCR_EPD0          (1ULL << 14)
-#define ARM64_TCR_EPD0                 (1ULL << 7)
 #define CTXDESC_CD_0_TCR_EPD1          (1ULL << 30)
-#define ARM64_TCR_EPD1                 (1ULL << 23)
 
 #define CTXDESC_CD_0_ENDI              (1UL << 15)
 #define CTXDESC_CD_0_V                 (1UL << 31)
 
 #define CTXDESC_CD_0_TCR_IPS           GENMASK_ULL(34, 32)
-#define ARM64_TCR_IPS                  GENMASK_ULL(34, 32)
 #define CTXDESC_CD_0_TCR_TBI0          (1ULL << 38)
-#define ARM64_TCR_TBI0                 (1ULL << 37)
 
 #define CTXDESC_CD_0_AA64              (1UL << 41)
 #define CTXDESC_CD_0_S                 (1UL << 44)
 
 #define CTXDESC_CD_1_TTB0_MASK         GENMASK_ULL(51, 4)
 
-/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
-#define ARM_SMMU_TCR2CD(tcr, fld)      FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \
-                                       FIELD_GET(ARM64_TCR_##fld, tcr))
+/*
+ * When the SMMU only supports linear context descriptor tables, pick a
+ * reasonable size limit (64kB).
+ */
+#define CTXDESC_LINEAR_CDMAX           ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3))
 
 /* Command queue */
 #define CMDQ_ENT_SZ_SHIFT              4
 #define CMDQ_PREFETCH_1_SIZE           GENMASK_ULL(4, 0)
 #define CMDQ_PREFETCH_1_ADDR_MASK      GENMASK_ULL(63, 12)
 
+#define CMDQ_CFGI_0_SSID               GENMASK_ULL(31, 12)
 #define CMDQ_CFGI_0_SID                        GENMASK_ULL(63, 32)
 #define CMDQ_CFGI_1_LEAF               (1UL << 0)
 #define CMDQ_CFGI_1_RANGE              GENMASK_ULL(4, 0)
 #define MSI_IOVA_BASE                  0x8000000
 #define MSI_IOVA_LENGTH                        0x100000
 
-/*
- * not really modular, but the easiest way to keep compat with existing
- * bootargs behaviour is to continue using module_param_named here.
- */
 static bool disable_bypass = 1;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -440,8 +455,11 @@ struct arm_smmu_cmdq_ent {
 
                #define CMDQ_OP_CFGI_STE        0x3
                #define CMDQ_OP_CFGI_ALL        0x4
+               #define CMDQ_OP_CFGI_CD         0x5
+               #define CMDQ_OP_CFGI_CD_ALL     0x6
                struct {
                        u32                     sid;
+                       u32                     ssid;
                        union {
                                bool            leaf;
                                u8              span;
@@ -547,16 +565,30 @@ struct arm_smmu_strtab_l1_desc {
        dma_addr_t                      l2ptr_dma;
 };
 
+struct arm_smmu_ctx_desc {
+       u16                             asid;
+       u64                             ttbr;
+       u64                             tcr;
+       u64                             mair;
+};
+
+struct arm_smmu_l1_ctx_desc {
+       __le64                          *l2ptr;
+       dma_addr_t                      l2ptr_dma;
+};
+
+struct arm_smmu_ctx_desc_cfg {
+       __le64                          *cdtab;
+       dma_addr_t                      cdtab_dma;
+       struct arm_smmu_l1_ctx_desc     *l1_desc;
+       unsigned int                    num_l1_ents;
+};
+
 struct arm_smmu_s1_cfg {
-       __le64                          *cdptr;
-       dma_addr_t                      cdptr_dma;
-
-       struct arm_smmu_ctx_desc {
-               u16     asid;
-               u64     ttbr;
-               u64     tcr;
-               u64     mair;
-       }                               cd;
+       struct arm_smmu_ctx_desc_cfg    cdcfg;
+       struct arm_smmu_ctx_desc        cd;
+       u8                              s1fmt;
+       u8                              s1cdmax;
 };
 
 struct arm_smmu_s2_cfg {
@@ -638,6 +670,7 @@ struct arm_smmu_master {
        u32                             *sids;
        unsigned int                    num_sids;
        bool                            ats_enabled;
+       unsigned int                    ssid_bits;
 };
 
 /* SMMU private data for an IOMMU domain */
@@ -847,15 +880,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
                cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
                cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
                break;
+       case CMDQ_OP_CFGI_CD:
+               cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
+               /* Fallthrough */
        case CMDQ_OP_CFGI_STE:
                cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
                cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
                break;
+       case CMDQ_OP_CFGI_CD_ALL:
+               cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
+               break;
        case CMDQ_OP_CFGI_ALL:
                /* Cover the entire SID range */
                cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
                break;
        case CMDQ_OP_TLBI_NH_VA:
+               cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
                cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
                cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
                cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
@@ -1443,50 +1483,238 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
 }
 
 /* Context descriptor manipulation functions */
-static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
+static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+                            int ssid, bool leaf)
 {
-       u64 val = 0;
+       size_t i;
+       unsigned long flags;
+       struct arm_smmu_master *master;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_cmdq_ent cmd = {
+               .opcode = CMDQ_OP_CFGI_CD,
+               .cfgi   = {
+                       .ssid   = ssid,
+                       .leaf   = leaf,
+               },
+       };
+
+       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+       list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+               for (i = 0; i < master->num_sids; i++) {
+                       cmd.cfgi.sid = master->sids[i];
+                       arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+               }
+       }
+       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+       arm_smmu_cmdq_issue_sync(smmu);
+}
 
-       /* Repack the TCR. Just care about TTBR0 for now */
-       val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
-       val |= ARM_SMMU_TCR2CD(tcr, TG0);
-       val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
-       val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
-       val |= ARM_SMMU_TCR2CD(tcr, SH0);
-       val |= ARM_SMMU_TCR2CD(tcr, EPD0);
-       val |= ARM_SMMU_TCR2CD(tcr, EPD1);
-       val |= ARM_SMMU_TCR2CD(tcr, IPS);
+static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
+                                       struct arm_smmu_l1_ctx_desc *l1_desc)
+{
+       size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
 
-       return val;
+       l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
+                                            &l1_desc->l2ptr_dma, GFP_KERNEL);
+       if (!l1_desc->l2ptr) {
+               dev_warn(smmu->dev,
+                        "failed to allocate context descriptor table\n");
+               return -ENOMEM;
+       }
+       return 0;
 }
 
-static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
-                                   struct arm_smmu_s1_cfg *cfg)
+static void arm_smmu_write_cd_l1_desc(__le64 *dst,
+                                     struct arm_smmu_l1_ctx_desc *l1_desc)
 {
-       u64 val;
+       u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
+                 CTXDESC_L1_DESC_V;
 
+       WRITE_ONCE(*dst, cpu_to_le64(val));
+}
+
+static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
+                                  u32 ssid)
+{
+       __le64 *l1ptr;
+       unsigned int idx;
+       struct arm_smmu_l1_ctx_desc *l1_desc;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+
+       if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
+               return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
+
+       idx = ssid >> CTXDESC_SPLIT;
+       l1_desc = &cdcfg->l1_desc[idx];
+       if (!l1_desc->l2ptr) {
+               if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+                       return NULL;
+
+               l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+               arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+               /* An invalid L1CD can be cached */
+               arm_smmu_sync_cd(smmu_domain, ssid, false);
+       }
+       idx = ssid & (CTXDESC_L2_ENTRIES - 1);
+       return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
+}
+
+static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
+                                  int ssid, struct arm_smmu_ctx_desc *cd)
+{
        /*
-        * We don't need to issue any invalidation here, as we'll invalidate
-        * the STE when installing the new entry anyway.
+        * This function handles the following cases:
+        *
+        * (1) Install primary CD, for normal DMA traffic (SSID = 0).
+        * (2) Install a secondary CD, for SID+SSID traffic.
+        * (3) Update ASID of a CD. Atomically write the first 64 bits of the
+        *     CD, then invalidate the old entry and mappings.
+        * (4) Remove a secondary CD.
         */
-       val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
+       u64 val;
+       bool cd_live;
+       __le64 *cdptr;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+       if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
+               return -E2BIG;
+
+       cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+       if (!cdptr)
+               return -ENOMEM;
+
+       val = le64_to_cpu(cdptr[0]);
+       cd_live = !!(val & CTXDESC_CD_0_V);
+
+       if (!cd) { /* (4) */
+               val = 0;
+       } else if (cd_live) { /* (3) */
+               val &= ~CTXDESC_CD_0_ASID;
+               val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
+               /*
+                * Until CD+TLB invalidation, both ASIDs may be used for tagging
+                * this substream's traffic
+                */
+       } else { /* (1) and (2) */
+               cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
+               cdptr[2] = 0;
+               cdptr[3] = cpu_to_le64(cd->mair);
+
+               /*
+                * STE is live, and the SMMU might read dwords of this CD in any
+                * order. Ensure that it observes valid values before reading
+                * V=1.
+                */
+               arm_smmu_sync_cd(smmu_domain, ssid, true);
+
+               val = cd->tcr |
 #ifdef __BIG_ENDIAN
-             CTXDESC_CD_0_ENDI |
+                       CTXDESC_CD_0_ENDI |
 #endif
-             CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
-             CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) |
-             CTXDESC_CD_0_V;
+                       CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
+                       CTXDESC_CD_0_AA64 |
+                       FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
+                       CTXDESC_CD_0_V;
+
+               /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
+               if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+                       val |= CTXDESC_CD_0_S;
+       }
+
+       /*
+        * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
+        * "Configuration structures and configuration invalidation completion"
+        *
+        *   The size of single-copy atomic reads made by the SMMU is
+        *   IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
+        *   field within an aligned 64-bit span of a structure can be altered
+        *   without first making the structure invalid.
+        */
+       WRITE_ONCE(cdptr[0], cpu_to_le64(val));
+       arm_smmu_sync_cd(smmu_domain, ssid, true);
+       return 0;
+}
+
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+       int ret;
+       size_t l1size;
+       size_t max_contexts;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+       struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
+
+       max_contexts = 1 << cfg->s1cdmax;
+
+       if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
+           max_contexts <= CTXDESC_L2_ENTRIES) {
+               cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
+               cdcfg->num_l1_ents = max_contexts;
+
+               l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
+       } else {
+               cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
+               cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
+                                                 CTXDESC_L2_ENTRIES);
+
+               cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
+                                             sizeof(*cdcfg->l1_desc),
+                                             GFP_KERNEL);
+               if (!cdcfg->l1_desc)
+                       return -ENOMEM;
+
+               l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+       }
 
-       /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
-       if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
-               val |= CTXDESC_CD_0_S;
+       cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
+                                          GFP_KERNEL);
+       if (!cdcfg->cdtab) {
+               dev_warn(smmu->dev, "failed to allocate context descriptor\n");
+               ret = -ENOMEM;
+               goto err_free_l1;
+       }
+
+       return 0;
+
+err_free_l1:
+       if (cdcfg->l1_desc) {
+               devm_kfree(smmu->dev, cdcfg->l1_desc);
+               cdcfg->l1_desc = NULL;
+       }
+       return ret;
+}
+
+static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
+{
+       int i;
+       size_t size, l1size;
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+
+       if (cdcfg->l1_desc) {
+               size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
+
+               for (i = 0; i < cdcfg->num_l1_ents; i++) {
+                       if (!cdcfg->l1_desc[i].l2ptr)
+                               continue;
 
-       cfg->cdptr[0] = cpu_to_le64(val);
+                       dmam_free_coherent(smmu->dev, size,
+                                          cdcfg->l1_desc[i].l2ptr,
+                                          cdcfg->l1_desc[i].l2ptr_dma);
+               }
+               devm_kfree(smmu->dev, cdcfg->l1_desc);
+               cdcfg->l1_desc = NULL;
 
-       val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK;
-       cfg->cdptr[1] = cpu_to_le64(val);
+               l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+       } else {
+               l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
+       }
 
-       cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair);
+       dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
+       cdcfg->cdtab_dma = 0;
+       cdcfg->cdtab = NULL;
 }
 
 /* Stream table manipulation functions */
@@ -1608,6 +1836,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
        if (s1_cfg) {
                BUG_ON(ste_live);
                dst[1] = cpu_to_le64(
+                        FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
                         FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
                         FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
                         FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
@@ -1617,8 +1846,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                   !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
                        dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
 
-               val |= (s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
-                       FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS);
+               val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+                       FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+                       FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
+                       FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
        }
 
        if (s2_cfg) {
@@ -1642,7 +1873,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                                                 STRTAB_STE_1_EATS_TRANS));
 
        arm_smmu_sync_ste_for_sid(smmu, sid);
-       dst[0] = cpu_to_le64(val);
+       /* See comment in arm_smmu_write_ctx_desc() */
+       WRITE_ONCE(dst[0], cpu_to_le64(val));
        arm_smmu_sync_ste_for_sid(smmu, sid);
 
        /* It's likely that we'll want to use the new STE soon */
@@ -1675,7 +1907,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 
        desc->span = STRTAB_SPLIT + 1;
        desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
-                                         GFP_KERNEL | __GFP_ZERO);
+                                         GFP_KERNEL);
        if (!desc->l2ptr) {
                dev_err(smmu->dev,
                        "failed to allocate l2 stream table for SID %u\n",
@@ -2131,12 +2363,8 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
 
-               if (cfg->cdptr) {
-                       dmam_free_coherent(smmu_domain->smmu->dev,
-                                          CTXDESC_CD_DWORDS << 3,
-                                          cfg->cdptr,
-                                          cfg->cdptr_dma);
-
+               if (cfg->cdcfg.cdtab) {
+                       arm_smmu_free_cd_tables(smmu_domain);
                        arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
                }
        } else {
@@ -2149,55 +2377,82 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 }
 
 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+                                      struct arm_smmu_master *master,
                                       struct io_pgtable_cfg *pgtbl_cfg)
 {
        int ret;
        int asid;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+       typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
        asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
        if (asid < 0)
                return asid;
 
-       cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
-                                        &cfg->cdptr_dma,
-                                        GFP_KERNEL | __GFP_ZERO);
-       if (!cfg->cdptr) {
-               dev_warn(smmu->dev, "failed to allocate context descriptor\n");
-               ret = -ENOMEM;
+       cfg->s1cdmax = master->ssid_bits;
+
+       ret = arm_smmu_alloc_cd_tables(smmu_domain);
+       if (ret)
                goto out_free_asid;
-       }
 
        cfg->cd.asid    = (u16)asid;
-       cfg->cd.ttbr    = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-       cfg->cd.tcr     = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+       cfg->cd.ttbr    = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+       cfg->cd.tcr     = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
+                         CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
        cfg->cd.mair    = pgtbl_cfg->arm_lpae_s1_cfg.mair;
+
+       /*
+        * Note that this will end up calling arm_smmu_sync_cd() before
+        * the master has been added to the devices list for this domain.
+        * This isn't an issue because the STE hasn't been installed yet.
+        */
+       ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
+       if (ret)
+               goto out_free_cd_tables;
+
        return 0;
 
+out_free_cd_tables:
+       arm_smmu_free_cd_tables(smmu_domain);
 out_free_asid:
        arm_smmu_bitmap_free(smmu->asid_map, asid);
        return ret;
 }
 
 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+                                      struct arm_smmu_master *master,
                                       struct io_pgtable_cfg *pgtbl_cfg)
 {
        int vmid;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
+       typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
 
        vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
        if (vmid < 0)
                return vmid;
 
+       vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
        cfg->vmid       = (u16)vmid;
        cfg->vttbr      = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
-       cfg->vtcr       = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+       cfg->vtcr       = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+                         FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
        return 0;
 }
 
-static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+static int arm_smmu_domain_finalise(struct iommu_domain *domain,
+                                   struct arm_smmu_master *master)
 {
        int ret;
        unsigned long ias, oas;
@@ -2205,6 +2460,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
        struct io_pgtable_cfg pgtbl_cfg;
        struct io_pgtable_ops *pgtbl_ops;
        int (*finalise_stage_fn)(struct arm_smmu_domain *,
+                                struct arm_smmu_master *,
                                 struct io_pgtable_cfg *);
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -2259,7 +2515,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
        domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
        domain->geometry.force_aperture = true;
 
-       ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+       ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
        if (ret < 0) {
                free_io_pgtable_ops(pgtbl_ops);
                return ret;
@@ -2412,7 +2668,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
        if (!smmu_domain->smmu) {
                smmu_domain->smmu = smmu;
-               ret = arm_smmu_domain_finalise(domain);
+               ret = arm_smmu_domain_finalise(domain, master);
                if (ret) {
                        smmu_domain->smmu = NULL;
                        goto out_unlock;
@@ -2424,6 +2680,13 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                        dev_name(smmu->dev));
                ret = -ENXIO;
                goto out_unlock;
+       } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
+                  master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
+               dev_err(dev,
+                       "cannot attach to incompatible domain (%u SSID bits != %u)\n",
+                       smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
+               ret = -EINVAL;
+               goto out_unlock;
        }
 
        master->domain = smmu_domain;
@@ -2431,9 +2694,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
        if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
                master->ats_enabled = arm_smmu_ats_supported(master);
 
-       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
-               arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
-
        arm_smmu_install_ste_for_dev(master);
 
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
@@ -2534,51 +2794,66 @@ static int arm_smmu_add_device(struct device *dev)
 
        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return -ENODEV;
-       /*
-        * We _can_ actually withstand dodgy bus code re-calling add_device()
-        * without an intervening remove_device()/of_xlate() sequence, but
-        * we're not going to do so quietly...
-        */
-       if (WARN_ON_ONCE(fwspec->iommu_priv)) {
-               master = fwspec->iommu_priv;
-               smmu = master->smmu;
-       } else {
-               smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
-               if (!smmu)
-                       return -ENODEV;
-               master = kzalloc(sizeof(*master), GFP_KERNEL);
-               if (!master)
-                       return -ENOMEM;
 
-               master->dev = dev;
-               master->smmu = smmu;
-               master->sids = fwspec->ids;
-               master->num_sids = fwspec->num_ids;
-               fwspec->iommu_priv = master;
-       }
+       if (WARN_ON_ONCE(fwspec->iommu_priv))
+               return -EBUSY;
+
+       smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
+       if (!smmu)
+               return -ENODEV;
+
+       master = kzalloc(sizeof(*master), GFP_KERNEL);
+       if (!master)
+               return -ENOMEM;
+
+       master->dev = dev;
+       master->smmu = smmu;
+       master->sids = fwspec->ids;
+       master->num_sids = fwspec->num_ids;
+       fwspec->iommu_priv = master;
 
        /* Check the SIDs are in range of the SMMU and our stream table */
        for (i = 0; i < master->num_sids; i++) {
                u32 sid = master->sids[i];
 
-               if (!arm_smmu_sid_in_range(smmu, sid))
-                       return -ERANGE;
+               if (!arm_smmu_sid_in_range(smmu, sid)) {
+                       ret = -ERANGE;
+                       goto err_free_master;
+               }
 
                /* Ensure l2 strtab is initialised */
                if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
                        ret = arm_smmu_init_l2_strtab(smmu, sid);
                        if (ret)
-                               return ret;
+                               goto err_free_master;
                }
        }
 
+       master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+
+       if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
+               master->ssid_bits = min_t(u8, master->ssid_bits,
+                                         CTXDESC_LINEAR_CDMAX);
+
+       ret = iommu_device_link(&smmu->iommu, dev);
+       if (ret)
+               goto err_free_master;
+
        group = iommu_group_get_for_dev(dev);
-       if (!IS_ERR(group)) {
-               iommu_group_put(group);
-               iommu_device_link(&smmu->iommu, dev);
+       if (IS_ERR(group)) {
+               ret = PTR_ERR(group);
+               goto err_unlink;
        }
 
-       return PTR_ERR_OR_ZERO(group);
+       iommu_group_put(group);
+       return 0;
+
+err_unlink:
+       iommu_device_unlink(&smmu->iommu, dev);
+err_free_master:
+       kfree(master);
+       fwspec->iommu_priv = NULL;
+       return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
@@ -2874,7 +3149,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 
        l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
        strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
-                                    GFP_KERNEL | __GFP_ZERO);
+                                    GFP_KERNEL);
        if (!strtab) {
                dev_err(smmu->dev,
                        "failed to allocate l1 stream table (%u bytes)\n",
@@ -2901,7 +3176,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
 
        size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
        strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
-                                    GFP_KERNEL | __GFP_ZERO);
+                                    GFP_KERNEL);
        if (!strtab) {
                dev_err(smmu->dev,
                        "failed to allocate linear stream table (%u bytes)\n",
@@ -3561,6 +3836,43 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
                return SZ_128K;
 }
 
+static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
+{
+       int err;
+
+#ifdef CONFIG_PCI
+       if (pci_bus_type.iommu_ops != ops) {
+               err = bus_set_iommu(&pci_bus_type, ops);
+               if (err)
+                       return err;
+       }
+#endif
+#ifdef CONFIG_ARM_AMBA
+       if (amba_bustype.iommu_ops != ops) {
+               err = bus_set_iommu(&amba_bustype, ops);
+               if (err)
+                       goto err_reset_pci_ops;
+       }
+#endif
+       if (platform_bus_type.iommu_ops != ops) {
+               err = bus_set_iommu(&platform_bus_type, ops);
+               if (err)
+                       goto err_reset_amba_ops;
+       }
+
+       return 0;
+
+err_reset_amba_ops:
+#ifdef CONFIG_ARM_AMBA
+       bus_set_iommu(&amba_bustype, NULL);
+#endif
+err_reset_pci_ops: __maybe_unused;
+#ifdef CONFIG_PCI
+       bus_set_iommu(&pci_bus_type, NULL);
+#endif
+       return err;
+}
+
 static int arm_smmu_device_probe(struct platform_device *pdev)
 {
        int irq, ret;
@@ -3590,7 +3902,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 
        /* Base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
+       if (resource_size(res) < arm_smmu_resource_size(smmu)) {
                dev_err(dev, "MMIO region too small (%pr)\n", res);
                return -EINVAL;
        }
@@ -3651,48 +3963,45 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                return ret;
        }
 
-#ifdef CONFIG_PCI
-       if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
-               pci_request_acs();
-               ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
-               if (ret)
-                       return ret;
-       }
-#endif
-#ifdef CONFIG_ARM_AMBA
-       if (amba_bustype.iommu_ops != &arm_smmu_ops) {
-               ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-               if (ret)
-                       return ret;
-       }
-#endif
-       if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
-               ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-               if (ret)
-                       return ret;
-       }
-       return 0;
+       return arm_smmu_set_bus_ops(&arm_smmu_ops);
 }
 
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
 {
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
+       arm_smmu_set_bus_ops(NULL);
+       iommu_device_unregister(&smmu->iommu);
+       iommu_device_sysfs_remove(&smmu->iommu);
        arm_smmu_device_disable(smmu);
+
+       return 0;
+}
+
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+       arm_smmu_device_remove(pdev);
 }
 
 static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "arm,smmu-v3", },
        { },
 };
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
 
 static struct platform_driver arm_smmu_driver = {
        .driver = {
-               .name           = "arm-smmu-v3",
-               .of_match_table = of_match_ptr(arm_smmu_of_match),
-               .suppress_bind_attrs = true,
+               .name                   = "arm-smmu-v3",
+               .of_match_table         = arm_smmu_of_match,
+               .suppress_bind_attrs    = true,
        },
        .probe  = arm_smmu_device_probe,
+       .remove = arm_smmu_device_remove,
        .shutdown = arm_smmu_device_shutdown,
 };
-builtin_platform_driver(arm_smmu_driver);
+module_platform_driver(arm_smmu_driver);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu-v3");
+MODULE_LICENSE("GPL v2");
index 7da03eba6f89e0ae32e284a9d2761a965525047b..16c4b87af42bb42b519e529556e3f804c97b5e98 100644 (file)
@@ -27,8 +27,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #define MSI_IOVA_LENGTH                        0x100000
 
 static int force_stage;
-/*
- * not really modular, but the easiest way to keep compat with existing
- * bootargs behaviour is to continue using module_param() here.
- */
 module_param(force_stage, int, S_IRUGO);
 MODULE_PARM_DESC(force_stage,
        "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
@@ -131,6 +126,12 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
        return container_of(dom, struct arm_smmu_domain, domain);
 }
 
+static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
+
+#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
+static int arm_smmu_bus_init(struct iommu_ops *ops);
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
        if (dev_is_pci(dev)) {
@@ -166,9 +167,6 @@ static int __find_legacy_master_phandle(struct device *dev, void *data)
        return err == -ENOENT ? 0 : err;
 }
 
-static struct platform_driver arm_smmu_driver;
-static struct iommu_ops arm_smmu_ops;
-
 static int arm_smmu_register_legacy_master(struct device *dev,
                                           struct arm_smmu_device **smmu)
 {
@@ -220,6 +218,27 @@ static int arm_smmu_register_legacy_master(struct device *dev,
        return err;
 }
 
+/*
+ * With the legacy DT binding in play, we have no guarantees about
+ * probe order, but then we're also not doing default domains, so we can
+ * delay setting bus ops until we're sure every possible SMMU is ready,
+ * and that way ensure that no add_device() calls get missed.
+ */
+static int arm_smmu_legacy_bus_init(void)
+{
+       if (using_legacy_binding)
+               return arm_smmu_bus_init(&arm_smmu_ops);
+       return 0;
+}
+device_initcall_sync(arm_smmu_legacy_bus_init);
+#else
+static int arm_smmu_register_legacy_master(struct device *dev,
+                                          struct arm_smmu_device **smmu)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
+
 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
 {
        int idx;
@@ -252,7 +271,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
        for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                        reg = arm_smmu_readl(smmu, page, status);
-                       if (!(reg & sTLBGSTATUS_GSACTIVE))
+                       if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
                                return;
                        cpu_relax();
                }
@@ -459,7 +478,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
        int idx = smmu_domain->cfg.cbndx;
 
        fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
-       if (!(fsr & FSR_FAULT))
+       if (!(fsr & ARM_SMMU_FSR_FAULT))
                return IRQ_NONE;
 
        fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
@@ -491,7 +510,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 
        if (__ratelimit(&rs)) {
                if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
-                   (gfsr & sGFSR_USF))
+                   (gfsr & ARM_SMMU_sGFSR_USF))
                        dev_err(smmu->dev,
                                "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
                                (u16)gfsynr1);
@@ -521,26 +540,28 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
                } else {
-                       cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
-                       cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-                       cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
+                       cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
+                       cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
                        if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
-                               cb->tcr[1] |= TCR2_AS;
+                               cb->tcr[1] |= ARM_SMMU_TCR2_AS;
+                       else
+                               cb->tcr[0] |= ARM_SMMU_TCR_EAE;
                }
        } else {
-               cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+               cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
        }
 
        /* TTBRs */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
-                       cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
-                       cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+                       cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
+                       cb->ttbr[1] = 0;
                } else {
-                       cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-                       cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
-                       cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-                       cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
+                       cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+                       cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+                                                 cfg->asid);
+                       cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+                                                cfg->asid);
                }
        } else {
                cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
@@ -576,31 +597,33 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
        /* CBA2R */
        if (smmu->version > ARM_SMMU_V1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
-                       reg = CBA2R_VA64;
+                       reg = ARM_SMMU_CBA2R_VA64;
                else
                        reg = 0;
                /* 16-bit VMIDs live in CBA2R */
                if (smmu->features & ARM_SMMU_FEAT_VMID16)
-                       reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);
+                       reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
 
                arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
        }
 
        /* CBAR */
-       reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
+       reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
        if (smmu->version < ARM_SMMU_V2)
-               reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);
+               reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
 
        /*
         * Use the weakest shareability/memory types, so they are
         * overridden by the ttbcr/pte.
         */
        if (stage1) {
-               reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
-                       FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
+               reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
+                                 ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
+                      FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
+                                 ARM_SMMU_CBAR_S1_MEMATTR_WB);
        } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
                /* 8-bit VMIDs live in CBAR */
-               reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
+               reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
        }
        arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
 
@@ -632,11 +655,12 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
        }
 
        /* SCTLR */
-       reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
+       reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
+             ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
        if (stage1)
-               reg |= SCTLR_S1_ASIDPNE;
+               reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
-               reg |= SCTLR_E;
+               reg |= ARM_SMMU_SCTLR_E;
 
        arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
 }
@@ -818,7 +842,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        if (ret < 0) {
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                        cfg->irptndx, irq);
-               cfg->irptndx = INVALID_IRPTNDX;
+               cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
        }
 
        mutex_unlock(&smmu_domain->init_mutex);
@@ -856,7 +880,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        smmu->cbs[cfg->cbndx].cfg = NULL;
        arm_smmu_write_context_bank(smmu, cfg->cbndx);
 
-       if (cfg->irptndx != INVALID_IRPTNDX) {
+       if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
                irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
                devm_free_irq(smmu->dev, irq, domain);
        }
@@ -912,23 +936,24 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 {
        struct arm_smmu_smr *smr = smmu->smrs + idx;
-       u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);
+       u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
+                 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
 
        if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
-               reg |= SMR_VALID;
+               reg |= ARM_SMMU_SMR_VALID;
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
 }
 
 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 {
        struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
-       u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
-                 FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
-                 FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);
+       u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
+                 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
+                 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
 
        if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
            smmu->smrs[idx].valid)
-               reg |= S2CR_EXIDVALID;
+               reg |= ARM_SMMU_S2CR_EXIDVALID;
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
 }
 
@@ -946,24 +971,37 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
 {
        u32 smr;
+       int i;
 
        if (!smmu->smrs)
                return;
-
+       /*
+        * If we've had to accommodate firmware memory regions, we may
+        * have live SMRs by now; tread carefully...
+        *
+        * Somewhat perversely, not having a free SMR for this test implies we
+        * can get away without it anyway, as we'll only be able to 'allocate'
+        * these SMRs for the ID/mask values we're already trusting to be OK.
+        */
+       for (i = 0; i < smmu->num_mapping_groups; i++)
+               if (!smmu->smrs[i].valid)
+                       goto smr_ok;
+       return;
+smr_ok:
        /*
         * SMR.ID bits may not be preserved if the corresponding MASK
         * bits are set, so check each one separately. We can reject
         * masters later if they try to claim IDs outside these masks.
         */
-       smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
-       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
-       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
-       smmu->streamid_mask = FIELD_GET(SMR_ID, smr);
+       smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+       smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
 
-       smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
-       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
-       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
-       smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
+       smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
+       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+       smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
 }
 
 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
@@ -1032,8 +1070,8 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
        mutex_lock(&smmu->stream_map_mutex);
        /* Figure out a viable stream map entry allocation */
        for_each_cfg_sme(fwspec, i, idx) {
-               u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
-               u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
+               u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+               u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
 
                if (idx != INVALID_SMENDX) {
                        ret = -EEXIST;
@@ -1277,7 +1315,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
                arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
 
        reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
-       if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
+       if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
+                                     5, 50)) {
                spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
                dev_err(dev,
                        "iova to phys timed out on %pad. Falling back to software table walk.\n",
@@ -1287,7 +1326,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 
        phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
        spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
-       if (phys & CB_PAR_F) {
+       if (phys & ARM_SMMU_CB_PAR_F) {
                dev_err(dev, "translation fault!\n");
                dev_err(dev, "PAR = 0x%llx\n", phys);
                return 0;
@@ -1368,8 +1407,8 @@ static int arm_smmu_add_device(struct device *dev)
 
        ret = -EINVAL;
        for (i = 0; i < fwspec->num_ids; i++) {
-               u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
-               u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
+               u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
+               u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
 
                if (sid & ~smmu->streamid_mask) {
                        dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
@@ -1550,12 +1589,12 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
        u32 mask, fwid = 0;
 
        if (args->args_count > 0)
-               fwid |= FIELD_PREP(SMR_ID, args->args[0]);
+               fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
 
        if (args->args_count > 1)
-               fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
+               fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
        else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
-               fwid |= FIELD_PREP(SMR_MASK, mask);
+               fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, mask);
 
        return iommu_fwspec_add_ids(dev, &fwid, 1);
 }
@@ -1616,7 +1655,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        /* Make sure all context banks are disabled and clear CB_FSR  */
        for (i = 0; i < smmu->num_context_banks; ++i) {
                arm_smmu_write_context_bank(smmu, i);
-               arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
+               arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
        }
 
        /* Invalidate the TLB, just in case */
@@ -1626,29 +1665,30 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
 
        /* Enable fault reporting */
-       reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+       reg |= (ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
+               ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE);
 
        /* Disable TLB broadcasting. */
-       reg |= (sCR0_VMIDPNE | sCR0_PTM);
+       reg |= (ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM);
 
        /* Enable client access, handling unmatched streams as appropriate */
-       reg &= ~sCR0_CLIENTPD;
+       reg &= ~ARM_SMMU_sCR0_CLIENTPD;
        if (disable_bypass)
-               reg |= sCR0_USFCFG;
+               reg |= ARM_SMMU_sCR0_USFCFG;
        else
-               reg &= ~sCR0_USFCFG;
+               reg &= ~ARM_SMMU_sCR0_USFCFG;
 
        /* Disable forced broadcasting */
-       reg &= ~sCR0_FB;
+       reg &= ~ARM_SMMU_sCR0_FB;
 
        /* Don't upgrade barriers */
-       reg &= ~(sCR0_BSU);
+       reg &= ~(ARM_SMMU_sCR0_BSU);
 
        if (smmu->features & ARM_SMMU_FEAT_VMID16)
-               reg |= sCR0_VMID16EN;
+               reg |= ARM_SMMU_sCR0_VMID16EN;
 
        if (smmu->features & ARM_SMMU_FEAT_EXIDS)
-               reg |= sCR0_EXIDENABLE;
+               reg |= ARM_SMMU_sCR0_EXIDENABLE;
 
        if (smmu->impl && smmu->impl->reset)
                smmu->impl->reset(smmu);
@@ -1693,21 +1733,21 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
        /* Restrict available stages based on module parameter */
        if (force_stage == 1)
-               id &= ~(ID0_S2TS | ID0_NTS);
+               id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS);
        else if (force_stage == 2)
-               id &= ~(ID0_S1TS | ID0_NTS);
+               id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
 
-       if (id & ID0_S1TS) {
+       if (id & ARM_SMMU_ID0_S1TS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
                dev_notice(smmu->dev, "\tstage 1 translation\n");
        }
 
-       if (id & ID0_S2TS) {
+       if (id & ARM_SMMU_ID0_S2TS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
                dev_notice(smmu->dev, "\tstage 2 translation\n");
        }
 
-       if (id & ID0_NTS) {
+       if (id & ARM_SMMU_ID0_NTS) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
                dev_notice(smmu->dev, "\tnested translation\n");
        }
@@ -1718,8 +1758,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                return -ENODEV;
        }
 
-       if ((id & ID0_S1TS) &&
-               ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
+       if ((id & ARM_SMMU_ID0_S1TS) &&
+           ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }
@@ -1730,7 +1770,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         * Fortunately, this also opens up a workaround for systems where the
         * ID register value has ended up configured incorrectly.
         */
-       cttw_reg = !!(id & ID0_CTTW);
+       cttw_reg = !!(id & ARM_SMMU_ID0_CTTW);
        if (cttw_fw || cttw_reg)
                dev_notice(smmu->dev, "\t%scoherent table walk\n",
                           cttw_fw ? "" : "non-");
@@ -1739,16 +1779,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                           "\t(IDR0.CTTW overridden by FW configuration)\n");
 
        /* Max. number of entries we have for stream matching/indexing */
-       if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+       if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
                smmu->features |= ARM_SMMU_FEAT_EXIDS;
                size = 1 << 16;
        } else {
-               size = 1 << FIELD_GET(ID0_NUMSIDB, id);
+               size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
        }
        smmu->streamid_mask = size - 1;
-       if (id & ID0_SMS) {
+       if (id & ARM_SMMU_ID0_SMS) {
                smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
-               size = FIELD_GET(ID0_NUMSMRG, id);
+               size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id);
                if (size == 0) {
                        dev_err(smmu->dev,
                                "stream-matching supported, but no SMRs present!\n");
@@ -1776,18 +1816,19 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        mutex_init(&smmu->stream_map_mutex);
        spin_lock_init(&smmu->global_sync_lock);
 
-       if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
+       if (smmu->version < ARM_SMMU_V2 ||
+           !(id & ARM_SMMU_ID0_PTFS_NO_AARCH32)) {
                smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
-               if (!(id & ID0_PTFS_NO_AARCH32S))
+               if (!(id & ARM_SMMU_ID0_PTFS_NO_AARCH32S))
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
        }
 
        /* ID1 */
        id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
-       smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
+       smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
 
        /* Check for size mismatch of SMMU address space from mapped region */
-       size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
+       size = 1 << (FIELD_GET(ARM_SMMU_ID1_NUMPAGENDXB, id) + 1);
        if (smmu->numpage != 2 * size << smmu->pgshift)
                dev_warn(smmu->dev,
                        "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
@@ -1795,8 +1836,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
        smmu->numpage = size;
 
-       smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
-       smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
+       smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
+       smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
        if (smmu->num_s2_context_banks > smmu->num_context_banks) {
                dev_err(smmu->dev, "impossible number of S2 context banks!\n");
                return -ENODEV;
@@ -1810,14 +1851,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
        /* ID2 */
        id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
-       size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
+       size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_IAS, id));
        smmu->ipa_size = size;
 
        /* The output mask is also applied for bypass */
-       size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
+       size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
        smmu->pa_size = size;
 
-       if (id & ID2_VMID16)
+       if (id & ARM_SMMU_ID2_VMID16)
                smmu->features |= ARM_SMMU_FEAT_VMID16;
 
        /*
@@ -1834,13 +1875,13 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                if (smmu->version == ARM_SMMU_V1_64K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
        } else {
-               size = FIELD_GET(ID2_UBS, id);
+               size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
                smmu->va_size = arm_smmu_id_size_to_bits(size);
-               if (id & ID2_PTFS_4K)
+               if (id & ARM_SMMU_ID2_PTFS_4K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
-               if (id & ID2_PTFS_16K)
+               if (id & ARM_SMMU_ID2_PTFS_16K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
-               if (id & ID2_PTFS_64K)
+               if (id & ARM_SMMU_ID2_PTFS_64K)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
        }
 
@@ -1902,6 +1943,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
        { },
 };
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
 
 #ifdef CONFIG_ACPI
 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
@@ -1988,8 +2030,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
 
        legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
        if (legacy_binding && !using_generic_binding) {
-               if (!using_legacy_binding)
-                       pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
+               if (!using_legacy_binding) {
+                       pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
+                                 IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
+               }
                using_legacy_binding = true;
        } else if (!legacy_binding && !using_legacy_binding) {
                using_generic_binding = true;
@@ -2004,25 +2048,50 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
        return 0;
 }
 
-static void arm_smmu_bus_init(void)
+static int arm_smmu_bus_init(struct iommu_ops *ops)
 {
+       int err;
+
        /* Oh, for a proper bus abstraction */
-       if (!iommu_present(&platform_bus_type))
-               bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+       if (!iommu_present(&platform_bus_type)) {
+               err = bus_set_iommu(&platform_bus_type, ops);
+               if (err)
+                       return err;
+       }
 #ifdef CONFIG_ARM_AMBA
-       if (!iommu_present(&amba_bustype))
-               bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+       if (!iommu_present(&amba_bustype)) {
+               err = bus_set_iommu(&amba_bustype, ops);
+               if (err)
+                       goto err_reset_platform_ops;
+       }
 #endif
 #ifdef CONFIG_PCI
        if (!iommu_present(&pci_bus_type)) {
-               pci_request_acs();
-               bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+               err = bus_set_iommu(&pci_bus_type, ops);
+               if (err)
+                       goto err_reset_amba_ops;
        }
 #endif
 #ifdef CONFIG_FSL_MC_BUS
-       if (!iommu_present(&fsl_mc_bus_type))
-               bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+       if (!iommu_present(&fsl_mc_bus_type)) {
+               err = bus_set_iommu(&fsl_mc_bus_type, ops);
+               if (err)
+                       goto err_reset_pci_ops;
+       }
+#endif
+       return 0;
+
+err_reset_pci_ops: __maybe_unused;
+#ifdef CONFIG_PCI
+       bus_set_iommu(&pci_bus_type, NULL);
 #endif
+err_reset_amba_ops: __maybe_unused;
+#ifdef CONFIG_ARM_AMBA
+       bus_set_iommu(&amba_bustype, NULL);
+#endif
+err_reset_platform_ops: __maybe_unused;
+       bus_set_iommu(&platform_bus_type, NULL);
+       return err;
 }
 
 static int arm_smmu_device_probe(struct platform_device *pdev)
@@ -2168,38 +2237,28 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
         * ready to handle default domain setup as soon as any SMMU exists.
         */
        if (!using_legacy_binding)
-               arm_smmu_bus_init();
-
-       return 0;
-}
+               return arm_smmu_bus_init(&arm_smmu_ops);
 
-/*
- * With the legacy DT binding in play, though, we have no guarantees about
- * probe order, but then we're also not doing default domains, so we can
- * delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no add_device() calls get missed.
- */
-static int arm_smmu_legacy_bus_init(void)
-{
-       if (using_legacy_binding)
-               arm_smmu_bus_init();
        return 0;
 }
-device_initcall_sync(arm_smmu_legacy_bus_init);
 
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
 {
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
        if (!smmu)
-               return;
+               return -ENODEV;
 
        if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
                dev_err(&pdev->dev, "removing device with active domains!\n");
 
+       arm_smmu_bus_init(NULL);
+       iommu_device_unregister(&smmu->iommu);
+       iommu_device_sysfs_remove(&smmu->iommu);
+
        arm_smmu_rpm_get(smmu);
        /* Turn the thing off */
-       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
        arm_smmu_rpm_put(smmu);
 
        if (pm_runtime_enabled(smmu->dev))
@@ -2208,6 +2267,12 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev)
                clk_bulk_disable(smmu->num_clks, smmu->clks);
 
        clk_bulk_unprepare(smmu->num_clks, smmu->clks);
+       return 0;
+}
+
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
+{
+       arm_smmu_device_remove(pdev);
 }
 
 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
@@ -2258,11 +2323,17 @@ static const struct dev_pm_ops arm_smmu_pm_ops = {
 static struct platform_driver arm_smmu_driver = {
        .driver = {
                .name                   = "arm-smmu",
-               .of_match_table         = of_match_ptr(arm_smmu_of_match),
+               .of_match_table         = arm_smmu_of_match,
                .pm                     = &arm_smmu_pm_ops,
-               .suppress_bind_attrs    = true,
+               .suppress_bind_attrs    = true,
        },
        .probe  = arm_smmu_device_probe,
+       .remove = arm_smmu_device_remove,
        .shutdown = arm_smmu_device_shutdown,
 };
-builtin_platform_driver(arm_smmu_driver);
+module_platform_driver(arm_smmu_driver);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will@kernel.org>");
+MODULE_ALIAS("platform:arm-smmu");
+MODULE_LICENSE("GPL v2");
index 62b9f0cec49bbd1edcce191777498af957762c3a..8d1cd54d82a6f69c4d2e0ce7c70b69dbffc4b4f5 100644 (file)
@@ -11,6 +11,7 @@
 #define _ARM_SMMU_H
 
 #include <linux/atomic.h>
+#include <linux/bitfield.h>
 #include <linux/bits.h>
 #include <linux/clk.h>
 #include <linux/device.h>
 
 /* Configuration registers */
 #define ARM_SMMU_GR0_sCR0              0x0
-#define sCR0_VMID16EN                  BIT(31)
-#define sCR0_BSU                       GENMASK(15, 14)
-#define sCR0_FB                                BIT(13)
-#define sCR0_PTM                       BIT(12)
-#define sCR0_VMIDPNE                   BIT(11)
-#define sCR0_USFCFG                    BIT(10)
-#define sCR0_GCFGFIE                   BIT(5)
-#define sCR0_GCFGFRE                   BIT(4)
-#define sCR0_EXIDENABLE                        BIT(3)
-#define sCR0_GFIE                      BIT(2)
-#define sCR0_GFRE                      BIT(1)
-#define sCR0_CLIENTPD                  BIT(0)
+#define ARM_SMMU_sCR0_VMID16EN         BIT(31)
+#define ARM_SMMU_sCR0_BSU              GENMASK(15, 14)
+#define ARM_SMMU_sCR0_FB               BIT(13)
+#define ARM_SMMU_sCR0_PTM              BIT(12)
+#define ARM_SMMU_sCR0_VMIDPNE          BIT(11)
+#define ARM_SMMU_sCR0_USFCFG           BIT(10)
+#define ARM_SMMU_sCR0_GCFGFIE          BIT(5)
+#define ARM_SMMU_sCR0_GCFGFRE          BIT(4)
+#define ARM_SMMU_sCR0_EXIDENABLE       BIT(3)
+#define ARM_SMMU_sCR0_GFIE             BIT(2)
+#define ARM_SMMU_sCR0_GFRE             BIT(1)
+#define ARM_SMMU_sCR0_CLIENTPD         BIT(0)
 
 /* Auxiliary Configuration register */
 #define ARM_SMMU_GR0_sACR              0x10
 
 /* Identification registers */
 #define ARM_SMMU_GR0_ID0               0x20
-#define ID0_S1TS                       BIT(30)
-#define ID0_S2TS                       BIT(29)
-#define ID0_NTS                                BIT(28)
-#define ID0_SMS                                BIT(27)
-#define ID0_ATOSNS                     BIT(26)
-#define ID0_PTFS_NO_AARCH32            BIT(25)
-#define ID0_PTFS_NO_AARCH32S           BIT(24)
-#define ID0_NUMIRPT                    GENMASK(23, 16)
-#define ID0_CTTW                       BIT(14)
-#define ID0_NUMSIDB                    GENMASK(12, 9)
-#define ID0_EXIDS                      BIT(8)
-#define ID0_NUMSMRG                    GENMASK(7, 0)
+#define ARM_SMMU_ID0_S1TS              BIT(30)
+#define ARM_SMMU_ID0_S2TS              BIT(29)
+#define ARM_SMMU_ID0_NTS               BIT(28)
+#define ARM_SMMU_ID0_SMS               BIT(27)
+#define ARM_SMMU_ID0_ATOSNS            BIT(26)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32   BIT(25)
+#define ARM_SMMU_ID0_PTFS_NO_AARCH32S  BIT(24)
+#define ARM_SMMU_ID0_NUMIRPT           GENMASK(23, 16)
+#define ARM_SMMU_ID0_CTTW              BIT(14)
+#define ARM_SMMU_ID0_NUMSIDB           GENMASK(12, 9)
+#define ARM_SMMU_ID0_EXIDS             BIT(8)
+#define ARM_SMMU_ID0_NUMSMRG           GENMASK(7, 0)
 
 #define ARM_SMMU_GR0_ID1               0x24
-#define ID1_PAGESIZE                   BIT(31)
-#define ID1_NUMPAGENDXB                        GENMASK(30, 28)
-#define ID1_NUMS2CB                    GENMASK(23, 16)
-#define ID1_NUMCB                      GENMASK(7, 0)
+#define ARM_SMMU_ID1_PAGESIZE          BIT(31)
+#define ARM_SMMU_ID1_NUMPAGENDXB       GENMASK(30, 28)
+#define ARM_SMMU_ID1_NUMS2CB           GENMASK(23, 16)
+#define ARM_SMMU_ID1_NUMCB             GENMASK(7, 0)
 
 #define ARM_SMMU_GR0_ID2               0x28
-#define ID2_VMID16                     BIT(15)
-#define ID2_PTFS_64K                   BIT(14)
-#define ID2_PTFS_16K                   BIT(13)
-#define ID2_PTFS_4K                    BIT(12)
-#define ID2_UBS                                GENMASK(11, 8)
-#define ID2_OAS                                GENMASK(7, 4)
-#define ID2_IAS                                GENMASK(3, 0)
+#define ARM_SMMU_ID2_VMID16            BIT(15)
+#define ARM_SMMU_ID2_PTFS_64K          BIT(14)
+#define ARM_SMMU_ID2_PTFS_16K          BIT(13)
+#define ARM_SMMU_ID2_PTFS_4K           BIT(12)
+#define ARM_SMMU_ID2_UBS               GENMASK(11, 8)
+#define ARM_SMMU_ID2_OAS               GENMASK(7, 4)
+#define ARM_SMMU_ID2_IAS               GENMASK(3, 0)
 
 #define ARM_SMMU_GR0_ID3               0x2c
 #define ARM_SMMU_GR0_ID4               0x30
 #define ARM_SMMU_GR0_ID6               0x38
 
 #define ARM_SMMU_GR0_ID7               0x3c
-#define ID7_MAJOR                      GENMASK(7, 4)
-#define ID7_MINOR                      GENMASK(3, 0)
+#define ARM_SMMU_ID7_MAJOR             GENMASK(7, 4)
+#define ARM_SMMU_ID7_MINOR             GENMASK(3, 0)
 
 #define ARM_SMMU_GR0_sGFSR             0x48
-#define sGFSR_USF                      BIT(1)
+#define ARM_SMMU_sGFSR_USF             BIT(1)
 
 #define ARM_SMMU_GR0_sGFSYNR0          0x50
 #define ARM_SMMU_GR0_sGFSYNR1          0x54
 #define ARM_SMMU_GR0_sTLBGSYNC         0x70
 
 #define ARM_SMMU_GR0_sTLBGSTATUS       0x74
-#define sTLBGSTATUS_GSACTIVE           BIT(0)
+#define ARM_SMMU_sTLBGSTATUS_GSACTIVE  BIT(0)
 
 /* Stream mapping registers */
 #define ARM_SMMU_GR0_SMR(n)            (0x800 + ((n) << 2))
-#define SMR_VALID                      BIT(31)
-#define SMR_MASK                       GENMASK(31, 16)
-#define SMR_ID                         GENMASK(15, 0)
+#define ARM_SMMU_SMR_VALID             BIT(31)
+#define ARM_SMMU_SMR_MASK              GENMASK(31, 16)
+#define ARM_SMMU_SMR_ID                        GENMASK(15, 0)
 
 #define ARM_SMMU_GR0_S2CR(n)           (0xc00 + ((n) << 2))
-#define S2CR_PRIVCFG                   GENMASK(25, 24)
+#define ARM_SMMU_S2CR_PRIVCFG          GENMASK(25, 24)
 enum arm_smmu_s2cr_privcfg {
        S2CR_PRIVCFG_DEFAULT,
        S2CR_PRIVCFG_DIPAN,
        S2CR_PRIVCFG_UNPRIV,
        S2CR_PRIVCFG_PRIV,
 };
-#define S2CR_TYPE                      GENMASK(17, 16)
+#define ARM_SMMU_S2CR_TYPE             GENMASK(17, 16)
 enum arm_smmu_s2cr_type {
        S2CR_TYPE_TRANS,
        S2CR_TYPE_BYPASS,
        S2CR_TYPE_FAULT,
 };
-#define S2CR_EXIDVALID                 BIT(10)
-#define S2CR_CBNDX                     GENMASK(7, 0)
+#define ARM_SMMU_S2CR_EXIDVALID                BIT(10)
+#define ARM_SMMU_S2CR_CBNDX            GENMASK(7, 0)
 
 /* Context bank attribute registers */
 #define ARM_SMMU_GR1_CBAR(n)           (0x0 + ((n) << 2))
-#define CBAR_IRPTNDX                   GENMASK(31, 24)
-#define CBAR_TYPE                      GENMASK(17, 16)
+#define ARM_SMMU_CBAR_IRPTNDX          GENMASK(31, 24)
+#define ARM_SMMU_CBAR_TYPE             GENMASK(17, 16)
 enum arm_smmu_cbar_type {
        CBAR_TYPE_S2_TRANS,
        CBAR_TYPE_S1_TRANS_S2_BYPASS,
        CBAR_TYPE_S1_TRANS_S2_FAULT,
        CBAR_TYPE_S1_TRANS_S2_TRANS,
 };
-#define CBAR_S1_MEMATTR                        GENMASK(15, 12)
-#define CBAR_S1_MEMATTR_WB             0xf
-#define CBAR_S1_BPSHCFG                        GENMASK(9, 8)
-#define CBAR_S1_BPSHCFG_NSH            3
-#define CBAR_VMID                      GENMASK(7, 0)
+#define ARM_SMMU_CBAR_S1_MEMATTR       GENMASK(15, 12)
+#define ARM_SMMU_CBAR_S1_MEMATTR_WB    0xf
+#define ARM_SMMU_CBAR_S1_BPSHCFG       GENMASK(9, 8)
+#define ARM_SMMU_CBAR_S1_BPSHCFG_NSH   3
+#define ARM_SMMU_CBAR_VMID             GENMASK(7, 0)
 
 #define ARM_SMMU_GR1_CBFRSYNRA(n)      (0x400 + ((n) << 2))
 
 #define ARM_SMMU_GR1_CBA2R(n)          (0x800 + ((n) << 2))
-#define CBA2R_VMID16                   GENMASK(31, 16)
-#define CBA2R_VA64                     BIT(0)
+#define ARM_SMMU_CBA2R_VMID16          GENMASK(31, 16)
+#define ARM_SMMU_CBA2R_VA64            BIT(0)
 
 #define ARM_SMMU_CB_SCTLR              0x0
-#define SCTLR_S1_ASIDPNE               BIT(12)
-#define SCTLR_CFCFG                    BIT(7)
-#define SCTLR_CFIE                     BIT(6)
-#define SCTLR_CFRE                     BIT(5)
-#define SCTLR_E                                BIT(4)
-#define SCTLR_AFE                      BIT(2)
-#define SCTLR_TRE                      BIT(1)
-#define SCTLR_M                                BIT(0)
+#define ARM_SMMU_SCTLR_S1_ASIDPNE      BIT(12)
+#define ARM_SMMU_SCTLR_CFCFG           BIT(7)
+#define ARM_SMMU_SCTLR_CFIE            BIT(6)
+#define ARM_SMMU_SCTLR_CFRE            BIT(5)
+#define ARM_SMMU_SCTLR_E               BIT(4)
+#define ARM_SMMU_SCTLR_AFE             BIT(2)
+#define ARM_SMMU_SCTLR_TRE             BIT(1)
+#define ARM_SMMU_SCTLR_M               BIT(0)
 
 #define ARM_SMMU_CB_ACTLR              0x4
 
 #define ARM_SMMU_CB_RESUME             0x8
-#define RESUME_TERMINATE               BIT(0)
+#define ARM_SMMU_RESUME_TERMINATE      BIT(0)
 
 #define ARM_SMMU_CB_TCR2               0x10
-#define TCR2_SEP                       GENMASK(17, 15)
-#define TCR2_SEP_UPSTREAM              0x7
-#define TCR2_AS                                BIT(4)
+#define ARM_SMMU_TCR2_SEP              GENMASK(17, 15)
+#define ARM_SMMU_TCR2_SEP_UPSTREAM     0x7
+#define ARM_SMMU_TCR2_AS               BIT(4)
+#define ARM_SMMU_TCR2_PASIZE           GENMASK(3, 0)
 
 #define ARM_SMMU_CB_TTBR0              0x20
 #define ARM_SMMU_CB_TTBR1              0x28
-#define TTBRn_ASID                     GENMASK_ULL(63, 48)
+#define ARM_SMMU_TTBRn_ASID            GENMASK_ULL(63, 48)
 
 #define ARM_SMMU_CB_TCR                        0x30
+#define ARM_SMMU_TCR_EAE               BIT(31)
+#define ARM_SMMU_TCR_EPD1              BIT(23)
+#define ARM_SMMU_TCR_TG0               GENMASK(15, 14)
+#define ARM_SMMU_TCR_SH0               GENMASK(13, 12)
+#define ARM_SMMU_TCR_ORGN0             GENMASK(11, 10)
+#define ARM_SMMU_TCR_IRGN0             GENMASK(9, 8)
+#define ARM_SMMU_TCR_T0SZ              GENMASK(5, 0)
+
+#define ARM_SMMU_VTCR_RES1             BIT(31)
+#define ARM_SMMU_VTCR_PS               GENMASK(18, 16)
+#define ARM_SMMU_VTCR_TG0              ARM_SMMU_TCR_TG0
+#define ARM_SMMU_VTCR_SH0              ARM_SMMU_TCR_SH0
+#define ARM_SMMU_VTCR_ORGN0            ARM_SMMU_TCR_ORGN0
+#define ARM_SMMU_VTCR_IRGN0            ARM_SMMU_TCR_IRGN0
+#define ARM_SMMU_VTCR_SL0              GENMASK(7, 6)
+#define ARM_SMMU_VTCR_T0SZ             ARM_SMMU_TCR_T0SZ
+
 #define ARM_SMMU_CB_CONTEXTIDR         0x34
 #define ARM_SMMU_CB_S1_MAIR0           0x38
 #define ARM_SMMU_CB_S1_MAIR1           0x3c
 
 #define ARM_SMMU_CB_PAR                        0x50
-#define CB_PAR_F                       BIT(0)
+#define ARM_SMMU_CB_PAR_F              BIT(0)
 
 #define ARM_SMMU_CB_FSR                        0x58
-#define FSR_MULTI                      BIT(31)
-#define FSR_SS                         BIT(30)
-#define FSR_UUT                                BIT(8)
-#define FSR_ASF                                BIT(7)
-#define FSR_TLBLKF                     BIT(6)
-#define FSR_TLBMCF                     BIT(5)
-#define FSR_EF                         BIT(4)
-#define FSR_PF                         BIT(3)
-#define FSR_AFF                                BIT(2)
-#define FSR_TF                         BIT(1)
-
-#define FSR_IGN                                (FSR_AFF | FSR_ASF | \
-                                        FSR_TLBMCF | FSR_TLBLKF)
-#define FSR_FAULT                      (FSR_MULTI | FSR_SS | FSR_UUT | \
-                                        FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
+#define ARM_SMMU_FSR_MULTI             BIT(31)
+#define ARM_SMMU_FSR_SS                        BIT(30)
+#define ARM_SMMU_FSR_UUT               BIT(8)
+#define ARM_SMMU_FSR_ASF               BIT(7)
+#define ARM_SMMU_FSR_TLBLKF            BIT(6)
+#define ARM_SMMU_FSR_TLBMCF            BIT(5)
+#define ARM_SMMU_FSR_EF                        BIT(4)
+#define ARM_SMMU_FSR_PF                        BIT(3)
+#define ARM_SMMU_FSR_AFF               BIT(2)
+#define ARM_SMMU_FSR_TF                        BIT(1)
+
+#define ARM_SMMU_FSR_IGN               (ARM_SMMU_FSR_AFF |             \
+                                        ARM_SMMU_FSR_ASF |             \
+                                        ARM_SMMU_FSR_TLBMCF |          \
+                                        ARM_SMMU_FSR_TLBLKF)
+
+#define ARM_SMMU_FSR_FAULT             (ARM_SMMU_FSR_MULTI |           \
+                                        ARM_SMMU_FSR_SS |              \
+                                        ARM_SMMU_FSR_UUT |             \
+                                        ARM_SMMU_FSR_EF |              \
+                                        ARM_SMMU_FSR_PF |              \
+                                        ARM_SMMU_FSR_TF |              \
+                                        ARM_SMMU_FSR_IGN)
 
 #define ARM_SMMU_CB_FAR                        0x60
 
 #define ARM_SMMU_CB_FSYNR0             0x68
-#define FSYNR0_WNR                     BIT(4)
+#define ARM_SMMU_FSYNR0_WNR            BIT(4)
 
 #define ARM_SMMU_CB_S1_TLBIVA          0x600
 #define ARM_SMMU_CB_S1_TLBIASID                0x610
@@ -203,7 +230,7 @@ enum arm_smmu_cbar_type {
 #define ARM_SMMU_CB_ATS1PR             0x800
 
 #define ARM_SMMU_CB_ATSR               0x8f0
-#define ATSR_ACTIVE                    BIT(0)
+#define ARM_SMMU_ATSR_ACTIVE           BIT(0)
 
 
 /* Maximum number of context banks per SMMU */
@@ -297,7 +324,7 @@ struct arm_smmu_cfg {
        enum arm_smmu_cbar_type         cbar;
        enum arm_smmu_context_fmt       fmt;
 };
-#define INVALID_IRPTNDX                        0xff
+#define ARM_SMMU_INVALID_IRPTNDX       0xff
 
 enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
@@ -318,6 +345,33 @@ struct arm_smmu_domain {
        struct iommu_domain             domain;
 };
 
+static inline u32 arm_smmu_lpae_tcr(struct io_pgtable_cfg *cfg)
+{
+       return ARM_SMMU_TCR_EPD1 |
+              FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
+              FIELD_PREP(ARM_SMMU_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
+              FIELD_PREP(ARM_SMMU_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
+              FIELD_PREP(ARM_SMMU_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
+              FIELD_PREP(ARM_SMMU_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
+}
+
+static inline u32 arm_smmu_lpae_tcr2(struct io_pgtable_cfg *cfg)
+{
+       return FIELD_PREP(ARM_SMMU_TCR2_PASIZE, cfg->arm_lpae_s1_cfg.tcr.ips) |
+              FIELD_PREP(ARM_SMMU_TCR2_SEP, ARM_SMMU_TCR2_SEP_UPSTREAM);
+}
+
+static inline u32 arm_smmu_lpae_vtcr(struct io_pgtable_cfg *cfg)
+{
+       return ARM_SMMU_VTCR_RES1 |
+              FIELD_PREP(ARM_SMMU_VTCR_PS, cfg->arm_lpae_s2_cfg.vtcr.ps) |
+              FIELD_PREP(ARM_SMMU_VTCR_TG0, cfg->arm_lpae_s2_cfg.vtcr.tg) |
+              FIELD_PREP(ARM_SMMU_VTCR_SH0, cfg->arm_lpae_s2_cfg.vtcr.sh) |
+              FIELD_PREP(ARM_SMMU_VTCR_ORGN0, cfg->arm_lpae_s2_cfg.vtcr.orgn) |
+              FIELD_PREP(ARM_SMMU_VTCR_IRGN0, cfg->arm_lpae_s2_cfg.vtcr.irgn) |
+              FIELD_PREP(ARM_SMMU_VTCR_SL0, cfg->arm_lpae_s2_cfg.vtcr.sl) |
+              FIELD_PREP(ARM_SMMU_VTCR_T0SZ, cfg->arm_lpae_s2_cfg.vtcr.tsz);
+}
 
 /* Implementation details, yay! */
 struct arm_smmu_impl {
index c363294b3bb93ff9ce1165c06cab466dcfa4e68a..a2e96a5fd9a7b3a6d9e2bf35ea1a8d7bdc78f1f2 100644 (file)
@@ -1203,7 +1203,6 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
 {
        struct device *dev = msi_desc_to_dev(desc);
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-       struct iommu_dma_cookie *cookie;
        struct iommu_dma_msi_page *msi_page;
        static DEFINE_MUTEX(msi_prepare_lock); /* see below */
 
@@ -1212,8 +1211,6 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
                return 0;
        }
 
-       cookie = domain->iova_cookie;
-
        /*
         * In fact the whole prepare operation should already be serialised by
         * irq_domain_mutex further up the callchain, but that's pretty subtle
index 3acfa6a25fa29afef60273cb66058d6a3d32bfd6..071bb42bbbc5bef6eba8584ec50ce24eeea06993 100644 (file)
@@ -244,7 +244,7 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
                    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
                     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
-                     info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
+                     info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(info->dev));
                        return -EINVAL;
@@ -1354,7 +1354,6 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
        struct qi_desc desc;
 
        if (mask) {
-               WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
@@ -1371,6 +1370,47 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
        qi_submit_sync(&desc, iommu);
 }
 
+/* PASID-based IOTLB invalidation */
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+                    unsigned long npages, bool ih)
+{
+       struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
+
+       /*
+        * npages == -1 means a PASID-selective invalidation, otherwise,
+        * a positive value for Page-selective-within-PASID invalidation.
+        * 0 is not a valid input.
+        */
+       if (WARN_ON(!npages)) {
+               pr_err("Invalid input npages = %ld\n", npages);
+               return;
+       }
+
+       if (npages == -1) {
+               desc.qw0 = QI_EIOTLB_PASID(pasid) |
+                               QI_EIOTLB_DID(did) |
+                               QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+                               QI_EIOTLB_TYPE;
+               desc.qw1 = 0;
+       } else {
+               int mask = ilog2(__roundup_pow_of_two(npages));
+               unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+               if (WARN_ON_ONCE(!ALIGN(addr, align)))
+                       addr &= ~(align - 1);
+
+               desc.qw0 = QI_EIOTLB_PASID(pasid) |
+                               QI_EIOTLB_DID(did) |
+                               QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+                               QI_EIOTLB_TYPE;
+               desc.qw1 = QI_EIOTLB_ADDR(addr) |
+                               QI_EIOTLB_IH(ih) |
+                               QI_EIOTLB_AM(mask);
+       }
+
+       qi_submit_sync(&desc, iommu);
+}
+
 /*
  * Disable Queued Invalidation interface.
  */
index 471f05d452e0198f3d2837f2a83e13d0984e1a12..c1257bef553cef77881a54dd0372babe03b1544b 100644 (file)
@@ -5,6 +5,7 @@
  * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
  *         Sohil Mehta <sohil.mehta@intel.com>
  *         Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *         Lu Baolu <baolu.lu@linux.intel.com>
  */
 
 #include <linux/debugfs.h>
@@ -283,6 +284,77 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);
 
+static inline unsigned long level_to_directory_size(int level)
+{
+       return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
+}
+
+static inline void
+dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
+{
+       seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
+                  iova >> VTD_PAGE_SHIFT, path[5], path[4],
+                  path[3], path[2], path[1]);
+}
+
+static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
+                              int level, unsigned long start,
+                              u64 *path)
+{
+       int i;
+
+       if (level > 5 || level < 1)
+               return;
+
+       for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
+                       i++, pde++, start += level_to_directory_size(level)) {
+               if (!dma_pte_present(pde))
+                       continue;
+
+               path[level] = pde->val;
+               if (dma_pte_superpage(pde) || level == 1)
+                       dump_page_info(m, start, path);
+               else
+                       pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
+                                          level - 1, start, path);
+               path[level] = 0;
+       }
+}
+
+static int show_device_domain_translation(struct device *dev, void *data)
+{
+       struct dmar_domain *domain = find_domain(dev);
+       struct seq_file *m = data;
+       u64 path[6] = { 0 };
+
+       if (!domain)
+               return 0;
+
+       seq_printf(m, "Device %s with pasid %d @0x%llx\n",
+                  dev_name(dev), domain->default_pasid,
+                  (u64)virt_to_phys(domain->pgd));
+       seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
+
+       pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
+       seq_putc(m, '\n');
+
+       return 0;
+}
+
+static int domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       ret = bus_for_each_dev(&pci_bus_type, NULL, m,
+                              show_device_domain_translation);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
+
 #ifdef CONFIG_IRQ_REMAP
 static void ir_tbl_remap_entry_show(struct seq_file *m,
                                    struct intel_iommu *iommu)
@@ -396,6 +468,9 @@ void __init intel_iommu_debugfs_init(void)
                            &iommu_regset_fops);
        debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
                            NULL, &dmar_translation_struct_fops);
+       debugfs_create_file("domain_translation_struct", 0444,
+                           intel_iommu_debug, NULL,
+                           &domain_translation_struct_fops);
 #ifdef CONFIG_IRQ_REMAP
        debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
                            NULL, &ir_translation_struct_fops);
index b2526a4fc6c4e49151f56319a723b8daebb5056f..64ddccf1d5fe3be70f3a0199aab4a87d4d06ea17 100644 (file)
@@ -307,6 +307,20 @@ static int hw_pass_through = 1;
  */
 #define DOMAIN_FLAG_LOSE_CHILDREN              BIT(1)
 
+/*
+ * When VT-d works in the scalable mode, it allows DMA translation to
+ * happen through either first level or second level page table. This
+ * bit marks that the DMA translation for the domain goes through the
+ * first level page table, otherwise, it goes through the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL            BIT(2)
+
+/*
+ * Domain represents a virtual machine which demands iommu nested
+ * translation mode support.
+ */
+#define DOMAIN_FLAG_NESTING_MODE               BIT(3)
+
 #define for_each_domain_iommu(idx, domain)                     \
        for (idx = 0; idx < g_num_of_iommus; idx++)             \
                if (domain->iommu_refcnt[idx])
@@ -355,9 +369,14 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 int dmar_disabled = 0;
 #else
 int dmar_disabled = 1;
-#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
 
+#ifdef INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+int intel_iommu_sm = 1;
+#else
 int intel_iommu_sm;
+#endif /* INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
@@ -368,7 +387,6 @@ static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
 static int intel_no_bounce;
 
-#define IDENTMAP_ALL           1
 #define IDENTMAP_GFX           2
 #define IDENTMAP_AZALIA                4
 
@@ -377,7 +395,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
-static DEFINE_SPINLOCK(device_domain_lock);
+DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
 #define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) &&   \
@@ -552,6 +570,11 @@ static inline int domain_type_is_si(struct dmar_domain *domain)
        return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
 }
 
+static inline bool domain_use_first_level(struct dmar_domain *domain)
+{
+       return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
+}
+
 static inline int domain_pfn_supported(struct dmar_domain *domain,
                                       unsigned long pfn)
 {
@@ -661,11 +684,12 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
        return ret;
 }
 
-static int domain_update_iommu_superpage(struct intel_iommu *skip)
+static int domain_update_iommu_superpage(struct dmar_domain *domain,
+                                        struct intel_iommu *skip)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
-       int mask = 0xf;
+       int mask = 0x3;
 
        if (!intel_iommu_superpage) {
                return 0;
@@ -675,7 +699,13 @@ static int domain_update_iommu_superpage(struct intel_iommu *skip)
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
-                       mask &= cap_super_page_val(iommu->cap);
+                       if (domain && domain_use_first_level(domain)) {
+                               if (!cap_fl1gp_support(iommu->cap))
+                                       mask = 0x1;
+                       } else {
+                               mask &= cap_super_page_val(iommu->cap);
+                       }
+
                        if (!mask)
                                break;
                }
@@ -690,7 +720,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
 {
        domain_update_iommu_coherency(domain);
        domain->iommu_snooping = domain_update_iommu_snooping(NULL);
-       domain->iommu_superpage = domain_update_iommu_superpage(NULL);
+       domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
 }
 
 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -913,6 +943,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 
                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+                       if (domain_use_first_level(domain))
+                               pteval |= DMA_FL_PTE_XD;
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
@@ -1483,6 +1515,20 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
+static void domain_flush_piotlb(struct intel_iommu *iommu,
+                               struct dmar_domain *domain,
+                               u64 addr, unsigned long npages, bool ih)
+{
+       u16 did = domain->iommu_did[iommu->seq_id];
+
+       if (domain->default_pasid)
+               qi_flush_piotlb(iommu, did, domain->default_pasid,
+                               addr, npages, ih);
+
+       if (!list_empty(&domain->devices))
+               qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
                                  struct dmar_domain *domain,
                                  unsigned long pfn, unsigned int pages,
@@ -1496,18 +1542,23 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 
        if (ih)
                ih = 1 << 6;
-       /*
-        * Fallback to domain selective flush if no PSI support or the size is
-        * too big.
-        * PSI requires page size to be 2 ^ x, and the base address is naturally
-        * aligned to the size
-        */
-       if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-               iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                                               DMA_TLB_DSI_FLUSH);
-       else
-               iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-                                               DMA_TLB_PSI_FLUSH);
+
+       if (domain_use_first_level(domain)) {
+               domain_flush_piotlb(iommu, domain, addr, pages, ih);
+       } else {
+               /*
+                * Fallback to domain selective flush if no PSI support or
+                * the size is too big. PSI requires page size to be 2 ^ x,
+                * and the base address is naturally aligned to the size.
+                */
+               if (!cap_pgsel_inv(iommu->cap) ||
+                   mask > cap_max_amask_val(iommu->cap))
+                       iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                                       DMA_TLB_DSI_FLUSH);
+               else
+                       iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+                                                       DMA_TLB_PSI_FLUSH);
+       }
 
        /*
         * In caching mode, changes of pages from non-present to present require
@@ -1522,8 +1573,11 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
                                        struct dmar_domain *domain,
                                        unsigned long pfn, unsigned int pages)
 {
-       /* It's a non-present to present mapping. Only flush if caching mode */
-       if (cap_caching_mode(iommu->cap))
+       /*
+        * It's a non-present to present mapping. Only flush if caching mode
+        * and second level.
+        */
+       if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
                iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
        else
                iommu_flush_write_buffer(iommu);
@@ -1540,7 +1594,11 @@ static void iommu_flush_iova(struct iova_domain *iovad)
                struct intel_iommu *iommu = g_iommus[idx];
                u16 did = domain->iommu_did[iommu->seq_id];
 
-               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+               if (domain_use_first_level(domain))
+                       domain_flush_piotlb(iommu, domain, 0, -1, 0);
+               else
+                       iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                                DMA_TLB_DSI_FLUSH);
 
                if (!cap_caching_mode(iommu->cap))
                        iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
@@ -1709,6 +1767,33 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 #endif
 }
 
+/*
+ * Check and return whether first level is used by default for
+ * DMA translation.
+ */
+static bool first_level_by_default(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       static int first_level_support = -1;
+
+       if (likely(first_level_support != -1))
+               return first_level_support;
+
+       first_level_support = 1;
+
+       rcu_read_lock();
+       for_each_active_iommu(iommu, drhd) {
+               if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
+                       first_level_support = 0;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return first_level_support;
+}
+
 static struct dmar_domain *alloc_domain(int flags)
 {
        struct dmar_domain *domain;
@@ -1720,6 +1805,8 @@ static struct dmar_domain *alloc_domain(int flags)
        memset(domain, 0, sizeof(*domain));
        domain->nid = NUMA_NO_NODE;
        domain->flags = flags;
+       if (first_level_by_default())
+               domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
        domain->has_iotlb_device = false;
        INIT_LIST_HEAD(&domain->devices);
 
@@ -1849,14 +1936,16 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
 {
        int adjust_width, agaw;
        unsigned long sagaw;
-       int err;
+       int ret;
 
        init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
 
-       err = init_iova_flush_queue(&domain->iovad,
-                                   iommu_flush_iova, iova_entry_free);
-       if (err)
-               return err;
+       if (!intel_iommu_strict) {
+               ret = init_iova_flush_queue(&domain->iovad,
+                                           iommu_flush_iova, iova_entry_free);
+               if (ret)
+                       pr_info("iova flush queue initialization failed\n");
+       }
 
        domain_reserve_special_ranges(domain);
 
@@ -2229,17 +2318,20 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
        unsigned long sg_res = 0;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;
+       u64 attr;
 
        BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
 
        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;
 
-       prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+       attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
+       if (domain_use_first_level(domain))
+               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
 
        if (!sg) {
                sg_res = nr_pages;
-               pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+               pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
        }
 
        while (nr_pages > 0) {
@@ -2251,7 +2343,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
                        sg->dma_length = sg->length;
-                       pteval = (sg_phys(sg) - pgoff) | prot;
+                       pteval = (sg_phys(sg) - pgoff) | attr;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }
 
@@ -2420,7 +2512,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static struct dmar_domain *find_domain(struct device *dev)
+struct dmar_domain *find_domain(struct device *dev)
 {
        struct device_domain_info *info;
 
@@ -2463,6 +2555,36 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
        return NULL;
 }
 
+static int domain_setup_first_level(struct intel_iommu *iommu,
+                                   struct dmar_domain *domain,
+                                   struct device *dev,
+                                   int pasid)
+{
+       int flags = PASID_FLAG_SUPERVISOR_MODE;
+       struct dma_pte *pgd = domain->pgd;
+       int agaw, level;
+
+       /*
+        * Skip top levels of page tables for iommu which has
+        * less agaw than default. Unnecessary for PT mode.
+        */
+       for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+               pgd = phys_to_virt(dma_pte_addr(pgd));
+               if (!dma_pte_present(pgd))
+                       return -ENOMEM;
+       }
+
+       level = agaw_to_level(agaw);
+       if (level != 4 && level != 5)
+               return -EINVAL;
+
+       flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+
+       return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
+                                            domain->iommu_did[iommu->seq_id],
+                                            flags);
+}
+
 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                                                    int bus, int devfn,
                                                    struct device *dev,
@@ -2562,6 +2684,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                if (hw_pass_through && domain_type_is_si(domain))
                        ret = intel_pasid_setup_pass_through(iommu, domain,
                                        dev, PASID_RID2PASID);
+               else if (domain_use_first_level(domain))
+                       ret = domain_setup_first_level(iommu, domain, dev,
+                                       PASID_RID2PASID);
                else
                        ret = intel_pasid_setup_second_level(iommu, domain,
                                        dev, PASID_RID2PASID);
@@ -2767,10 +2892,8 @@ static int __init si_domain_init(int hw)
        }
 
        /*
-        * Normally we use DMA domains for devices which have RMRRs. But we
-        * loose this requirement for graphic and usb devices. Identity map
-        * the RMRRs for graphic and USB devices so that they could use the
-        * si_domain.
+        * Identity map the RMRRs so that devices with RMRRs could also use
+        * the si_domain.
         */
        for_each_rmrr_units(rmrr) {
                for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
@@ -2778,9 +2901,6 @@ static int __init si_domain_init(int hw)
                        unsigned long long start = rmrr->base_address;
                        unsigned long long end = rmrr->end_address;
 
-                       if (device_is_rmrr_locked(dev))
-                               continue;
-
                        if (WARN_ON(end < start ||
                                    end >> agaw_to_width(si_domain->agaw)))
                                continue;
@@ -2919,9 +3039,6 @@ static int device_def_domain_type(struct device *dev)
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
 
-               if (device_is_rmrr_locked(dev))
-                       return IOMMU_DOMAIN_DMA;
-
                /*
                 * Prevent any device marked as untrusted from getting
                 * placed into the statically identity mapping domain.
@@ -2959,13 +3076,9 @@ static int device_def_domain_type(struct device *dev)
                                return IOMMU_DOMAIN_DMA;
                } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
                        return IOMMU_DOMAIN_DMA;
-       } else {
-               if (device_has_rmrr(dev))
-                       return IOMMU_DOMAIN_DMA;
        }
 
-       return (iommu_identity_mapping & IDENTMAP_ALL) ?
-                       IOMMU_DOMAIN_IDENTITY : 0;
+       return 0;
 }
 
 static void intel_iommu_init_qi(struct intel_iommu *iommu)
@@ -3294,10 +3407,7 @@ static int __init init_dmars(void)
 
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
-#ifdef CONFIG_INTEL_IOMMU_SVM
-               if (pasid_supported(iommu))
-                       intel_svm_init(iommu);
-#endif
+               intel_svm_check(iommu);
        }
 
        /*
@@ -3312,9 +3422,6 @@ static int __init init_dmars(void)
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
        }
 
-       if (iommu_default_passthrough())
-               iommu_identity_mapping |= IDENTMAP_ALL;
-
 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
        dmar_map_gfx = 0;
 #endif
@@ -3387,8 +3494,21 @@ static unsigned long intel_alloc_iova(struct device *dev,
 {
        unsigned long iova_pfn;
 
-       /* Restrict dma_mask to the width that the iommu can handle */
-       dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+       /*
+        * Restrict dma_mask to the width that the iommu can handle.
+        * First-level translation restricts the input-address to a
+        * canonical address (i.e., address bits 63:N have the same
+        * value as address bit [N-1], where N is 48-bits with 4-level
+        * paging and 57-bits with 5-level paging). Hence, skip bit
+        * [N-1].
+        */
+       if (domain_use_first_level(domain))
+               dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1),
+                                dma_mask);
+       else
+               dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw),
+                                dma_mask);
+
        /* Ensure we reserve the whole size-aligned region */
        nrpages = __roundup_pow_of_two(nrpages);
 
@@ -3775,8 +3895,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
                return 0;
        }
 
-       trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
-                    sg_phys(sglist), size << VTD_PAGE_SHIFT);
+       for_each_sg(sglist, sg, nelems, i)
+               trace_map_sg(dev, i + 1, nelems, sg);
 
        return nelems;
 }
@@ -3988,6 +4108,9 @@ bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
                sg_dma_len(sg) = sg->length;
        }
 
+       for_each_sg(sglist, sg, nelems, i)
+               trace_bounce_map_sg(dev, i + 1, nelems, sg);
+
        return nelems;
 
 out_unmap:
@@ -4316,16 +4439,31 @@ static void __init init_iommu_pm_ops(void)
 static inline void init_iommu_pm_ops(void) {}
 #endif /* CONFIG_PM */
 
+static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+{
+       if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
+           !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
+           rmrr->end_address <= rmrr->base_address ||
+           arch_rmrr_sanity_check(rmrr))
+               return -EINVAL;
+
+       return 0;
+}
+
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;
-       int ret;
 
        rmrr = (struct acpi_dmar_reserved_memory *)header;
-       ret = arch_rmrr_sanity_check(rmrr);
-       if (ret)
-               return ret;
+       if (rmrr_sanity_check(rmrr))
+               WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+                          "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
+                          "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                          rmrr->base_address, rmrr->end_address,
+                          dmi_get_system_info(DMI_BIOS_VENDOR),
+                          dmi_get_system_info(DMI_BIOS_VERSION),
+                          dmi_get_system_info(DMI_PRODUCT_VERSION));
 
        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
@@ -4471,7 +4609,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
                        iommu->name);
                return -ENXIO;
        }
-       sp = domain_update_iommu_superpage(iommu) - 1;
+       sp = domain_update_iommu_superpage(NULL, iommu) - 1;
        if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
                pr_warn("%s: Doesn't support large page.\n",
                        iommu->name);
@@ -4491,10 +4629,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
        if (ret)
                goto out;
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
-       if (pasid_supported(iommu))
-               intel_svm_init(iommu);
-#endif
+       intel_svm_check(iommu);
 
        if (dmaru->ignored) {
                /*
@@ -4899,7 +5034,7 @@ static int __init platform_optin_force_iommu(void)
         * map for all devices except those marked as being untrusted.
         */
        if (dmar_disabled)
-               iommu_identity_mapping |= IDENTMAP_ALL;
+               iommu_set_default_passthrough(false);
 
        dmar_disabled = 0;
        no_iommu = 0;
@@ -5164,7 +5299,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
 
        spin_lock_irqsave(&device_domain_lock, flags);
        info = dev->archdata.iommu;
-       if (info)
+       if (info && info != DEFER_DEVICE_DOMAIN_INFO
+           && info != DUMMY_DEVICE_DOMAIN_INFO)
                __dmar_remove_one_dev_info(info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
@@ -5198,6 +5334,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
        struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;
+       int ret;
 
        switch (type) {
        case IOMMU_DOMAIN_DMA:
@@ -5214,11 +5351,12 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
                        return NULL;
                }
 
-               if (type == IOMMU_DOMAIN_DMA &&
-                   init_iova_flush_queue(&dmar_domain->iovad,
-                                         iommu_flush_iova, iova_entry_free)) {
-                       pr_warn("iova flush queue initialization failed\n");
-                       intel_iommu_strict = 1;
+               if (!intel_iommu_strict && type == IOMMU_DOMAIN_DMA) {
+                       ret = init_iova_flush_queue(&dmar_domain->iovad,
+                                                   iommu_flush_iova,
+                                                   iova_entry_free);
+                       if (ret)
+                               pr_info("iova flush queue initialization failed\n");
                }
 
                domain_update_iommu_cap(dmar_domain);
@@ -5284,7 +5422,7 @@ static void auxiliary_unlink_device(struct dmar_domain *domain,
        domain->auxd_refcnt--;
 
        if (!domain->auxd_refcnt && domain->default_pasid > 0)
-               intel_pasid_free_id(domain->default_pasid);
+               ioasid_free(domain->default_pasid);
 }
 
 static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -5302,10 +5440,11 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
        if (domain->default_pasid <= 0) {
                int pasid;
 
-               pasid = intel_pasid_alloc_id(domain, PASID_MIN,
-                                            pci_max_pasids(to_pci_dev(dev)),
-                                            GFP_KERNEL);
-               if (pasid <= 0) {
+               /* No private data needed for the default pasid */
+               pasid = ioasid_alloc(NULL, PASID_MIN,
+                                    pci_max_pasids(to_pci_dev(dev)) - 1,
+                                    NULL);
+               if (pasid == INVALID_IOASID) {
                        pr_err("Can't allocate default pasid\n");
                        return -ENODEV;
                }
@@ -5323,8 +5462,12 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
                goto attach_failed;
 
        /* Setup the PASID entry for mediated devices: */
-       ret = intel_pasid_setup_second_level(iommu, domain, dev,
-                                            domain->default_pasid);
+       if (domain_use_first_level(domain))
+               ret = domain_setup_first_level(iommu, domain, dev,
+                                              domain->default_pasid);
+       else
+               ret = intel_pasid_setup_second_level(iommu, domain, dev,
+                                                    domain->default_pasid);
        if (ret)
                goto table_failed;
        spin_unlock(&iommu->lock);
@@ -5341,7 +5484,7 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
        spin_unlock(&iommu->lock);
        spin_unlock_irqrestore(&device_domain_lock, flags);
        if (!domain->auxd_refcnt && domain->default_pasid > 0)
-               intel_pasid_free_id(domain->default_pasid);
+               ioasid_free(domain->default_pasid);
 
        return ret;
 }
@@ -5595,6 +5738,24 @@ static inline bool iommu_pasid_support(void)
        return ret;
 }
 
+static inline bool nested_mode_support(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       bool ret = true;
+
+       rcu_read_lock();
+       for_each_active_iommu(iommu, drhd) {
+               if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
+                       ret = false;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static bool intel_iommu_capable(enum iommu_cap cap)
 {
        if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5625,8 +5786,10 @@ static int intel_iommu_add_device(struct device *dev)
 
        group = iommu_group_get_for_dev(dev);
 
-       if (IS_ERR(group))
-               return PTR_ERR(group);
+       if (IS_ERR(group)) {
+               ret = PTR_ERR(group);
+               goto unlink;
+       }
 
        iommu_group_put(group);
 
@@ -5652,7 +5815,8 @@ static int intel_iommu_add_device(struct device *dev)
                                if (!get_private_domain_for_dev(dev)) {
                                        dev_warn(dev,
                                                 "Failed to get a private domain.\n");
-                                       return -ENOMEM;
+                                       ret = -ENOMEM;
+                                       goto unlink;
                                }
 
                                dev_info(dev,
@@ -5667,6 +5831,10 @@ static int intel_iommu_add_device(struct device *dev)
        }
 
        return 0;
+
+unlink:
+       iommu_device_unlink(&iommu->iommu, dev);
+       return ret;
 }
 
 static void intel_iommu_remove_device(struct device *dev)
@@ -5809,6 +5977,13 @@ static void intel_iommu_apply_resv_region(struct device *dev,
        WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
 }
 
+static struct iommu_group *intel_iommu_device_group(struct device *dev)
+{
+       if (dev_is_pci(dev))
+               return pci_device_group(dev);
+       return generic_device_group(dev);
+}
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 {
@@ -5964,10 +6139,42 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
        return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
 }
 
+static int
+intel_iommu_domain_set_attr(struct iommu_domain *domain,
+                           enum iommu_attr attr, void *data)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       unsigned long flags;
+       int ret = 0;
+
+       if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+               return -EINVAL;
+
+       switch (attr) {
+       case DOMAIN_ATTR_NESTING:
+               spin_lock_irqsave(&device_domain_lock, flags);
+               if (nested_mode_support() &&
+                   list_empty(&dmar_domain->devices)) {
+                       dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
+                       dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
+               } else {
+                       ret = -ENODEV;
+               }
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
 const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
        .domain_free            = intel_iommu_domain_free,
+       .domain_set_attr        = intel_iommu_domain_set_attr,
        .attach_dev             = intel_iommu_attach_device,
        .detach_dev             = intel_iommu_detach_device,
        .aux_attach_dev         = intel_iommu_aux_attach_device,
@@ -5981,7 +6188,7 @@ const struct iommu_ops intel_iommu_ops = {
        .get_resv_regions       = intel_iommu_get_resv_regions,
        .put_resv_regions       = generic_iommu_put_resv_regions,
        .apply_resv_region      = intel_iommu_apply_resv_region,
-       .device_group           = pci_device_group,
+       .device_group           = intel_iommu_device_group,
        .dev_has_feat           = intel_iommu_dev_has_feat,
        .dev_feat_enabled       = intel_iommu_dev_feat_enabled,
        .dev_enable_feat        = intel_iommu_dev_enable_feat,
index 040a445be30091f4a7cbf6c2158ff9c69896b399..22b30f10b3964e9d59a9a10d9abc8f35b75e59e8 100644 (file)
  */
 static DEFINE_SPINLOCK(pasid_lock);
 u32 intel_pasid_max_id = PASID_MAX;
-static DEFINE_IDR(pasid_idr);
-
-int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
-{
-       int ret, min, max;
-
-       min = max_t(int, start, PASID_MIN);
-       max = min_t(int, end, intel_pasid_max_id);
-
-       WARN_ON(in_interrupt());
-       idr_preload(gfp);
-       spin_lock(&pasid_lock);
-       ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
-       spin_unlock(&pasid_lock);
-       idr_preload_end();
-
-       return ret;
-}
-
-void intel_pasid_free_id(int pasid)
-{
-       spin_lock(&pasid_lock);
-       idr_remove(&pasid_idr, pasid);
-       spin_unlock(&pasid_lock);
-}
-
-void *intel_pasid_lookup_id(int pasid)
-{
-       void *p;
-
-       spin_lock(&pasid_lock);
-       p = idr_find(&pasid_idr, pasid);
-       spin_unlock(&pasid_lock);
-
-       return p;
-}
 
 /*
  * Per device pasid table management:
@@ -465,6 +429,21 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
                devtlb_invalidation_with_pasid(iommu, dev, pasid);
 }
 
+static void pasid_flush_caches(struct intel_iommu *iommu,
+                               struct pasid_entry *pte,
+                               int pasid, u16 did)
+{
+       if (!ecap_coherent(iommu->ecap))
+               clflush_cache_range(pte, sizeof(*pte));
+
+       if (cap_caching_mode(iommu->cap)) {
+               pasid_cache_invalidation_with_pasid(iommu, did, pasid);
+               iotlb_invalidation_with_pasid(iommu, did, pasid);
+       } else {
+               iommu_flush_write_buffer(iommu);
+       }
+}
+
 /*
  * Set up the scalable mode pasid table entry for first only
  * translation type.
@@ -498,10 +477,15 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
                pasid_set_sre(pte);
        }
 
-#ifdef CONFIG_X86
-       if (cpu_feature_enabled(X86_FEATURE_LA57))
-               pasid_set_flpm(pte, 1);
-#endif /* CONFIG_X86 */
+       if (flags & PASID_FLAG_FL5LP) {
+               if (cap_5lp_support(iommu->cap)) {
+                       pasid_set_flpm(pte, 1);
+               } else {
+                       pr_err("No 5-level paging support for first-level\n");
+                       pasid_clear_entry(pte);
+                       return -EINVAL;
+               }
+       }
 
        pasid_set_domain_id(pte, did);
        pasid_set_address_width(pte, iommu->agaw);
@@ -510,16 +494,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
        /* Setup Present and PASID Granular Transfer Type: */
        pasid_set_translation_type(pte, 1);
        pasid_set_present(pte);
-
-       if (!ecap_coherent(iommu->ecap))
-               clflush_cache_range(pte, sizeof(*pte));
-
-       if (cap_caching_mode(iommu->cap)) {
-               pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-               iotlb_invalidation_with_pasid(iommu, did, pasid);
-       } else {
-               iommu_flush_write_buffer(iommu);
-       }
+       pasid_flush_caches(iommu, pte, pasid, did);
 
        return 0;
 }
@@ -583,16 +558,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
         */
        pasid_set_sre(pte);
        pasid_set_present(pte);
-
-       if (!ecap_coherent(iommu->ecap))
-               clflush_cache_range(pte, sizeof(*pte));
-
-       if (cap_caching_mode(iommu->cap)) {
-               pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-               iotlb_invalidation_with_pasid(iommu, did, pasid);
-       } else {
-               iommu_flush_write_buffer(iommu);
-       }
+       pasid_flush_caches(iommu, pte, pasid, did);
 
        return 0;
 }
@@ -626,16 +592,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
         */
        pasid_set_sre(pte);
        pasid_set_present(pte);
-
-       if (!ecap_coherent(iommu->ecap))
-               clflush_cache_range(pte, sizeof(*pte));
-
-       if (cap_caching_mode(iommu->cap)) {
-               pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-               iotlb_invalidation_with_pasid(iommu, did, pasid);
-       } else {
-               iommu_flush_write_buffer(iommu);
-       }
+       pasid_flush_caches(iommu, pte, pasid, did);
 
        return 0;
 }
index fc8cd8f17de166a8c96c82c3e73b3e1c3f1572d0..92de6df24ccb10d136d003a88c79eb001fab2892 100644 (file)
  */
 #define PASID_FLAG_SUPERVISOR_MODE     BIT(0)
 
+/*
+ * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
+ * level translation, otherwise, 4-level paging will be used.
+ */
+#define PASID_FLAG_FL5LP               BIT(1)
+
 struct pasid_dir_entry {
        u64 val;
 };
index dca88f9fdf29a10b574ed5d123eabf4df7ac7a06..d7f2a53589002e6aee3bdcf43bc8132c20415092 100644 (file)
 #include <linux/dmar.h>
 #include <linux/interrupt.h>
 #include <linux/mm_types.h>
+#include <linux/ioasid.h>
 #include <asm/page.h>
 
 #include "intel-pasid.h"
 
 static irqreturn_t prq_event_thread(int irq, void *d);
 
-int intel_svm_init(struct intel_iommu *iommu)
-{
-       if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
-                       !cap_fl1gp_support(iommu->cap))
-               return -EINVAL;
-
-       if (cpu_feature_enabled(X86_FEATURE_LA57) &&
-                       !cap_5lp_support(iommu->cap))
-               return -EINVAL;
-
-       return 0;
-}
-
 #define PRQ_ORDER 0
 
 int intel_svm_enable_prq(struct intel_iommu *iommu)
@@ -99,6 +87,33 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
        return 0;
 }
 
+static inline bool intel_svm_capable(struct intel_iommu *iommu)
+{
+       return iommu->flags & VTD_FLAG_SVM_CAPABLE;
+}
+
+void intel_svm_check(struct intel_iommu *iommu)
+{
+       if (!pasid_supported(iommu))
+               return;
+
+       if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
+           !cap_fl1gp_support(iommu->cap)) {
+               pr_err("%s SVM disabled, incompatible 1GB page capability\n",
+                      iommu->name);
+               return;
+       }
+
+       if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+           !cap_5lp_support(iommu->cap)) {
+               pr_err("%s SVM disabled, incompatible paging mode\n",
+                      iommu->name);
+               return;
+       }
+
+       iommu->flags |= VTD_FLAG_SVM_CAPABLE;
+}
+
 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
                                unsigned long address, unsigned long pages, int ih)
 {
@@ -207,6 +222,10 @@ static const struct mmu_notifier_ops intel_mmuops = {
 static DEFINE_MUTEX(pasid_mutex);
 static LIST_HEAD(global_svm_list);
 
+#define for_each_svm_dev(sdev, svm, d)                 \
+       list_for_each_entry((sdev), &(svm)->devs, list) \
+               if ((d) != (sdev)->dev) {} else
+
 int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 {
        struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
@@ -220,6 +239,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
        if (!iommu || dmar_disabled)
                return -EINVAL;
 
+       if (!intel_svm_capable(iommu))
+               return -ENOTSUPP;
+
        if (dev_is_pci(dev)) {
                pasid_max = pci_max_pasids(to_pci_dev(dev));
                if (pasid_max < 0)
@@ -252,15 +274,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                                goto out;
                        }
 
-                       list_for_each_entry(sdev, &svm->devs, list) {
-                               if (dev == sdev->dev) {
-                                       if (sdev->ops != ops) {
-                                               ret = -EBUSY;
-                                               goto out;
-                                       }
-                                       sdev->users++;
-                                       goto success;
+                       /* Find the matching device in svm list */
+                       for_each_svm_dev(sdev, svm, dev) {
+                               if (sdev->ops != ops) {
+                                       ret = -EBUSY;
+                                       goto out;
                                }
+                               sdev->users++;
+                               goto success;
                        }
 
                        break;
@@ -314,16 +335,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                if (pasid_max > intel_pasid_max_id)
                        pasid_max = intel_pasid_max_id;
 
-               /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
-               ret = intel_pasid_alloc_id(svm,
-                                          !!cap_caching_mode(iommu->cap),
-                                          pasid_max - 1, GFP_KERNEL);
-               if (ret < 0) {
+               /* Do not use PASID 0, reserved for RID to PASID */
+               svm->pasid = ioasid_alloc(NULL, PASID_MIN,
+                                         pasid_max - 1, svm);
+               if (svm->pasid == INVALID_IOASID) {
                        kfree(svm);
                        kfree(sdev);
+                       ret = -ENOSPC;
                        goto out;
                }
-               svm->pasid = ret;
                svm->notifier.ops = &intel_mmuops;
                svm->mm = mm;
                svm->flags = flags;
@@ -333,7 +353,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                if (mm) {
                        ret = mmu_notifier_register(&svm->notifier, mm);
                        if (ret) {
-                               intel_pasid_free_id(svm->pasid);
+                               ioasid_free(svm->pasid);
                                kfree(svm);
                                kfree(sdev);
                                goto out;
@@ -344,12 +364,14 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                ret = intel_pasid_setup_first_level(iommu, dev,
                                mm ? mm->pgd : init_mm.pgd,
                                svm->pasid, FLPT_DEFAULT_DID,
-                               mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+                               (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+                               (cpu_feature_enabled(X86_FEATURE_LA57) ?
+                                PASID_FLAG_FL5LP : 0));
                spin_unlock(&iommu->lock);
                if (ret) {
                        if (mm)
                                mmu_notifier_unregister(&svm->notifier, mm);
-                       intel_pasid_free_id(svm->pasid);
+                       ioasid_free(svm->pasid);
                        kfree(svm);
                        kfree(sdev);
                        goto out;
@@ -365,7 +387,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                ret = intel_pasid_setup_first_level(iommu, dev,
                                                mm ? mm->pgd : init_mm.pgd,
                                                svm->pasid, FLPT_DEFAULT_DID,
-                                               mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+                                               (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
+                                               (cpu_feature_enabled(X86_FEATURE_LA57) ?
+                                               PASID_FLAG_FL5LP : 0));
                spin_unlock(&iommu->lock);
                if (ret) {
                        kfree(sdev);
@@ -397,44 +421,45 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
        if (!iommu)
                goto out;
 
-       svm = intel_pasid_lookup_id(pasid);
+       svm = ioasid_find(NULL, pasid, NULL);
        if (!svm)
                goto out;
 
-       list_for_each_entry(sdev, &svm->devs, list) {
-               if (dev == sdev->dev) {
-                       ret = 0;
-                       sdev->users--;
-                       if (!sdev->users) {
-                               list_del_rcu(&sdev->list);
-                               /* Flush the PASID cache and IOTLB for this device.
-                                * Note that we do depend on the hardware *not* using
-                                * the PASID any more. Just as we depend on other
-                                * devices never using PASIDs that they have no right
-                                * to use. We have a *shared* PASID table, because it's
-                                * large and has to be physically contiguous. So it's
-                                * hard to be as defensive as we might like. */
-                               intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-                               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
-                               kfree_rcu(sdev, rcu);
-
-                               if (list_empty(&svm->devs)) {
-                                       intel_pasid_free_id(svm->pasid);
-                                       if (svm->mm)
-                                               mmu_notifier_unregister(&svm->notifier, svm->mm);
-
-                                       list_del(&svm->list);
-
-                                       /* We mandate that no page faults may be outstanding
-                                        * for the PASID when intel_svm_unbind_mm() is called.
-                                        * If that is not obeyed, subtle errors will happen.
-                                        * Let's make them less subtle... */
-                                       memset(svm, 0x6b, sizeof(*svm));
-                                       kfree(svm);
-                               }
+       if (IS_ERR(svm)) {
+               ret = PTR_ERR(svm);
+               goto out;
+       }
+
+       for_each_svm_dev(sdev, svm, dev) {
+               ret = 0;
+               sdev->users--;
+               if (!sdev->users) {
+                       list_del_rcu(&sdev->list);
+                       /* Flush the PASID cache and IOTLB for this device.
+                        * Note that we do depend on the hardware *not* using
+                        * the PASID any more. Just as we depend on other
+                        * devices never using PASIDs that they have no right
+                        * to use. We have a *shared* PASID table, because it's
+                        * large and has to be physically contiguous. So it's
+                        * hard to be as defensive as we might like. */
+                       intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
+                       intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+                       kfree_rcu(sdev, rcu);
+
+                       if (list_empty(&svm->devs)) {
+                               ioasid_free(svm->pasid);
+                               if (svm->mm)
+                                       mmu_notifier_unregister(&svm->notifier, svm->mm);
+                               list_del(&svm->list);
+                               /* We mandate that no page faults may be outstanding
+                                * for the PASID when intel_svm_unbind_mm() is called.
+                                * If that is not obeyed, subtle errors will happen.
+                                * Let's make them less subtle... */
+                               memset(svm, 0x6b, sizeof(*svm));
+                               kfree(svm);
                        }
-                       break;
                }
+               break;
        }
  out:
        mutex_unlock(&pasid_mutex);
@@ -454,10 +479,14 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
        if (!iommu)
                goto out;
 
-       svm = intel_pasid_lookup_id(pasid);
+       svm = ioasid_find(NULL, pasid, NULL);
        if (!svm)
                goto out;
 
+       if (IS_ERR(svm)) {
+               ret = PTR_ERR(svm);
+               goto out;
+       }
        /* init_mm is used in this case */
        if (!svm->mm)
                ret = 1;
@@ -564,13 +593,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 
                if (!svm || svm->pasid != req->pasid) {
                        rcu_read_lock();
-                       svm = intel_pasid_lookup_id(req->pasid);
+                       svm = ioasid_find(NULL, req->pasid, NULL);
                        /* It *can't* go away, because the driver is not permitted
                         * to unbind the mm while any page faults are outstanding.
                         * So we only need RCU to protect the internal idr code. */
                        rcu_read_unlock();
-
-                       if (!svm) {
+                       if (IS_ERR_OR_NULL(svm)) {
                                pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
                                       iommu->name, req->pasid, ((unsigned long long *)req)[0],
                                       ((unsigned long long *)req)[1]);
@@ -654,11 +682,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                        if (req->priv_data_present)
                                memcpy(&resp.qw2, req->priv_data,
                                       sizeof(req->priv_data));
+                       resp.qw2 = 0;
+                       resp.qw3 = 0;
+                       qi_submit_sync(&resp, iommu);
                }
-               resp.qw2 = 0;
-               resp.qw3 = 0;
-               qi_submit_sync(&resp, iommu);
-
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }
 
index 7c3bd2c3cdca9c08fd77f1e8ad48e99080e4f178..4272fe4e17f4c9b322be38580c750594503b40d0 100644 (file)
 #define ARM_V7S_TTBR_IRGN_ATTR(attr)                                   \
        ((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
 
-#define ARM_V7S_TCR_PD1                        BIT(5)
-
 #ifdef CONFIG_ZONE_DMA32
 #define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
 #define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
@@ -798,8 +796,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
         */
        cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
 
-       /* TCR: T0SZ=0, disable TTBR1 */
-       cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;
+       /* TCR: T0SZ=0, EAE=0 (if applicable) */
+       cfg->arm_v7s_cfg.tcr = 0;
 
        /*
         * TEX remap: the indices used map to the closest equivalent types
@@ -822,15 +820,13 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
        /* Ensure the empty pgd is visible before any actual TTBR write */
        wmb();
 
-       /* TTBRs */
-       cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
-                                  ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
-                                  (cfg->coherent_walk ?
-                                  (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
-                                   ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
-                                  (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
-                                   ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
-       cfg->arm_v7s_cfg.ttbr[1] = 0;
+       /* TTBR */
+       cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
+                               (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
+                                ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+                                ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+                               (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+                                ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
        return &data->iop;
 
 out_free_data:
index bdf47f74526879d1316f640e2308a0a3871931f1..983b08477e645902a6e477fa9adc3226b9143e13 100644 (file)
 #define ARM_LPAE_PTE_MEMATTR_DEV       (((arm_lpae_iopte)0x1) << 2)
 
 /* Register bits */
-#define ARM_32_LPAE_TCR_EAE            (1 << 31)
-#define ARM_64_LPAE_S2_TCR_RES1                (1 << 31)
+#define ARM_LPAE_TCR_TG0_4K            0
+#define ARM_LPAE_TCR_TG0_64K           1
+#define ARM_LPAE_TCR_TG0_16K           2
 
-#define ARM_LPAE_TCR_EPD1              (1 << 23)
+#define ARM_LPAE_TCR_TG1_16K           1
+#define ARM_LPAE_TCR_TG1_4K            2
+#define ARM_LPAE_TCR_TG1_64K           3
 
-#define ARM_LPAE_TCR_TG0_4K            (0 << 14)
-#define ARM_LPAE_TCR_TG0_64K           (1 << 14)
-#define ARM_LPAE_TCR_TG0_16K           (2 << 14)
-
-#define ARM_LPAE_TCR_SH0_SHIFT         12
-#define ARM_LPAE_TCR_SH0_MASK          0x3
 #define ARM_LPAE_TCR_SH_NS             0
 #define ARM_LPAE_TCR_SH_OS             2
 #define ARM_LPAE_TCR_SH_IS             3
 
-#define ARM_LPAE_TCR_ORGN0_SHIFT       10
-#define ARM_LPAE_TCR_IRGN0_SHIFT       8
-#define ARM_LPAE_TCR_RGN_MASK          0x3
 #define ARM_LPAE_TCR_RGN_NC            0
 #define ARM_LPAE_TCR_RGN_WBWA          1
 #define ARM_LPAE_TCR_RGN_WT            2
 #define ARM_LPAE_TCR_RGN_WB            3
 
-#define ARM_LPAE_TCR_SL0_SHIFT         6
-#define ARM_LPAE_TCR_SL0_MASK          0x3
+#define ARM_LPAE_VTCR_SL0_MASK         0x3
 
 #define ARM_LPAE_TCR_T0SZ_SHIFT                0
-#define ARM_LPAE_TCR_SZ_MASK           0xf
-
-#define ARM_LPAE_TCR_PS_SHIFT          16
-#define ARM_LPAE_TCR_PS_MASK           0x7
 
-#define ARM_LPAE_TCR_IPS_SHIFT         32
-#define ARM_LPAE_TCR_IPS_MASK          0x7
+#define ARM_LPAE_VTCR_PS_SHIFT         16
+#define ARM_LPAE_VTCR_PS_MASK          0x7
 
 #define ARM_LPAE_TCR_PS_32_BIT         0x0ULL
 #define ARM_LPAE_TCR_PS_36_BIT         0x1ULL
@@ -293,17 +282,11 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 {
        arm_lpae_iopte pte = prot;
 
-       if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
-               pte |= ARM_LPAE_PTE_NS;
-
        if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
                pte |= ARM_LPAE_PTE_TYPE_PAGE;
        else
                pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 
-       if (data->iop.fmt != ARM_MALI_LPAE)
-               pte |= ARM_LPAE_PTE_AF;
-       pte |= ARM_LPAE_PTE_SH_IS;
        pte |= paddr_to_iopte(paddr, data);
 
        __arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
@@ -460,9 +443,20 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
        }
 
+       if (prot & IOMMU_CACHE)
+               pte |= ARM_LPAE_PTE_SH_IS;
+       else
+               pte |= ARM_LPAE_PTE_SH_OS;
+
        if (prot & IOMMU_NOEXEC)
                pte |= ARM_LPAE_PTE_XN;
 
+       if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+               pte |= ARM_LPAE_PTE_NS;
+
+       if (data->iop.fmt != ARM_MALI_LPAE)
+               pte |= ARM_LPAE_PTE_AF;
+
        return pte;
 }
 
@@ -474,6 +468,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
        arm_lpae_iopte *ptep = data->pgd;
        int ret, lvl = data->start_level;
        arm_lpae_iopte prot;
+       long iaext = (long)iova >> cfg->ias;
 
        /* If no access, then nothing to do */
        if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
@@ -482,7 +477,9 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
        if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
                return -EINVAL;
 
-       if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+               iaext = ~iaext;
+       if (WARN_ON(iaext || paddr >> cfg->oas))
                return -ERANGE;
 
        prot = arm_lpae_prot_to_pte(data, iommu_prot);
@@ -648,11 +645,14 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte *ptep = data->pgd;
+       long iaext = (long)iova >> cfg->ias;
 
        if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
                return 0;
 
-       if (WARN_ON(iova >> data->iop.cfg.ias))
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+               iaext = ~iaext;
+       if (WARN_ON(iaext))
                return 0;
 
        return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
@@ -787,9 +787,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 {
        u64 reg;
        struct arm_lpae_io_pgtable *data;
+       typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
+       bool tg1;
 
        if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
-                           IO_PGTABLE_QUIRK_NON_STRICT))
+                           IO_PGTABLE_QUIRK_NON_STRICT |
+                           IO_PGTABLE_QUIRK_ARM_TTBR1))
                return NULL;
 
        data = arm_lpae_alloc_pgtable(cfg);
@@ -798,58 +801,55 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 
        /* TCR */
        if (cfg->coherent_walk) {
-               reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
-                     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
-                     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+               tcr->sh = ARM_LPAE_TCR_SH_IS;
+               tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
+               tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
        } else {
-               reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
-                     (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
-                     (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
+               tcr->sh = ARM_LPAE_TCR_SH_OS;
+               tcr->irgn = ARM_LPAE_TCR_RGN_NC;
+               tcr->orgn = ARM_LPAE_TCR_RGN_NC;
        }
 
+       tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
        switch (ARM_LPAE_GRANULE(data)) {
        case SZ_4K:
-               reg |= ARM_LPAE_TCR_TG0_4K;
+               tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
                break;
        case SZ_16K:
-               reg |= ARM_LPAE_TCR_TG0_16K;
+               tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
                break;
        case SZ_64K:
-               reg |= ARM_LPAE_TCR_TG0_64K;
+               tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
                break;
        }
 
        switch (cfg->oas) {
        case 32:
-               reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
                break;
        case 36:
-               reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
                break;
        case 40:
-               reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
                break;
        case 42:
-               reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
                break;
        case 44:
-               reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
                break;
        case 48:
-               reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
                break;
        case 52:
-               reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+               tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
                break;
        default:
                goto out_free_data;
        }
 
-       reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
-
-       /* Disable speculative walks through TTBR1 */
-       reg |= ARM_LPAE_TCR_EPD1;
-       cfg->arm_lpae_s1_cfg.tcr = reg;
+       tcr->tsz = 64ULL - cfg->ias;
 
        /* MAIRs */
        reg = (ARM_LPAE_MAIR_ATTR_NC
@@ -872,9 +872,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
        /* Ensure the empty pgd is visible before any actual TTBR write */
        wmb();
 
-       /* TTBRs */
-       cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
-       cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+       /* TTBR */
+       cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
        return &data->iop;
 
 out_free_data:
@@ -885,8 +884,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 static struct io_pgtable *
 arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 {
-       u64 reg, sl;
+       u64 sl;
        struct arm_lpae_io_pgtable *data;
+       typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
 
        /* The NS quirk doesn't apply at stage 2 */
        if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
@@ -911,55 +911,59 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
        }
 
        /* VTCR */
-       reg = ARM_64_LPAE_S2_TCR_RES1 |
-            (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
-            (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
-            (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+       if (cfg->coherent_walk) {
+               vtcr->sh = ARM_LPAE_TCR_SH_IS;
+               vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
+               vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
+       } else {
+               vtcr->sh = ARM_LPAE_TCR_SH_OS;
+               vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
+               vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
+       }
 
        sl = data->start_level;
 
        switch (ARM_LPAE_GRANULE(data)) {
        case SZ_4K:
-               reg |= ARM_LPAE_TCR_TG0_4K;
+               vtcr->tg = ARM_LPAE_TCR_TG0_4K;
                sl++; /* SL0 format is different for 4K granule size */
                break;
        case SZ_16K:
-               reg |= ARM_LPAE_TCR_TG0_16K;
+               vtcr->tg = ARM_LPAE_TCR_TG0_16K;
                break;
        case SZ_64K:
-               reg |= ARM_LPAE_TCR_TG0_64K;
+               vtcr->tg = ARM_LPAE_TCR_TG0_64K;
                break;
        }
 
        switch (cfg->oas) {
        case 32:
-               reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
                break;
        case 36:
-               reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
                break;
        case 40:
-               reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
                break;
        case 42:
-               reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
                break;
        case 44:
-               reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
                break;
        case 48:
-               reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
                break;
        case 52:
-               reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
+               vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
                break;
        default:
                goto out_free_data;
        }
 
-       reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
-       reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
-       cfg->arm_lpae_s2_cfg.vtcr = reg;
+       vtcr->tsz = 64ULL - cfg->ias;
+       vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
 
        /* Allocate pgd pages */
        data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
@@ -982,35 +986,21 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 static struct io_pgtable *
 arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 {
-       struct io_pgtable *iop;
-
        if (cfg->ias > 32 || cfg->oas > 40)
                return NULL;
 
        cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
-       iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
-       if (iop) {
-               cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
-               cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
-       }
-
-       return iop;
+       return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
 }
 
 static struct io_pgtable *
 arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 {
-       struct io_pgtable *iop;
-
        if (cfg->ias > 40 || cfg->oas > 40)
                return NULL;
 
        cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
-       iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
-       if (iop)
-               cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
-
-       return iop;
+       return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
 }
 
 static struct io_pgtable *
index ced53e5b72b56d02b828c2f79a25ee2ec202cc89..94394c81468f716e228157bef7878d8d40f3a6ff 100644 (file)
@@ -63,7 +63,7 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
        if (!ops)
                return;
 
-       iop = container_of(ops, struct io_pgtable, ops);
+       iop = io_pgtable_ops_to_pgtable(ops);
        io_pgtable_tlb_flush_all(iop);
        io_pgtable_init_table[iop->fmt]->free(iop);
 }
index e436ff813e7e5bcced5225a11a6a25baa2bb5814..99869217fbec7d862af4bba0e0a6facbe34a7168 100644 (file)
@@ -87,6 +87,7 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
        put_device(iommu->dev);
        return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_device_sysfs_add);
 
 void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
@@ -94,6 +95,8 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu)
        device_unregister(iommu->dev);
        iommu->dev = NULL;
 }
+EXPORT_SYMBOL_GPL(iommu_device_sysfs_remove);
+
 /*
  * IOMMU drivers can indicate a device is managed by a given IOMMU using
  * this interface.  A link to the device will be created in the "devices"
@@ -119,6 +122,7 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_device_link);
 
 void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 {
@@ -128,3 +132,4 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
        sysfs_remove_link(&link->kobj, "iommu");
        sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
 }
+EXPORT_SYMBOL_GPL(iommu_device_unlink);
index 101f2d68eb6ea735ad7be5f251297f3de33d999b..3e3528436e0b220b8714470675bf9b1eefb673bb 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/bitops.h>
 #include <linux/property.h>
 #include <linux/fsl/mc.h>
+#include <linux/module.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -141,6 +142,7 @@ int iommu_device_register(struct iommu_device *iommu)
        spin_unlock(&iommu_device_lock);
        return 0;
 }
+EXPORT_SYMBOL_GPL(iommu_device_register);
 
 void iommu_device_unregister(struct iommu_device *iommu)
 {
@@ -148,6 +150,7 @@ void iommu_device_unregister(struct iommu_device *iommu)
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
 }
+EXPORT_SYMBOL_GPL(iommu_device_unregister);
 
 static struct iommu_param *iommu_get_dev_param(struct device *dev)
 {
@@ -183,10 +186,21 @@ int iommu_probe_device(struct device *dev)
        if (!iommu_get_dev_param(dev))
                return -ENOMEM;
 
+       if (!try_module_get(ops->owner)) {
+               ret = -EINVAL;
+               goto err_free_dev_param;
+       }
+
        ret = ops->add_device(dev);
        if (ret)
-               iommu_free_dev_param(dev);
+               goto err_module_put;
 
+       return 0;
+
+err_module_put:
+       module_put(ops->owner);
+err_free_dev_param:
+       iommu_free_dev_param(dev);
        return ret;
 }
 
@@ -197,7 +211,10 @@ void iommu_release_device(struct device *dev)
        if (dev->iommu_group)
                ops->remove_device(dev);
 
-       iommu_free_dev_param(dev);
+       if (dev->iommu_param) {
+               module_put(ops->owner);
+               iommu_free_dev_param(dev);
+       }
 }
 
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -751,6 +768,7 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
        mutex_unlock(&group->mutex);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
+       sysfs_remove_link(group->devices_kobj, device->name);
 err_free_name:
        kfree(device->name);
 err_remove_link:
@@ -886,6 +904,7 @@ struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
        kobject_get(group->devices_kobj);
        return group;
 }
+EXPORT_SYMBOL_GPL(iommu_group_ref_get);
 
 /**
  * iommu_group_put - Decrement group reference
@@ -1259,6 +1278,7 @@ struct iommu_group *generic_device_group(struct device *dev)
 {
        return iommu_group_alloc();
 }
+EXPORT_SYMBOL_GPL(generic_device_group);
 
 /*
  * Use standard PCI bus topology, isolation features, and DMA alias quirks
@@ -1326,6 +1346,7 @@ struct iommu_group *pci_device_group(struct device *dev)
        /* No shared group found, allocate new */
        return iommu_group_alloc();
 }
+EXPORT_SYMBOL_GPL(pci_device_group);
 
 /* Get the IOMMU group for device on fsl-mc bus */
 struct iommu_group *fsl_mc_device_group(struct device *dev)
@@ -1338,6 +1359,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
                group = iommu_group_alloc();
        return group;
 }
+EXPORT_SYMBOL_GPL(fsl_mc_device_group);
 
 /**
  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
@@ -1406,6 +1428,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 
        return group;
 }
+EXPORT_SYMBOL(iommu_group_get_for_dev);
 
 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
 {
@@ -1536,6 +1559,11 @@ int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
 {
        int err;
 
+       if (ops == NULL) {
+               bus->iommu_ops = NULL;
+               return 0;
+       }
+
        if (bus->iommu_ops != NULL)
                return -EBUSY;
 
@@ -2265,6 +2293,7 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
        region->type = type;
        return region;
 }
+EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
 
 static int
 request_default_domain_for_dev(struct device *dev, unsigned long type)
index d02edd2751f32077bdb1ebed449c5958b8abcd19..ecb3f9464dd5c894583fe88ef84fb0d1093511fd 100644 (file)
@@ -374,7 +374,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
        u32 tmp;
 
        /* TTBR0 */
-       ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
+       ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
        ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
        ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
 
index 93f14bca26ee0292ce51578236c59131e58bfc85..94a6df1bddd60b8b1607555ae4fa6e77a7ac7231 100644 (file)
@@ -279,8 +279,8 @@ static void __program_context(void __iomem *base, int ctx,
        SET_V2PCFG(base, ctx, 0x3);
 
        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
-       SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
-       SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);
+       SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
+       SET_TTBR1(base, ctx, 0);
 
        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
index 6fc1f5ecf91e5109e62146ff4be90a070099166c..95945f467c03f949bc07abc801152e181f2ecfce 100644 (file)
@@ -367,7 +367,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
        /* Update the pgtable base address register of the M4U HW */
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
-               writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+               writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
                       data->base + REG_MMU_PT_BASE_ADDR);
        }
 
@@ -765,7 +765,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
        writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
        writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
        if (m4u_dom)
-               writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
+               writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
                       base + REG_MMU_PT_BASE_ADDR);
        return 0;
 }
index 026ad2b29dcd549f0b6e8681b96a2ff89dcfe935..20738aacac89e29f23c28370b75cfce30374fabd 100644 (file)
@@ -8,11 +8,12 @@
 #include <linux/export.h>
 #include <linux/iommu.h>
 #include <linux/limits.h>
-#include <linux/pci.h>
+#include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_iommu.h>
 #include <linux/of_pci.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/fsl/mc.h>
 
@@ -91,16 +92,16 @@ static int of_iommu_xlate(struct device *dev,
 {
        const struct iommu_ops *ops;
        struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
-       int err;
+       int ret;
 
        ops = iommu_ops_from_fwnode(fwnode);
        if ((ops && !ops->of_xlate) ||
            !of_device_is_available(iommu_spec->np))
                return NO_IOMMU;
 
-       err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
-       if (err)
-               return err;
+       ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+       if (ret)
+               return ret;
        /*
         * The otherwise-empty fwspec handily serves to indicate the specific
         * IOMMU device we're waiting for, which will be useful if we ever get
@@ -109,7 +110,12 @@ static int of_iommu_xlate(struct device *dev,
        if (!ops)
                return driver_deferred_probe_check_state(dev);
 
-       return ops->of_xlate(dev, iommu_spec);
+       if (!try_module_get(ops->owner))
+               return -ENODEV;
+
+       ret = ops->of_xlate(dev, iommu_spec);
+       module_put(ops->owner);
+       return ret;
 }
 
 struct of_pci_iommu_alias_info {
@@ -179,6 +185,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
                        .np = master_np,
                };
 
+               pci_request_acs();
                err = pci_for_each_dma_alias(to_pci_dev(dev),
                                             of_pci_iommu_init, &info);
        } else if (dev_is_fsl_mc(dev)) {
@@ -196,8 +203,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
                        if (err)
                                break;
                }
-       }
 
+               fwspec = dev_iommu_fwspec_get(dev);
+               if (!err && fwspec)
+                       of_property_read_u32(master_np, "pasid-num-bits",
+                                            &fwspec->num_pasid_bits);
+       }
 
        /*
         * Two success conditions can be represented by non-negative err here:
index 52f38292df5b634e6c25adf56649b90eef76610e..39759db4f0038c0ec7dd49928b75e6d89e5cae86 100644 (file)
@@ -201,7 +201,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
 
        fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);
 
-       if (!(fsr & FSR_FAULT))
+       if (!(fsr & ARM_SMMU_FSR_FAULT))
                return IRQ_NONE;
 
        fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
@@ -215,7 +215,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
        }
 
        iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
-       iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);
+       iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
 
        return IRQ_HANDLED;
 }
@@ -269,18 +269,15 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 
                /* TTBRs */
                iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
-                               pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
-                               FIELD_PREP(TTBRn_ASID, ctx->asid));
-               iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
-                               pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
-                               FIELD_PREP(TTBRn_ASID, ctx->asid));
+                               pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
+                               FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
+               iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);
 
                /* TCR */
                iommu_writel(ctx, ARM_SMMU_CB_TCR2,
-                               (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
-                               FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
+                               arm_smmu_lpae_tcr2(&pgtbl_cfg));
                iommu_writel(ctx, ARM_SMMU_CB_TCR,
-                               pgtbl_cfg.arm_lpae_s1_cfg.tcr);
+                            arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);
 
                /* MAIRs (stage-1 only) */
                iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
@@ -289,11 +286,13 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
                                pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
 
                /* SCTLR */
-               reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
-                       SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;
+               reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
+                     ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
+                     ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
+                     ARM_SMMU_SCTLR_CFCFG;
 
                if (IS_ENABLED(CONFIG_BIG_ENDIAN))
-                       reg |= SCTLR_E;
+                       reg |= ARM_SMMU_SCTLR_E;
 
                iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
 
index 01d18b39069ebee06224a451a5c1d5bee00533cf..c5589ee0dfb3f3b7613083ec4e88161632145621 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/delay.h>
 
 #include <asm/io.h>
-#include <asm/mach-jz4740/irq.h>
 
 struct ingenic_intc_data {
        void __iomem *base;
@@ -50,7 +49,7 @@ static irqreturn_t intc_cascade(int irq, void *data)
                while (pending) {
                        int bit = __fls(pending);
 
-                       irq = irq_find_mapping(domain, bit + (i * 32));
+                       irq = irq_linear_revmap(domain, bit + (i * 32));
                        generic_handle_irq(irq);
                        pending &= ~BIT(bit);
                }
@@ -97,8 +96,7 @@ static int __init ingenic_intc_of_init(struct device_node *node,
                goto out_unmap_irq;
        }
 
-       domain = irq_domain_add_legacy(node, num_chips * 32,
-                                      JZ4740_IRQ_BASE, 0,
+       domain = irq_domain_add_linear(node, num_chips * 32,
                                       &irq_generic_chip_ops, NULL);
        if (!domain) {
                err = -ENOMEM;
index 8df547d2d935309c2bdf5f60d018e0b64dc686ac..0aca5807a119ba2d0bda2ed2cade2d4de985159b 100644 (file)
@@ -256,7 +256,7 @@ static int __init plic_init(struct device_node *node,
                 * Skip contexts other than external interrupts for our
                 * privilege level.
                 */
-               if (parent.args[0] != IRQ_EXT)
+               if (parent.args[0] != RV_IRQ_EXT)
                        continue;
 
                hartid = plic_find_hart_id(parent.np);
index 3c50c4e4da8f39ee56703f9e0abc2461f8b96659..963d3774c93e287432966b658088914b67f10da3 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/dm-bufio.h>
 
 #define DM_MSG_PREFIX "persistent snapshot"
-#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32       /* 16KB */
+#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U      /* 16KB */
 
 #define DM_PREFETCH_CHUNKS             12
 
index b7c20979bd19a68205dbbdfe5fd05abf5e3ef6c3..322386ff5d225dfb463c68627a885252f76e42f4 100644 (file)
@@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        char b[BDEVNAME_SIZE];
        char b2[BDEVNAME_SIZE];
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
-       unsigned short blksize = 512;
+       unsigned blksize = 512;
 
        *private_conf = ERR_PTR(-ENOMEM);
        if (!conf)
index 9340435a94a095d77bddcb12b6331940878d1c8e..6c95dc471d4c6c63d53f5fdab61acd150bf15a40 100644 (file)
@@ -380,7 +380,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status)
        } else {
                list_del_init(&data->list);
                if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
-                       data->adap->transmit_queue_sz--;
+                       if (!WARN_ON(!data->adap->transmit_queue_sz))
+                               data->adap->transmit_queue_sz--;
        }
 
        if (data->msg.tx_status & CEC_TX_STATUS_OK) {
@@ -432,6 +433,14 @@ static void cec_flush(struct cec_adapter *adap)
                 * need to do anything special in that case.
                 */
        }
+       /*
+        * If something went wrong and this counter isn't what it should
+        * be, then this will reset it back to 0. Warn if it is not 0,
+        * since it indicates a bug, either in this framework or in a
+        * CEC driver.
+        */
+       if (WARN_ON(adap->transmit_queue_sz))
+               adap->transmit_queue_sz = 0;
 }
 
 /*
@@ -456,7 +465,7 @@ int cec_thread_func(void *_adap)
                bool timeout = false;
                u8 attempts;
 
-               if (adap->transmitting) {
+               if (adap->transmit_in_progress) {
                        int err;
 
                        /*
@@ -491,7 +500,7 @@ int cec_thread_func(void *_adap)
                        goto unlock;
                }
 
-               if (adap->transmitting && timeout) {
+               if (adap->transmit_in_progress && timeout) {
                        /*
                         * If we timeout, then log that. Normally this does
                         * not happen and it is an indication of a faulty CEC
@@ -500,14 +509,18 @@ int cec_thread_func(void *_adap)
                         * so much traffic on the bus that the adapter was
                         * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
                         */
-                       pr_warn("cec-%s: message %*ph timed out\n", adap->name,
-                               adap->transmitting->msg.len,
-                               adap->transmitting->msg.msg);
+                       if (adap->transmitting) {
+                               pr_warn("cec-%s: message %*ph timed out\n", adap->name,
+                                       adap->transmitting->msg.len,
+                                       adap->transmitting->msg.msg);
+                               /* Just give up on this. */
+                               cec_data_cancel(adap->transmitting,
+                                               CEC_TX_STATUS_TIMEOUT);
+                       } else {
+                               pr_warn("cec-%s: transmit timed out\n", adap->name);
+                       }
                        adap->transmit_in_progress = false;
                        adap->tx_timeouts++;
-                       /* Just give up on this. */
-                       cec_data_cancel(adap->transmitting,
-                                       CEC_TX_STATUS_TIMEOUT);
                        goto unlock;
                }
 
@@ -522,7 +535,8 @@ int cec_thread_func(void *_adap)
                data = list_first_entry(&adap->transmit_queue,
                                        struct cec_data, list);
                list_del_init(&data->list);
-               adap->transmit_queue_sz--;
+               if (!WARN_ON(!data->adap->transmit_queue_sz))
+                       adap->transmit_queue_sz--;
 
                /* Make this the current transmitting message */
                adap->transmitting = data;
@@ -1085,11 +1099,11 @@ void cec_received_msg_ts(struct cec_adapter *adap,
                        valid_la = false;
                else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
                        valid_la = false;
-               else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+               else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST))
                        valid_la = false;
                else if (cec_msg_is_broadcast(msg) &&
-                        adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
-                        !(dir_fl & BCAST2_0))
+                        adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 &&
+                        !(dir_fl & BCAST1_4))
                        valid_la = false;
        }
        if (valid_la && min_len) {
index ac88ade94cdab449ebbd257964426da519899a55..59609556d9692b6e1e97746a8a328bdf8fb4fe89 100644 (file)
@@ -116,6 +116,7 @@ struct pulse8 {
        unsigned int vers;
        struct completion cmd_done;
        struct work_struct work;
+       u8 work_result;
        struct delayed_work ping_eeprom_work;
        struct cec_msg rx_msg;
        u8 data[DATA_SIZE];
@@ -137,8 +138,10 @@ static void pulse8_irq_work_handler(struct work_struct *work)
 {
        struct pulse8 *pulse8 =
                container_of(work, struct pulse8, work);
+       u8 result = pulse8->work_result;
 
-       switch (pulse8->data[0] & 0x3f) {
+       pulse8->work_result = 0;
+       switch (result & 0x3f) {
        case MSGCODE_FRAME_DATA:
                cec_received_msg(pulse8->adap, &pulse8->rx_msg);
                break;
@@ -172,12 +175,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
                pulse8->escape = false;
        } else if (data == MSGEND) {
                struct cec_msg *msg = &pulse8->rx_msg;
+               u8 msgcode = pulse8->buf[0];
 
                if (debug)
                        dev_info(pulse8->dev, "received: %*ph\n",
                                 pulse8->idx, pulse8->buf);
-               pulse8->data[0] = pulse8->buf[0];
-               switch (pulse8->buf[0] & 0x3f) {
+               switch (msgcode & 0x3f) {
                case MSGCODE_FRAME_START:
                        msg->len = 1;
                        msg->msg[0] = pulse8->buf[1];
@@ -186,14 +189,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
                        if (msg->len == CEC_MAX_MSG_SIZE)
                                break;
                        msg->msg[msg->len++] = pulse8->buf[1];
-                       if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
+                       if (msgcode & MSGCODE_FRAME_EOM) {
+                               WARN_ON(pulse8->work_result);
+                               pulse8->work_result = msgcode;
                                schedule_work(&pulse8->work);
+                               break;
+                       }
                        break;
                case MSGCODE_TRANSMIT_SUCCEEDED:
                case MSGCODE_TRANSMIT_FAILED_LINE:
                case MSGCODE_TRANSMIT_FAILED_ACK:
                case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
                case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+                       WARN_ON(pulse8->work_result);
+                       pulse8->work_result = msgcode;
                        schedule_work(&pulse8->work);
                        break;
                case MSGCODE_HIGH_ERROR:
index f9ac224130008026cc317f8d6ea26272dd7bf10c..1074b882c57c87d2da4565fbf73494a8933ebe04 100644 (file)
@@ -100,19 +100,19 @@ struct buflist {
  * Function prototypes. Called from OS entry point mptctl_ioctl.
  * arg contents specific to function.
  */
-static int mptctl_fw_download(unsigned long arg);
-static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_gettargetinfo(unsigned long arg);
-static int mptctl_readtest(unsigned long arg);
-static int mptctl_mpt_command(unsigned long arg);
-static int mptctl_eventquery(unsigned long arg);
-static int mptctl_eventenable(unsigned long arg);
-static int mptctl_eventreport(unsigned long arg);
-static int mptctl_replace_fw(unsigned long arg);
-
-static int mptctl_do_reset(unsigned long arg);
-static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_hp_targetinfo(unsigned long arg);
+static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg);
+
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg);
 
 static int  mptctl_probe(struct pci_dev *, const struct pci_device_id *);
 static void mptctl_remove(struct pci_dev *);
@@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
 /*
  * Private function calls.
  */
-static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr);
-static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
+static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr);
+static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen);
 static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags,
                struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
 static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
@@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
         * by TM and FW reloads.
         */
        if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
-               return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
+               return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd));
        } else if (cmd == MPTTARGETINFO) {
-               return mptctl_gettargetinfo(arg);
+               return mptctl_gettargetinfo(iocp, arg);
        } else if (cmd == MPTTEST) {
-               return mptctl_readtest(arg);
+               return mptctl_readtest(iocp, arg);
        } else if (cmd == MPTEVENTQUERY) {
-               return mptctl_eventquery(arg);
+               return mptctl_eventquery(iocp, arg);
        } else if (cmd == MPTEVENTENABLE) {
-               return mptctl_eventenable(arg);
+               return mptctl_eventenable(iocp, arg);
        } else if (cmd == MPTEVENTREPORT) {
-               return mptctl_eventreport(arg);
+               return mptctl_eventreport(iocp, arg);
        } else if (cmd == MPTFWREPLACE) {
-               return mptctl_replace_fw(arg);
+               return mptctl_replace_fw(iocp, arg);
        }
 
        /* All of these commands require an interrupt or
@@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                return ret;
 
        if (cmd == MPTFWDOWNLOAD)
-               ret = mptctl_fw_download(arg);
+               ret = mptctl_fw_download(iocp, arg);
        else if (cmd == MPTCOMMAND)
-               ret = mptctl_mpt_command(arg);
+               ret = mptctl_mpt_command(iocp, arg);
        else if (cmd == MPTHARDRESET)
-               ret = mptctl_do_reset(arg);
+               ret = mptctl_do_reset(iocp, arg);
        else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
-               ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
+               ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd));
        else if (cmd == HP_GETTARGETINFO)
-               ret = mptctl_hp_targetinfo(arg);
+               ret = mptctl_hp_targetinfo(iocp, arg);
        else
                ret = -EINVAL;
 
@@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return ret;
 }
 
-static int mptctl_do_reset(unsigned long arg)
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg)
 {
        struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
        struct mpt_ioctl_diag_reset krinfo;
-       MPT_ADAPTER             *iocp;
 
        if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
                printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
@@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg)
                return -EFAULT;
        }
 
-       if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
-               printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
-                               __FILE__, __LINE__, krinfo.hdr.iocnum);
-               return -ENODEV; /* (-6) No such device or address */
-       }
-
        dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
            iocp->name));
 
@@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg)
  *             -ENOMSG if FW upload returned bad status
  */
 static int
-mptctl_fw_download(unsigned long arg)
+mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg)
 {
        struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
        struct mpt_fw_xfer       kfwdl;
@@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg)
                return -EFAULT;
        }
 
-       return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
+       return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg)
  *             -ENOMSG if FW upload returned bad status
  */
 static int
-mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
+mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen)
 {
        FWDownload_t            *dlmsg;
        MPT_FRAME_HDR           *mf;
-       MPT_ADAPTER             *iocp;
        FWDownloadTCSGE_t       *ptsge;
        MptSge_t                *sgl, *sgIn;
        char                    *sgOut;
@@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
        pFWDownloadReply_t       ReplyMsg = NULL;
        unsigned long            timeleft;
 
-       if (mpt_verify_adapter(ioc, &iocp) < 0) {
-               printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
-                                ioc);
-               return -ENODEV; /* (-6) No such device or address */
-       } else {
-
-               /*  Valid device. Get a message frame and construct the FW download message.
-               */
-               if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
-                       return -EAGAIN;
-       }
+       /*  Valid device. Get a message frame and construct the FW download message.
+       */
+       if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
+               return -EAGAIN;
 
        dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
            "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
@@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
            iocp->name, ufwbuf));
        dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
            iocp->name, (int)fwlen));
-       dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc   = %04xh\n",
-           iocp->name, ioc));
 
        dlmsg = (FWDownload_t*) mf;
        ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
@@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
  *             -ENODEV  if no such device/adapter
  */
 static int
-mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
+mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
 {
        struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
        struct mpt_ioctl_iocinfo *karg;
-       MPT_ADAPTER             *ioc;
        struct pci_dev          *pdev;
-       int                     iocnum;
        unsigned int            port;
        int                     cim_rev;
        struct scsi_device      *sdev;
@@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
                return PTR_ERR(karg);
        }
 
-       if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               kfree(karg);
-               return -ENODEV;
-       }
-
        /* Verify the data transfer size is correct. */
        if (karg->hdr.maxDataSize != data_size) {
                printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
@@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
  *             -ENODEV  if no such device/adapter
  */
 static int
-mptctl_gettargetinfo (unsigned long arg)
+mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
        struct mpt_ioctl_targetinfo karg;
-       MPT_ADAPTER             *ioc;
        VirtDevice              *vdevice;
        char                    *pmem;
        int                     *pdata;
-       int                     iocnum;
        int                     numDevices = 0;
        int                     lun;
        int                     maxWordsLeft;
@@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
            ioc->name));
        /* Get the port number and set the maximum number of bytes
@@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg)
  *             -ENODEV  if no such device/adapter
  */
 static int
-mptctl_readtest (unsigned long arg)
+mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_test __user *uarg = (void __user *) arg;
        struct mpt_ioctl_test    karg;
-       MPT_ADAPTER *ioc;
-       int iocnum;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
                printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
@@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
            ioc->name));
        /* Fill in the data and return the structure to the calling
@@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg)
  *             -ENODEV  if no such device/adapter
  */
 static int
-mptctl_eventquery (unsigned long arg)
+mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
        struct mpt_ioctl_eventquery      karg;
-       MPT_ADAPTER *ioc;
-       int iocnum;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
                printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
@@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
            ioc->name));
        karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
@@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static int
-mptctl_eventenable (unsigned long arg)
+mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
        struct mpt_ioctl_eventenable     karg;
-       MPT_ADAPTER *ioc;
-       int iocnum;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
                printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
@@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
            ioc->name));
        if (ioc->events == NULL) {
@@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static int
-mptctl_eventreport (unsigned long arg)
+mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
        struct mpt_ioctl_eventreport     karg;
-       MPT_ADAPTER              *ioc;
-       int                      iocnum;
        int                      numBytes, maxEvents, max;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
@@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
            ioc->name));
 
@@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static int
-mptctl_replace_fw (unsigned long arg)
+mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
        struct mpt_ioctl_replace_fw      karg;
-       MPT_ADAPTER              *ioc;
-       int                      iocnum;
        int                      newFwSize;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
@@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
            ioc->name));
        /* If caching FW, Free the old FW image
@@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg)
  *             -ENOMEM if memory allocation error
  */
 static int
-mptctl_mpt_command (unsigned long arg)
+mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_command __user *uarg = (void __user *) arg;
        struct mpt_ioctl_command  karg;
-       MPT_ADAPTER     *ioc;
-       int             iocnum;
        int             rc;
 
 
@@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
-       rc = mptctl_do_mpt_command (karg, &uarg->MF);
+       rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF);
 
        return rc;
 }
@@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg)
  *             -EPERM if SCSI I/O and target is untagged
  */
 static int
-mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr)
 {
-       MPT_ADAPTER     *ioc;
        MPT_FRAME_HDR   *mf = NULL;
        MPIHeader_t     *hdr;
        char            *psge;
@@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
        dma_addr_t      dma_addr_in;
        dma_addr_t      dma_addr_out;
        int             sgSize = 0;     /* Num SG elements */
-       int             iocnum, flagsLength;
+       int             flagsLength;
        int             sz, rc = 0;
        int             msgContext;
        u16             req_idx;
@@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
        bufIn.kptr = bufOut.kptr = NULL;
        bufIn.len = bufOut.len = 0;
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
        if (ioc->ioc_reset_in_progress) {
                spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
@@ -2418,17 +2321,15 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
  *             -ENOMEM if memory allocation error
  */
 static int
-mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
+mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
 {
        hp_host_info_t  __user *uarg = (void __user *) arg;
-       MPT_ADAPTER             *ioc;
        struct pci_dev          *pdev;
        char                    *pbuf=NULL;
        dma_addr_t              buf_dma;
        hp_host_info_t          karg;
        CONFIGPARMS             cfg;
        ConfigPageHeader_t      hdr;
-       int                     iocnum;
        int                     rc, cim_rev;
        ToolboxIstwiReadWriteRequest_t  *IstwiRWRequest;
        MPT_FRAME_HDR           *mf = NULL;
@@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
            ioc->name));
 
@@ -2659,15 +2554,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
  *             -ENOMEM if memory allocation error
  */
 static int
-mptctl_hp_targetinfo(unsigned long arg)
+mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
 {
        hp_target_info_t __user *uarg = (void __user *) arg;
        SCSIDevicePage0_t       *pg0_alloc;
        SCSIDevicePage3_t       *pg3_alloc;
-       MPT_ADAPTER             *ioc;
        MPT_SCSI_HOST           *hd = NULL;
        hp_target_info_t        karg;
-       int                     iocnum;
        int                     data_sz;
        dma_addr_t              page_dma;
        CONFIGPARMS             cfg;
@@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-               (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
        if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
                return -EINVAL;
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
@@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
        kfw.fwlen = kfw32.fwlen;
        kfw.bufp = compat_ptr(kfw32.bufp);
 
-       ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
+       ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen);
 
        mutex_unlock(&iocp->ioctl_cmds.mutex);
 
@@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
 
        /* Pass new structure to do_mpt_command
         */
-       ret = mptctl_do_mpt_command (karg, &uarg->MF);
+       ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF);
 
        mutex_unlock(&iocp->ioctl_cmds.mutex);
 
index 6d27ccfe068021127efd5452d7ff872859bcbe5f..3c2d405bc79b951f88a618289bfc8d47e16fac52 100644 (file)
@@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
                cdev = &edev->component[i];
                if (cdev->dev == dev) {
                        enclosure_remove_links(cdev);
-                       device_del(&cdev->cdev);
                        put_device(dev);
                        cdev->dev = NULL;
-                       return device_add(&cdev->cdev);
+                       return 0;
                }
        }
        return -ENODEV;
index a4fdad04809a994bf9afdbbb561eb059b0084fc9..de87693cf557d405ba86e8977c5aeab6ce15f2f6 100644 (file)
@@ -278,7 +278,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
 
 void lkdtm_UNSET_SMEP(void)
 {
-#ifdef CONFIG_X86_64
+#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
 #define MOV_CR4_DEPTH  64
        void (*direct_write_cr4)(unsigned long val);
        unsigned char *insn;
@@ -338,13 +338,13 @@ void lkdtm_UNSET_SMEP(void)
                native_write_cr4(cr4);
        }
 #else
-       pr_err("FAIL: this test is x86_64-only\n");
+       pr_err("XFAIL: this test is x86_64-only\n");
 #endif
 }
 
-#ifdef CONFIG_X86_32
 void lkdtm_DOUBLE_FAULT(void)
 {
+#ifdef CONFIG_X86_32
        /*
         * Trigger #DF by setting the stack limit to zero.  This clobbers
         * a GDT TLS slot, which is okay because the current task will die
@@ -373,6 +373,8 @@ void lkdtm_DOUBLE_FAULT(void)
        asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
                      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
 
-       panic("tried to double fault but didn't die\n");
-}
+       pr_err("FAIL: tried to double fault but didn't die\n");
+#else
+       pr_err("XFAIL: this test is ia32-only\n");
 #endif
+}
index edf94ee54ec7fb4b7bdae3ec8ff730284d248bef..aa9368bf7a0c8e48530da90df282fe0a1d8a1ed7 100644 (file)
@@ -148,13 +148,13 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
        unsigned long timeout;
        u32 syscfg;
 
-       if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
+       if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
            state == FL_VERIFYING_ERASE) {
                int i = 21;
                unsigned int intr_flags = ONENAND_INT_MASTER;
 
                switch (state) {
-               case FL_RESETING:
+               case FL_RESETTING:
                        intr_flags |= ONENAND_INT_RESET;
                        break;
                case FL_PREPARING_ERASE:
@@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
 
-       tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+       tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
+                                      DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
                return -EIO;
@@ -375,7 +376,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
         * context fallback to PIO mode.
         */
        if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
-           count < 384 || in_interrupt() || oops_in_progress )
+           count < 384 || in_interrupt() || oops_in_progress)
                goto out_copy;
 
        xtra = count & 3;
@@ -422,7 +423,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
         * context fallback to PIO mode.
         */
        if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
-           count < 384 || in_interrupt() || oops_in_progress )
+           count < 384 || in_interrupt() || oops_in_progress)
                goto out_copy;
 
        dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
@@ -528,7 +529,8 @@ static int omap2_onenand_probe(struct platform_device *pdev)
                 c->gpmc_cs, c->phys_base, c->onenand.base,
                 c->dma_chan ? "DMA" : "PIO");
 
-       if ((r = onenand_scan(&c->mtd, 1)) < 0)
+       r = onenand_scan(&c->mtd, 1);
+       if (r < 0)
                goto err_release_dma;
 
        freq = omap2_onenand_get_freq(c->onenand.version_id);
index 77bd32a683e180f4f438d56e382cecb1ac10d967..85640ee11c8688b55d16e40ef8802fead6e7eeaf 100644 (file)
@@ -2853,7 +2853,7 @@ static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 
                /* Exit OTP access mode */
                this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-               this->wait(mtd, FL_RESETING);
+               this->wait(mtd, FL_RESETTING);
 
                status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
                status &= 0x60;
@@ -2924,7 +2924,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
 
        /* Exit OTP access mode */
        this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-       this->wait(mtd, FL_RESETING);
+       this->wait(mtd, FL_RESETTING);
 
        return ret;
 }
@@ -2968,7 +2968,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
 
        /* Exit OTP access mode */
        this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-       this->wait(mtd, FL_RESETING);
+       this->wait(mtd, FL_RESETTING);
 
        return ret;
 }
@@ -3008,7 +3008,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
 
                /* Exit OTP access mode */
                this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-               this->wait(mtd, FL_RESETING);
+               this->wait(mtd, FL_RESETTING);
        } else {
                ops.mode = MTD_OPS_PLACE_OOB;
                ops.ooblen = len;
@@ -3413,7 +3413,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
                this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
 
                this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-               this->wait(mtd, FL_RESETING);
+               this->wait(mtd, FL_RESETTING);
 
                printk(KERN_INFO "Die %d boundary: %d%s\n", die,
                       this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3635,7 +3635,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
        ret = this->wait(mtd, FL_WRITING);
 out:
        this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
-       this->wait(mtd, FL_RESETING);
+       this->wait(mtd, FL_RESETTING);
        if (!ret)
                /* Recalculate device size on boundary change*/
                flexonenand_get_size(mtd);
@@ -3671,7 +3671,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)
        /* Reset OneNAND to read default register values */
        this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
        /* Wait reset */
-       this->wait(mtd, FL_RESETING);
+       this->wait(mtd, FL_RESETTING);
 
        /* Restore system configuration 1 */
        this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
index 55e5536a5850d17c9c9c5565f5ebed0a2d62b6ba..beb7987e4c2b7ba50c7beae4b95a4b99d48fe410 100644 (file)
@@ -675,12 +675,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
 normal:
        if (count != mtd->writesize) {
                /* Copy the bufferram to memory to prevent unaligned access */
-               memcpy(this->page_buf, p, mtd->writesize);
-               p = this->page_buf + offset;
+               memcpy_fromio(this->page_buf, p, mtd->writesize);
+               memcpy(buffer, this->page_buf + offset, count);
+       } else {
+               memcpy_fromio(buffer, p, count);
        }
 
-       memcpy(buffer, p, count);
-
        return 0;
 }
 
index 3a36285a8d8a191948c603957ee7629c1cf7fc48..f6c7102a1e3253f79cbc1f6c0bf54858661c7564 100644 (file)
@@ -914,8 +914,8 @@ static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
 /* Prepare CDMA descriptor. */
 static void
 cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
-                              char nf_mem, u32 flash_ptr, char *mem_ptr,
-                              char *ctrl_data_ptr, u16 ctype)
+                              char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
+                                  dma_addr_t ctrl_data_ptr, u16 ctype)
 {
        struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
 
@@ -931,13 +931,13 @@ cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
        cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
        cdma_desc->command_flags  |= CDMA_CF_INT;
 
-       cdma_desc->memory_pointer = (uintptr_t)mem_ptr;
+       cdma_desc->memory_pointer = mem_ptr;
        cdma_desc->status = 0;
        cdma_desc->sync_flag_pointer = 0;
        cdma_desc->sync_arguments = 0;
 
        cdma_desc->command_type = ctype;
-       cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr;
+       cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
 }
 
 static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
@@ -1280,8 +1280,7 @@ cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
        }
 
        cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
-                                      (void *)dma_buf, (void *)dma_ctrl_dat,
-                                      ctype);
+                                      dma_buf, dma_ctrl_dat, ctype);
 
        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
 
@@ -1360,7 +1359,7 @@ static int cadence_nand_erase(struct nand_chip *chip, u32 page)
 
        cadence_nand_cdma_desc_prepare(cdns_ctrl,
                                       cdns_chip->cs[chip->cur_cs],
-                                      page, NULL, NULL,
+                                      page, 0, 0,
                                       CDMA_CT_ERASE);
        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
        if (status) {
index 334fe3130285a0dc2556b36acfdac767eb137d36..b9d5d55a5edb9a6e5384a87a1470f168225db58a 100644 (file)
@@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this)
        struct resources *r = &this->resources;
        int ret;
 
+       ret = pm_runtime_get_sync(this->dev);
+       if (ret < 0)
+               return ret;
+
        ret = gpmi_reset_block(r->gpmi_regs, false);
        if (ret)
                goto err_out;
@@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this)
         */
        writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 
-       return 0;
 err_out:
+       pm_runtime_mark_last_busy(this->dev);
+       pm_runtime_put_autosuspend(this->dev);
        return ret;
 }
 
@@ -2722,6 +2727,10 @@ static int gpmi_pm_resume(struct device *dev)
                return ret;
        }
 
+       /* Set flag to get timing setup restored for next exec_op */
+       if (this->hw.clk_rate)
+               this->hw.must_apply_timings = true;
+
        /* re-init the BCH registers */
        ret = bch_set_geometry(this);
        if (ret) {
index 9e63800f768a8f68613efcbdba70ee58bfcff920..3ba73f18841f9b68da3e83a007530937cea63e98 100644 (file)
@@ -37,6 +37,7 @@
 /* Max ECC buffer length */
 #define FMC2_MAX_ECC_BUF_LEN           (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
 
+#define FMC2_TIMEOUT_US                        1000
 #define FMC2_TIMEOUT_MS                        1000
 
 /* Timings */
@@ -53,6 +54,8 @@
 #define FMC2_PMEM                      0x88
 #define FMC2_PATT                      0x8c
 #define FMC2_HECCR                     0x94
+#define FMC2_ISR                       0x184
+#define FMC2_ICR                       0x188
 #define FMC2_CSQCR                     0x200
 #define FMC2_CSQCFGR1                  0x204
 #define FMC2_CSQCFGR2                  0x208
 #define FMC2_PATT_ATTHIZ(x)            (((x) & 0xff) << 24)
 #define FMC2_PATT_DEFAULT              0x0a0a0a0a
 
+/* Register: FMC2_ISR */
+#define FMC2_ISR_IHLF                  BIT(1)
+
+/* Register: FMC2_ICR */
+#define FMC2_ICR_CIHLF                 BIT(1)
+
 /* Register: FMC2_CSQCR */
 #define FMC2_CSQCR_CSQSTART            BIT(0)
 
@@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
                stm32_fmc2_set_buswidth_16(fmc2, true);
 }
 
+static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       const struct nand_sdr_timings *timings;
+       u32 isr, sr;
+
+       /* Check if there is no pending requests to the NAND flash */
+       if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
+                                             sr & FMC2_SR_NWRF, 1,
+                                             FMC2_TIMEOUT_US))
+               dev_warn(fmc2->dev, "Waitrdy timeout\n");
+
+       /* Wait tWB before R/B# signal is low */
+       timings = nand_get_sdr_timings(&chip->data_interface);
+       ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
+       /* R/B# signal is low, clear high level flag */
+       writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
+
+       /* Wait R/B# signal is high */
+       return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
+                                                isr, isr & FMC2_ISR_IHLF,
+                                                5, 1000 * timeout_ms);
+}
+
 static int stm32_fmc2_exec_op(struct nand_chip *chip,
                              const struct nand_operation *op,
                              bool check_only)
@@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
                        break;
 
                case NAND_OP_WAITRDY_INSTR:
-                       ret = nand_soft_waitrdy(chip,
-                                               instr->ctx.waitrdy.timeout_ms);
+                       ret = stm32_fmc2_waitrdy(chip,
+                                                instr->ctx.waitrdy.timeout_ms);
                        break;
                }
        }
index 4744bf94ad9a912f5dd7a0cdd3478c4c27fac853..b9f272408c4d5f57b61dcc781e83c70fc76337e1 100644 (file)
@@ -247,7 +247,8 @@ static int sm_read_sector(struct sm_ftl *ftl,
 
        /* FTL can contain -1 entries that are by default filled with bits */
        if (block == -1) {
-               memset(buffer, 0xFF, SM_SECTOR_SIZE);
+               if (buffer)
+                       memset(buffer, 0xFF, SM_SECTOR_SIZE);
                return 0;
        }
 
index f4afe123e9dcd38be987dd79be7f20309a24e620..b0cd443dd758d1a098b8885ef034c71e41b4dacd 100644 (file)
@@ -2124,6 +2124,8 @@ static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
        if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
                return 0;
 
+       nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
        return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
 }
 
@@ -4596,6 +4598,7 @@ static void sst_set_default_init(struct spi_nor *nor)
 static void st_micron_set_default_init(struct spi_nor *nor)
 {
        nor->flags |= SNOR_F_HAS_LOCK;
+       nor->flags &= ~SNOR_F_HAS_16BIT_SR;
        nor->params.quad_enable = NULL;
        nor->params.set_4byte = st_micron_set_4byte;
 }
@@ -4768,9 +4771,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
 
 static void spansion_post_sfdp_fixups(struct spi_nor *nor)
 {
-       struct mtd_info *mtd = &nor->mtd;
-
-       if (mtd->size <= SZ_16M)
+       if (nor->params.size <= SZ_16M)
                return;
 
        nor->flags |= SNOR_F_4B_OPCODES;
index 4e1789ea2bc3ee7443cca88e7de8c5f6ba228add..eacd428e07e9f32c9c014b877d0d0105d8b4ffc9 100644 (file)
 #define TCAN4X5X_MODE_NORMAL BIT(7)
 
 #define TCAN4X5X_DISABLE_WAKE_MSK      (BIT(31) | BIT(30))
+#define TCAN4X5X_DISABLE_INH_MSK       BIT(9)
 
 #define TCAN4X5X_SW_RESET BIT(2)
 
@@ -166,6 +167,28 @@ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
        }
 }
 
+static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
+{
+       int ret = 0;
+
+       if (priv->reset_gpio) {
+               gpiod_set_value(priv->reset_gpio, 1);
+
+               /* tpulse_width minimum 30us */
+               usleep_range(30, 100);
+               gpiod_set_value(priv->reset_gpio, 0);
+       } else {
+               ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG,
+                                  TCAN4X5X_SW_RESET);
+               if (ret)
+                       return ret;
+       }
+
+       usleep_range(700, 1000);
+
+       return ret;
+}
+
 static int regmap_spi_gather_write(void *context, const void *reg,
                                   size_t reg_len, const void *val,
                                   size_t val_len)
@@ -348,14 +371,23 @@ static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
                                  TCAN4X5X_DISABLE_WAKE_MSK, 0x00);
 }
 
+static int tcan4x5x_disable_state(struct m_can_classdev *cdev)
+{
+       struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+       return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+                                 TCAN4X5X_DISABLE_INH_MSK, 0x01);
+}
+
 static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
 {
        struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+       int ret;
 
        tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
                                                    GPIOD_OUT_HIGH);
        if (IS_ERR(tcan4x5x->device_wake_gpio)) {
-               if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
+               if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
 
                tcan4x5x_disable_wake(cdev);
@@ -366,18 +398,17 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
        if (IS_ERR(tcan4x5x->reset_gpio))
                tcan4x5x->reset_gpio = NULL;
 
-       usleep_range(700, 1000);
+       ret = tcan4x5x_reset(tcan4x5x);
+       if (ret)
+               return ret;
 
        tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
                                                              "device-state",
                                                              GPIOD_IN);
-       if (IS_ERR(tcan4x5x->device_state_gpio))
+       if (IS_ERR(tcan4x5x->device_state_gpio)) {
                tcan4x5x->device_state_gpio = NULL;
-
-       tcan4x5x->power = devm_regulator_get_optional(cdev->dev,
-                                                     "vsup");
-       if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+               tcan4x5x_disable_state(cdev);
+       }
 
        return 0;
 }
@@ -412,6 +443,12 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
        if (!priv)
                return -ENOMEM;
 
+       priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
+       if (PTR_ERR(priv->power) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+       else
+               priv->power = NULL;
+
        mcan_class->device_data = priv;
 
        m_can_class_get_clocks(mcan_class);
@@ -451,11 +488,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
        priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
                                        &spi->dev, &tcan4x5x_regmap);
 
-       ret = tcan4x5x_parse_config(mcan_class);
+       ret = tcan4x5x_power_enable(priv->power, 1);
        if (ret)
                goto out_clk;
 
-       tcan4x5x_power_enable(priv->power, 1);
+       ret = tcan4x5x_parse_config(mcan_class);
+       if (ret)
+               goto out_power;
+
+       ret = tcan4x5x_init(mcan_class);
+       if (ret)
+               goto out_power;
 
        ret = m_can_class_register(mcan_class);
        if (ret)
index 8caf7af0dee2040b9daa615447eae6502459a3ec..99101d7027a8fcc63336ab9ca74087d00353d9c8 100644 (file)
@@ -381,13 +381,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
        struct net_device *dev = napi->dev;
        struct mscan_regs __iomem *regs = priv->reg_base;
        struct net_device_stats *stats = &dev->stats;
-       int npackets = 0;
-       int ret = 1;
+       int work_done = 0;
        struct sk_buff *skb;
        struct can_frame *frame;
        u8 canrflg;
 
-       while (npackets < quota) {
+       while (work_done < quota) {
                canrflg = in_8(&regs->canrflg);
                if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
                        break;
@@ -408,18 +407,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
 
                stats->rx_packets++;
                stats->rx_bytes += frame->can_dlc;
-               npackets++;
+               work_done++;
                netif_receive_skb(skb);
        }
 
-       if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
-               napi_complete(&priv->napi);
-               clear_bit(F_RX_PROGRESS, &priv->flags);
-               if (priv->can.state < CAN_STATE_BUS_OFF)
-                       out_8(&regs->canrier, priv->shadow_canrier);
-               ret = 0;
+       if (work_done < quota) {
+               if (likely(napi_complete_done(&priv->napi, work_done))) {
+                       clear_bit(F_RX_PROGRESS, &priv->flags);
+                       if (priv->can.state < CAN_STATE_BUS_OFF)
+                               out_8(&regs->canrier, priv->shadow_canrier);
+               }
        }
-       return ret;
+       return work_done;
 }
 
 static irqreturn_t mscan_isr(int irq, void *dev_id)
index 2f74f6704c1286dc678c323aceaa34f0c64396ab..a4b4b742c80c322a5759fdb7b62c4f57368462ac 100644 (file)
@@ -918,7 +918,7 @@ static int gs_usb_probe(struct usb_interface *intf,
                             GS_USB_BREQ_HOST_FORMAT,
                             USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                             1,
-                            intf->altsetting[0].desc.bInterfaceNumber,
+                            intf->cur_altsetting->desc.bInterfaceNumber,
                             hconf,
                             sizeof(*hconf),
                             1000);
@@ -941,7 +941,7 @@ static int gs_usb_probe(struct usb_interface *intf,
                             GS_USB_BREQ_DEVICE_CONFIG,
                             USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                             1,
-                            intf->altsetting[0].desc.bInterfaceNumber,
+                            intf->cur_altsetting->desc.bInterfaceNumber,
                             dconf,
                             sizeof(*dconf),
                             1000);
index 5fc0be564274375f3d5c579521a2d3b89ecd4a88..7ab87a758754543a189672ad886ce8dbda2f19e3 100644 (file)
@@ -1590,7 +1590,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev)
        struct usb_endpoint_descriptor *ep;
        int i;
 
-       iface_desc = &dev->intf->altsetting[0];
+       iface_desc = dev->intf->cur_altsetting;
 
        for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                ep = &iface_desc->endpoint[i].desc;
index ae4c37e1bb753ecd8e2cc01320c9f7797f7868c9..1b9957f12459a9780ac9e94a73fa773cca6483de 100644 (file)
@@ -1310,7 +1310,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
        struct usb_endpoint_descriptor *endpoint;
        int i;
 
-       iface_desc = &dev->intf->altsetting[0];
+       iface_desc = dev->intf->cur_altsetting;
 
        for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                endpoint = &iface_desc->endpoint[i].desc;
index e43040c9f9ee7f1a7e4ea9de54661a220adb357e..3e8635311d0de1517d6d63b5dc4556cf30ead192 100644 (file)
@@ -68,7 +68,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 
                /* Force link status for IMP port */
                reg = core_readl(priv, offset);
-               reg |= (MII_SW_OR | LINK_STS);
+               reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
                core_writel(priv, reg, offset);
 
                /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
index f3f0c3f07391a231a34b561b77b97806a737202d..1962c8330daa06cad2f567858a294b9ea86aa8a9 100644 (file)
@@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
                return -EINVAL;
        }
 
-       ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+       ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
 
        /* Locate the first rule available */
        if (fs->location == RX_CLS_LOC_ANY)
@@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
 
                if (rule->fs.flow_type != fs->flow_type ||
                    rule->fs.ring_cookie != fs->ring_cookie ||
-                   rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+                   rule->fs.h_ext.data[0] != fs->h_ext.data[0])
                        continue;
 
                switch (fs->flow_type & ~FLOW_EXT) {
@@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
                return -EINVAL;
        }
 
-       ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+       ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
 
        layout = &udf_tcpip6_layout;
        slice_num = bcm_sf2_get_slice_number(layout, 0);
index 120a65d3e3efac3dfb2b323f1a841d55c72f966b..b016cc205f81f02c1c27a43a5f7618770c7a15ae 100644 (file)
@@ -360,6 +360,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
 {
        u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
 
+       /* Use the default high priority for management frames sent to
+        * the CPU.
+        */
+       port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
        return mv88e6390_g1_monitor_write(chip, ptr, port);
 }
 
index bc5a6b2bb1e48caa933e89aa77f382f33d62cc3e..5324c6f4ae902355b18e3b3de05e5d7918556e99 100644 (file)
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST         0x2000
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST          0x2100
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST             0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI     0x00e0
 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK                        0x00ff
 
 /* Offset 0x1C: Global Control 2 */
index 7fe256c5739d0c56489af4246ad028247c260878..0b43c650e100f7d19abce9963effeedbd46abcc9 100644 (file)
@@ -393,7 +393,7 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port)
 }
 
 static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
-                                   phy_interface_t mode)
+                                   phy_interface_t mode, bool force)
 {
        u8 lane;
        u16 cmode;
@@ -427,8 +427,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                cmode = 0;
        }
 
-       /* cmode doesn't change, nothing to do for us */
-       if (cmode == chip->ports[port].cmode)
+       /* cmode doesn't change, nothing to do for us unless forced */
+       if (cmode == chip->ports[port].cmode && !force)
                return 0;
 
        lane = mv88e6xxx_serdes_get_lane(chip, port);
@@ -484,7 +484,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
        if (port != 9 && port != 10)
                return -EOPNOTSUPP;
 
-       return mv88e6xxx_port_set_cmode(chip, port, mode);
+       return mv88e6xxx_port_set_cmode(chip, port, mode, false);
 }
 
 int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
@@ -504,7 +504,7 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                break;
        }
 
-       return mv88e6xxx_port_set_cmode(chip, port, mode);
+       return mv88e6xxx_port_set_cmode(chip, port, mode, false);
 }
 
 static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
@@ -555,7 +555,7 @@ int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       return mv88e6xxx_port_set_cmode(chip, port, mode);
+       return mv88e6xxx_port_set_cmode(chip, port, mode, true);
 }
 
 int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
index a51ac088c0bcafd4274aac22540bd97b62f8d6bb..bb91f3d17cf26b9d91a126bcde283c5dc13f5f80 100644 (file)
@@ -582,7 +582,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
        struct device *dev = &priv->spidev->dev;
        struct device_node *child;
 
-       for_each_child_of_node(ports_node, child) {
+       for_each_available_child_of_node(ports_node, child) {
                struct device_node *phy_node;
                phy_interface_t phy_mode;
                u32 index;
@@ -1569,8 +1569,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
 
        if (enabled) {
                /* Enable VLAN filtering. */
-               tpid  = ETH_P_8021AD;
-               tpid2 = ETH_P_8021Q;
+               tpid  = ETH_P_8021Q;
+               tpid2 = ETH_P_8021AD;
        } else {
                /* Disable VLAN filtering. */
                tpid  = ETH_P_SJA1105;
@@ -1579,9 +1579,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
 
        table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
        general_params = table->entries;
-       /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
-       general_params->tpid = tpid;
        /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
+       general_params->tpid = tpid;
+       /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
        general_params->tpid2 = tpid2;
        /* When VLAN filtering is on, we need to at least be able to
         * decode management traffic through the "backup plan".
@@ -1855,7 +1855,7 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
        if (!clone)
                goto out;
 
-       sja1105_ptp_txtstamp_skb(ds, slot, clone);
+       sja1105_ptp_txtstamp_skb(ds, port, clone);
 
 out:
        mutex_unlock(&priv->mgmt_lock);
index 54258a25031db159a0da856a4d5b56ddd28ce1a7..43ab7589d0d0c530d976bec1437b4c94c2b00d55 100644 (file)
@@ -234,7 +234,7 @@ int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
        if (rw == SPI_WRITE)
                priv->info->ptp_cmd_packing(buf, cmd, PACK);
 
-       rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
+       rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
                              SJA1105_SIZE_PTP_CMD);
 
        if (rw == SPI_READ)
@@ -659,7 +659,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
        ptp_data->clock = NULL;
 }
 
-void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
                              struct sk_buff *skb)
 {
        struct sja1105_private *priv = ds->priv;
@@ -679,7 +679,7 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
                goto out;
        }
 
-       rc = sja1105_ptpegr_ts_poll(ds, slot, &ts);
+       rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
        if (rc < 0) {
                dev_err(ds->dev, "timed out polling for tstamp\n");
                kfree_skb(skb);
index 0d03e13e99092e36c93adcb946f7f0f26f717b1b..63d2311817c474a460ac26218c93dacfe0678aa7 100644 (file)
@@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
        return size;
 }
 
+/* TPID and TPID2 are intentionally reversed so that semantic
+ * compatibility with E/T is kept.
+ */
 static size_t
 sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
                                         enum packing_op op)
@@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
        sja1105_packing(buf, &entry->mirr_port,   141, 139, size, op);
        sja1105_packing(buf, &entry->vlmarker,    138, 107, size, op);
        sja1105_packing(buf, &entry->vlmask,      106,  75, size, op);
-       sja1105_packing(buf, &entry->tpid,        74,  59, size, op);
+       sja1105_packing(buf, &entry->tpid2,        74,  59, size, op);
        sja1105_packing(buf, &entry->ignore2stf,   58,  58, size, op);
-       sja1105_packing(buf, &entry->tpid2,        57,  42, size, op);
+       sja1105_packing(buf, &entry->tpid,        57,  42, size, op);
        sja1105_packing(buf, &entry->queue_ts,     41,  41, size, op);
        sja1105_packing(buf, &entry->egrmirrvid,   40,  29, size, op);
        sja1105_packing(buf, &entry->egrmirrpcp,   28,  26, size, op);
index 26b925b5daced04b2eebb353010220d927f2e239..fa6750d973d7b1a28528062b2dbd30f39868f87e 100644 (file)
@@ -477,11 +477,6 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
        if (admin->cycle_time_extension)
                return -ENOTSUPP;
 
-       if (!ns_to_sja1105_delta(admin->base_time)) {
-               dev_err(ds->dev, "A base time of zero is not hardware-allowed\n");
-               return -ERANGE;
-       }
-
        for (i = 0; i < admin->num_entries; i++) {
                s64 delta_ns = admin->entries[i].interval;
                s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
index a17a4da7bc158466082c5dd2591a0f9ff26bf05a..c85e3e29012c0be813dde299aa8731653362a791 100644 (file)
@@ -403,6 +403,8 @@ int aq_nic_start(struct aq_nic_s *self)
        if (err < 0)
                goto err_exit;
 
+       aq_nic_set_loopback(self);
+
        err = self->aq_hw_ops->hw_start(self->aq_hw);
        if (err < 0)
                goto err_exit;
@@ -413,8 +415,6 @@ int aq_nic_start(struct aq_nic_s *self)
 
        INIT_WORK(&self->service_task, aq_nic_service_task);
 
-       aq_nic_set_loopback(self);
-
        timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
        aq_nic_service_timer_cb(&self->service_timer);
 
index 58e891af6e09018682d7be6435923f9e38bf1e1b..ec041f78d0634426d76e23a23678c6be8321a69b 100644 (file)
@@ -1525,9 +1525,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
        .rx_extract_ts           = hw_atl_b0_rx_extract_ts,
        .extract_hwts            = hw_atl_b0_extract_hwts,
        .hw_set_offload          = hw_atl_b0_hw_offload_set,
-       .hw_get_hw_stats         = hw_atl_utils_get_hw_stats,
-       .hw_get_fw_version       = hw_atl_utils_get_fw_version,
-       .hw_set_offload          = hw_atl_b0_hw_offload_set,
        .hw_set_loopback         = hw_atl_b0_set_loopback,
        .hw_set_fc               = hw_atl_b0_set_fc,
 };
index 8910b62e67ed79d938696fedbe5a2a7eaf7bdc03..f547baa6c95499e5f16313ea47b2c04f18f105ca 100644 (file)
@@ -667,9 +667,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
        u32 speed;
 
        mpi_state = hw_atl_utils_mpi_get_state(self);
-       speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
-                            FW2X_RATE_2G5 | FW2X_RATE_5G |
-                            FW2X_RATE_10G);
+       speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;
 
        if (!speed) {
                link_status->mbps = 0U;
index 035dbb1b2c985bd6bcd45a2a7782e1381b4c7ef0..ec25fd81985d63cc2821ee5774d616d9ae696a1a 100644 (file)
@@ -1516,8 +1516,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
        int ethaddr_bytes = ETH_ALEN;
 
        memset(ppattern + offset, 0xff, magicsync);
-       for (j = 0; j < magicsync; j++)
-               set_bit(len++, (unsigned long *) pmask);
+       for (j = 0; j < magicsync; j++) {
+               pmask[len >> 3] |= BIT(len & 7);
+               len++;
+       }
 
        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
@@ -1529,7 +1531,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
                for (k = 0; k< ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                (j * ETH_ALEN) + k] = macaddr[k];
-                       set_bit(len++, (unsigned long *) pmask);
+                       pmask[len >> 3] |= BIT(len & 7);
+                       len++;
                }
        }
        return len - 1;
index 825af709708ef7674795158be92fa7fb01fab303..d6b1a153f9df27be9716db319090263de18ca20a 100644 (file)
@@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
                ring->switch_queue = qp;
                ring->switch_port = port;
                ring->inspect = true;
-               priv->ring_map[q + port * num_tx_queues] = ring;
+               priv->ring_map[qp + port * num_tx_queues] = ring;
                qp++;
        }
 
@@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
        struct net_device *slave_dev;
        unsigned int num_tx_queues;
        struct net_device *dev;
-       unsigned int q, port;
+       unsigned int q, qp, port;
 
        priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
        if (priv->netdev != info->master)
@@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
                        continue;
 
                ring->inspect = false;
-               priv->ring_map[q + port * num_tx_queues] = NULL;
+               qp = ring->switch_queue;
+               priv->ring_map[qp + port * num_tx_queues] = NULL;
        }
 
        return 0;
index 7a6e82db423123585574c88765fb00f14ab3b603..bacc8552bce1c98f7e9b9733e8cd1445948cecd4 100644 (file)
@@ -1536,8 +1536,11 @@ void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
        ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
         func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
 
+#define BNX2X_VFS_VLAN_CREDIT(bp)      \
+       (GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT)
+
 #define PF_VLAN_CREDIT_E2(bp, func_num)                                         \
-       ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+       ((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) / \
         func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
 
 #endif /* BNX2X_SP_VERBS */
index c779f9cf8822f329a06caebcb2636c92f5dd8748..e6f18f6070ef7527b1f3552d049b08ee01f30998 100644 (file)
@@ -11065,11 +11065,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
        struct flow_keys *keys1 = &f1->fkeys;
        struct flow_keys *keys2 = &f2->fkeys;
 
-       if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
-           keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
-           keys1->ports.ports == keys2->ports.ports &&
-           keys1->basic.ip_proto == keys2->basic.ip_proto &&
-           keys1->basic.n_proto == keys2->basic.n_proto &&
+       if (keys1->basic.n_proto != keys2->basic.n_proto ||
+           keys1->basic.ip_proto != keys2->basic.ip_proto)
+               return false;
+
+       if (keys1->basic.n_proto == htons(ETH_P_IP)) {
+               if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+                   keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+                       return false;
+       } else {
+               if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
+                          sizeof(keys1->addrs.v6addrs.src)) ||
+                   memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
+                          sizeof(keys1->addrs.v6addrs.dst)))
+                       return false;
+       }
+
+       if (keys1->ports.ports == keys2->ports.ports &&
            keys1->control.flags == keys2->control.flags &&
            ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
            ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
@@ -11361,7 +11373,7 @@ int bnxt_get_port_parent_id(struct net_device *dev,
                return -EOPNOTSUPP;
 
        /* The PF and it's VF-reps only support the switchdev framework */
-       if (!BNXT_PF(bp))
+       if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
                return -EOPNOTSUPP;
 
        ppid->id_len = sizeof(bp->switch_id);
@@ -11734,6 +11746,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
        put_unaligned_le32(dw, &dsn[0]);
        pci_read_config_dword(pdev, pos + 4, &dw);
        put_unaligned_le32(dw, &dsn[4]);
+       bp->flags |= BNXT_FLAG_DSN_VALID;
        return 0;
 }
 
@@ -11845,9 +11858,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        if (BNXT_PF(bp)) {
                /* Read the adapter's DSN to use as the eswitch switch_id */
-               rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
-               if (rc)
-                       goto init_err_pci_clean;
+               bnxt_pcie_dsn_get(bp, bp->switch_id);
        }
 
        /* MTU range: 60 - FW defined max */
index 505af5cfb1bd7712998a20b14a807ca8498b49ab..f14335433a64db8763bfc1a2d50c39973b33c1f0 100644 (file)
@@ -1532,6 +1532,7 @@ struct bnxt {
        #define BNXT_FLAG_NO_AGG_RINGS  0x20000
        #define BNXT_FLAG_RX_PAGE_MODE  0x40000
        #define BNXT_FLAG_MULTI_HOST    0x100000
+       #define BNXT_FLAG_DSN_VALID     0x200000
        #define BNXT_FLAG_DOUBLE_DB     0x400000
        #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
        #define BNXT_FLAG_DIM           0x2000000
@@ -1936,9 +1937,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type)
        case HWRM_CFA_ENCAP_RECORD_FREE:
        case HWRM_CFA_DECAP_FILTER_ALLOC:
        case HWRM_CFA_DECAP_FILTER_FREE:
-       case HWRM_CFA_NTUPLE_FILTER_ALLOC:
-       case HWRM_CFA_NTUPLE_FILTER_FREE:
-       case HWRM_CFA_NTUPLE_FILTER_CFG:
        case HWRM_CFA_EM_FLOW_ALLOC:
        case HWRM_CFA_EM_FLOW_FREE:
        case HWRM_CFA_EM_FLOW_CFG:
index f9bf7d7250abca8565684a2afa3bd9a29f347bbe..b010b34cdaf835fdf23eea5ffceaddce4386ec98 100644 (file)
@@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
        struct net_device *dev;
        int rc, i;
 
+       if (!(bp->flags & BNXT_FLAG_DSN_VALID))
+               return -ENODEV;
+
        bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
        if (!bp->vf_reps)
                return -ENOMEM;
index c5ee363ca5dc36f2512d3acabc401ceab3b968e2..f7d87c71aaa9c399ccb462ae377a2e5c8b42db10 100644 (file)
@@ -611,21 +611,24 @@ static const struct phylink_mac_ops macb_phylink_ops = {
        .mac_link_up = macb_mac_link_up,
 };
 
+static bool macb_phy_handle_exists(struct device_node *dn)
+{
+       dn = of_parse_phandle(dn, "phy-handle", 0);
+       of_node_put(dn);
+       return dn != NULL;
+}
+
 static int macb_phylink_connect(struct macb *bp)
 {
+       struct device_node *dn = bp->pdev->dev.of_node;
        struct net_device *dev = bp->dev;
        struct phy_device *phydev;
        int ret;
 
-       if (bp->pdev->dev.of_node &&
-           of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
-               ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
-                                            0);
-               if (ret) {
-                       netdev_err(dev, "Could not attach PHY (%d)\n", ret);
-                       return ret;
-               }
-       } else {
+       if (dn)
+               ret = phylink_of_phy_connect(bp->phylink, dn, 0);
+
+       if (!dn || (ret && !macb_phy_handle_exists(dn))) {
                phydev = phy_find_first(bp->mii_bus);
                if (!phydev) {
                        netdev_err(dev, "no PHY found\n");
@@ -634,10 +637,11 @@ static int macb_phylink_connect(struct macb *bp)
 
                /* attach the mac to the phy */
                ret = phylink_connect_phy(bp->phylink, phydev);
-               if (ret) {
-                       netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
-                       return ret;
-               }
+       }
+
+       if (ret) {
+               netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+               return ret;
        }
 
        phylink_start(bp->phylink);
@@ -4088,7 +4092,7 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
        mgmt->rate = 0;
        mgmt->hw.init = &init;
 
-       *tx_clk = clk_register(NULL, &mgmt->hw);
+       *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
        if (IS_ERR(*tx_clk))
                return PTR_ERR(*tx_clk);
 
@@ -4416,7 +4420,6 @@ static int macb_probe(struct platform_device *pdev)
 
 err_disable_clocks:
        clk_disable_unprepare(tx_clk);
-       clk_unregister(tx_clk);
        clk_disable_unprepare(hclk);
        clk_disable_unprepare(pclk);
        clk_disable_unprepare(rx_clk);
@@ -4446,7 +4449,6 @@ static int macb_remove(struct platform_device *pdev)
                pm_runtime_dont_use_autosuspend(&pdev->dev);
                if (!pm_runtime_suspended(&pdev->dev)) {
                        clk_disable_unprepare(bp->tx_clk);
-                       clk_unregister(bp->tx_clk);
                        clk_disable_unprepare(bp->hclk);
                        clk_disable_unprepare(bp->pclk);
                        clk_disable_unprepare(bp->rx_clk);
index a70ac2097892d290f6ab183c9a076bdc8f4fc996..becee29f5df7a647981c142b5987c0d0fc652cd9 100644 (file)
@@ -504,6 +504,7 @@ struct link_config {
 
        enum cc_pause  requested_fc;     /* flow control user has requested */
        enum cc_pause  fc;               /* actual link flow control */
+       enum cc_pause  advertised_fc;    /* actual advertised flow control */
 
        enum cc_fec    requested_fec;    /* Forward Error Correction: */
        enum cc_fec    fec;              /* requested and actual in use */
index 20ab3b6285a2f750331aa14408dabf413c4c70bd..c837382ee522da107650f1c1f00c74741f7dda52 100644 (file)
@@ -807,8 +807,8 @@ static void get_pauseparam(struct net_device *dev,
        struct port_info *p = netdev_priv(dev);
 
        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
-       epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
-       epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+       epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
+       epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
 }
 
 static int set_pauseparam(struct net_device *dev,
index 12ff69b3ba91d740cd95dfef3861d82057f9f971..0dedd3e9c31e9752c3eb7b9982287159c4876f07 100644 (file)
@@ -3135,9 +3135,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
+       struct ch_sched_queue qe = { 0 };
+       struct ch_sched_params p = { 0 };
        struct sched_class *e;
-       struct ch_sched_params p;
-       struct ch_sched_queue qe;
        u32 req_rate;
        int err = 0;
 
@@ -3154,6 +3154,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
                return -EINVAL;
        }
 
+       qe.queue = index;
+       e = cxgb4_sched_queue_lookup(dev, &qe);
+       if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
+               dev_err(adap->pdev_dev,
+                       "Queue %u already bound to class %u of type: %u\n",
+                       index, e->idx, e->info.u.params.level);
+               return -EBUSY;
+       }
+
        /* Convert from Mbps to Kbps */
        req_rate = rate * 1000;
 
@@ -3183,7 +3192,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
                return 0;
 
        /* Fetch any available unused or matching scheduling class */
-       memset(&p, 0, sizeof(p));
        p.type = SCHED_CLASS_TYPE_PACKET;
        p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
        p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
index 102b370fbd3eff9ba791f15257624c2ffcf5f690..6d485803ddbe66b17b431468ca7bd3b3b3a5f9e9 100644 (file)
@@ -15,6 +15,8 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
        struct flow_action *actions = &cls->rule->action;
        struct port_info *pi = netdev2pinfo(dev);
        struct flow_action_entry *entry;
+       struct ch_sched_queue qe;
+       struct sched_class *e;
        u64 max_link_rate;
        u32 i, speed;
        int ret;
@@ -60,9 +62,61 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
                }
        }
 
+       for (i = 0; i < pi->nqsets; i++) {
+               memset(&qe, 0, sizeof(qe));
+               qe.queue = i;
+
+               e = cxgb4_sched_queue_lookup(dev, &qe);
+               if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Some queues are already bound to different class");
+                       return -EBUSY;
+               }
+       }
+
        return 0;
 }
 
+static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct ch_sched_queue qe;
+       int ret;
+       u32 i;
+
+       for (i = 0; i < pi->nqsets; i++) {
+               qe.queue = i;
+               qe.class = tc;
+               ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
+               if (ret)
+                       goto out_free;
+       }
+
+       return 0;
+
+out_free:
+       while (i--) {
+               qe.queue = i;
+               qe.class = SCHED_CLS_NONE;
+               cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+       }
+
+       return ret;
+}
+
+static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct ch_sched_queue qe;
+       u32 i;
+
+       for (i = 0; i < pi->nqsets; i++) {
+               qe.queue = i;
+               qe.class = SCHED_CLS_NONE;
+               cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+       }
+}
+
 static int cxgb4_matchall_alloc_tc(struct net_device *dev,
                                   struct tc_cls_matchall_offload *cls)
 {
@@ -83,6 +137,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
        struct adapter *adap = netdev2adap(dev);
        struct flow_action_entry *entry;
        struct sched_class *e;
+       int ret;
        u32 i;
 
        tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
@@ -101,10 +156,21 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
                return -ENOMEM;
        }
 
+       ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
+       if (ret) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Could not bind queues to traffic class");
+               goto out_free;
+       }
+
        tc_port_matchall->egress.hwtc = e->idx;
        tc_port_matchall->egress.cookie = cls->cookie;
        tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
        return 0;
+
+out_free:
+       cxgb4_sched_class_free(dev, e->idx);
+       return ret;
 }
 
 static void cxgb4_matchall_free_tc(struct net_device *dev)
@@ -114,6 +180,7 @@ static void cxgb4_matchall_free_tc(struct net_device *dev)
        struct adapter *adap = netdev2adap(dev);
 
        tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+       cxgb4_matchall_tc_unbind_queues(dev);
        cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
 
        tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
index 8971dddcdb7a835afe48020079a89524d2d5e746..ec3eb45ee3b4896f87ee8d9f739d829aacce4ea2 100644 (file)
@@ -12,8 +12,9 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        u32 speed, qcount = 0, qoffset = 0;
+       u32 start_a, start_b, end_a, end_b;
        int ret;
-       u8 i;
+       u8 i, j;
 
        if (!mqprio->qopt.num_tc)
                return 0;
@@ -47,6 +48,31 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
                qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
                qcount += mqprio->qopt.count[i];
 
+               start_a = mqprio->qopt.offset[i];
+               end_a = start_a + mqprio->qopt.count[i] - 1;
+               for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
+                       start_b = mqprio->qopt.offset[j];
+                       end_b = start_b + mqprio->qopt.count[j] - 1;
+
+                       /* If queue count is 0, then the traffic
+                        * belonging to this class will not use
+                        * ETHOFLD queues. So, no need to validate
+                        * further.
+                        */
+                       if (!mqprio->qopt.count[i])
+                               break;
+
+                       if (!mqprio->qopt.count[j])
+                               continue;
+
+                       if (max_t(u32, start_a, start_b) <=
+                           min_t(u32, end_a, end_b)) {
+                               netdev_err(dev,
+                                          "Queues can't overlap across tc\n");
+                               return -EINVAL;
+                       }
+               }
+
                /* Convert byte per second to bits per second */
                min_rate += (mqprio->min_rate[i] * 8);
                max_rate += (mqprio->max_rate[i] * 8);
index 3e61bd5d0c290c75b792d887e2dd2864eef13728..cebe1412d9609b8bfdb62ed5091c9dab64a5c3d2 100644 (file)
@@ -165,6 +165,22 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
        return found;
 }
 
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+                                            struct ch_sched_queue *p)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct sched_queue_entry *qe = NULL;
+       struct adapter *adap = pi->adapter;
+       struct sge_eth_txq *txq;
+
+       if (p->queue < 0 || p->queue >= pi->nqsets)
+               return NULL;
+
+       txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+       qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+       return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
+}
+
 static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
 {
        struct sched_queue_entry *qe = NULL;
index e92ff68bdd0aab504cf5e674b8d3268c008b4ed8..5cc74a5a177463cc68306f947becfa8fe9390089 100644 (file)
@@ -103,6 +103,8 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
        return true;
 }
 
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+                                            struct ch_sched_queue *p);
 int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
                           enum sched_bind_type type);
 int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
index 19d18acfc9a6f3fc79050b5e32bfcf5cdb4621f9..844fdcf55118b232204ab87dac45b25dba5b6d18 100644 (file)
@@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
                if (cc_pause & PAUSE_TX)
                        fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
                else
-                       fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+                       fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
+                                   FW_PORT_CAP32_802_3_PAUSE;
        } else if (cc_pause & PAUSE_TX) {
                fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
        }
@@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
 {
        const struct fw_port_cmd *cmd = (const void *)rpl;
-       int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
-       struct adapter *adapter = pi->adapter;
+       fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
        struct link_config *lc = &pi->link_cfg;
-       int link_ok, linkdnrc;
-       enum fw_port_type port_type;
+       struct adapter *adapter = pi->adapter;
+       unsigned int speed, fc, fec, adv_fc;
        enum fw_port_module_type mod_type;
-       unsigned int speed, fc, fec;
-       fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+       int action, link_ok, linkdnrc;
+       enum fw_port_type port_type;
 
        /* Extract the various fields from the Port Information message.
         */
+       action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
        switch (action) {
        case FW_PORT_ACTION_GET_PORT_INFO: {
                u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
@@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
        }
 
        fec = fwcap_to_cc_fec(acaps);
+       adv_fc = fwcap_to_cc_pause(acaps);
        fc = fwcap_to_cc_pause(linkattr);
        speed = fwcap_to_speed(linkattr);
 
@@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
        }
 
        if (link_ok != lc->link_ok || speed != lc->speed ||
-           fc != lc->fc || fec != lc->fec) {   /* something changed */
+           fc != lc->fc || adv_fc != lc->advertised_fc ||
+           fec != lc->fec) {
+               /* something changed */
                if (!link_ok && lc->link_ok) {
                        lc->link_down_rc = linkdnrc;
                        dev_warn_ratelimited(adapter->pdev_dev,
@@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
                }
                lc->link_ok = link_ok;
                lc->speed = speed;
+               lc->advertised_fc = adv_fc;
                lc->fc = fc;
                lc->fec = fec;
 
index f6fc0875d5b0a285cf4dc5695debdf96b3cf2744..f4d41f968afa270df17efe4d4f502b79dab5c01e 100644 (file)
@@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
        struct port_info *pi = netdev_priv(dev);
 
        pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
-       pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
-       pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
+       pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
+       pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
 }
 
 /*
index ccca67cf4487d22d06f7c2b42dc6b16647cec8d0..57cfd10a99ecc8508735aa4d5accd96d75176ebb 100644 (file)
@@ -135,6 +135,7 @@ struct link_config {
 
        enum cc_pause   requested_fc;   /* flow control user has requested */
        enum cc_pause   fc;             /* actual link flow control */
+       enum cc_pause   advertised_fc;  /* actual advertised flow control */
 
        enum cc_fec     auto_fec;       /* Forward Error Correction: */
        enum cc_fec     requested_fec;  /*   "automatic" (IEEE 802.3), */
index 8a389d617a238048f268be38771e37c808dd84e7..9d49ff211cc1a50ed0d9404a05119fb70e63dd73 100644 (file)
@@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
 static void t4vf_handle_get_port_info(struct port_info *pi,
                                      const struct fw_port_cmd *cmd)
 {
-       int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
-       struct adapter *adapter = pi->adapter;
+       fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
        struct link_config *lc = &pi->link_cfg;
-       int link_ok, linkdnrc;
-       enum fw_port_type port_type;
+       struct adapter *adapter = pi->adapter;
+       unsigned int speed, fc, fec, adv_fc;
        enum fw_port_module_type mod_type;
-       unsigned int speed, fc, fec;
-       fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+       int action, link_ok, linkdnrc;
+       enum fw_port_type port_type;
 
        /* Extract the various fields from the Port Information message. */
+       action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
        switch (action) {
        case FW_PORT_ACTION_GET_PORT_INFO: {
                u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
@@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
        }
 
        fec = fwcap_to_cc_fec(acaps);
+       adv_fc = fwcap_to_cc_pause(acaps);
        fc = fwcap_to_cc_pause(linkattr);
        speed = fwcap_to_speed(linkattr);
 
@@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
        }
 
        if (link_ok != lc->link_ok || speed != lc->speed ||
-           fc != lc->fc || fec != lc->fec) {   /* something changed */
+           fc != lc->fc || adv_fc != lc->advertised_fc ||
+           fec != lc->fec) {
+               /* something changed */
                if (!link_ok && lc->link_ok) {
                        lc->link_down_rc = linkdnrc;
                        dev_warn_ratelimited(adapter->pdev_dev,
@@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
                }
                lc->link_ok = link_ok;
                lc->speed = speed;
+               lc->advertised_fc = adv_fc;
                lc->fc = fc;
                lc->fec = fec;
 
index 6a9d12dad5d9d7be22a8f10970d8173e13dc858e..a301f00952231f75371879b8b254df8abfeb4942 100644 (file)
@@ -1719,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
        int page_offset;
        unsigned int sz;
        int *count_ptr;
-       int i;
+       int i, j;
 
        vaddr = phys_to_virt(addr);
        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
@@ -1736,14 +1736,14 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
                                    SMP_CACHE_BYTES));
 
+               dma_unmap_page(priv->rx_dma_dev, sg_addr,
+                              DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+
                /* We may use multiple Rx pools */
                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                if (!dpaa_bp)
                        goto free_buffers;
 
-               count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-               dma_unmap_page(priv->rx_dma_dev, sg_addr,
-                              DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                if (!skb) {
                        sz = dpaa_bp->size +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1786,7 +1786,9 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                        skb_add_rx_frag(skb, i - 1, head_page, frag_off,
                                        frag_len, dpaa_bp->size);
                }
+
                /* Update the pool count for the current {cpu x bpool} */
+               count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
                (*count_ptr)--;
 
                if (qm_sg_entry_is_final(&sgt[i]))
@@ -1800,26 +1802,25 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
        return skb;
 
 free_buffers:
-       /* compensate sw bpool counter changes */
-       for (i--; i >= 0; i--) {
-               dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-               if (dpaa_bp) {
-                       count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                       (*count_ptr)++;
-               }
-       }
        /* free all the SG entries */
-       for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
-               sg_addr = qm_sg_addr(&sgt[i]);
+       for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
+               sg_addr = qm_sg_addr(&sgt[j]);
                sg_vaddr = phys_to_virt(sg_addr);
+               /* all pages 0..i were unmaped */
+               if (j > i)
+                       dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
+                                      DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                free_pages((unsigned long)sg_vaddr, 0);
-               dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-               if (dpaa_bp) {
-                       count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                       (*count_ptr)--;
+               /* counters 0..i-1 were decremented */
+               if (j >= i) {
+                       dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
+                       if (dpaa_bp) {
+                               count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+                               (*count_ptr)--;
+                       }
                }
 
-               if (qm_sg_entry_is_final(&sgt[i]))
+               if (qm_sg_entry_is_final(&sgt[j]))
                        break;
        }
        /* free the SGT fragment */
index 05c1899f6628990e14e167e338fcc435c2ecea52..9294027e9d909836cf211c3955d75bc81c97de15 100644 (file)
@@ -2199,8 +2199,14 @@ static void fec_enet_get_regs(struct net_device *ndev,
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+       struct device *dev = &fep->pdev->dev;
        u32 *buf = (u32 *)regbuf;
        u32 i, off;
+       int ret;
+
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0)
+               return;
 
        regs->version = fec_enet_register_version;
 
@@ -2216,6 +2222,9 @@ static void fec_enet_get_regs(struct net_device *ndev,
                off >>= 2;
                buf[off] = readl(&theregs[off]);
        }
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
 }
 
 static int fec_enet_get_ts_info(struct net_device *ndev,
index edec61dfc8687fff6f4867e4463753d28aab202e..9f52e72ff641d91bbeb43f2efe8d12c5a5184204 100644 (file)
@@ -418,8 +418,6 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
        rx->cnt = cnt;
        rx->fill_cnt += work_done;
 
-       /* restock desc ring slots */
-       dma_wmb();      /* Ensure descs are visible before ringing doorbell */
        gve_rx_write_doorbell(priv, rx);
        return gve_rx_work_pending(rx);
 }
index f4889431f9b7049f0046338a3bed0884ade84782..d0244feb030118e857ec44155128133322c3272a 100644 (file)
@@ -487,10 +487,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
                 * may have added descriptors without ringing the doorbell.
                 */
 
-               /* Ensure tx descs from a prior gve_tx are visible before
-                * ringing doorbell.
-                */
-               dma_wmb();
                gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
                return NETDEV_TX_BUSY;
        }
@@ -505,8 +501,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
        if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
                return NETDEV_TX_OK;
 
-       /* Ensure tx descs are visible before ringing doorbell */
-       dma_wmb();
        gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
        return NETDEV_TX_OK;
 }
index 14ab20491fd02b1c867460ce42b4aa310f50a162..eb69e5c81a4d05a26a7210dc8bda00126ddbab25 100644 (file)
@@ -565,7 +565,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
        skb = *out_skb = napi_alloc_skb(&ring_data->napi,
                                        HNS_RX_HEAD_SIZE);
        if (unlikely(!skb)) {
-               netdev_err(ndev, "alloc rx skb fail\n");
                ring->stats.sw_err_cnt++;
                return -ENOMEM;
        }
@@ -1056,7 +1055,6 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
                container_of(napi, struct hns_nic_ring_data, napi);
        struct hnae_ring *ring = ring_data->ring;
 
-try_again:
        clean_complete += ring_data->poll_one(
                                ring_data, budget - clean_complete,
                                ring_data->ex_process);
@@ -1066,7 +1064,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
                        napi_complete(napi);
                        ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
                } else {
-                       goto try_again;
+                       return budget;
                }
        }
 
index 69545dd6c9380af7504dca623eafd59448f5930b..b3deb5e5ce29fbea66b38f84e9355c200505b9b8 100644 (file)
@@ -54,6 +54,8 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
 #define HNS3_INNER_VLAN_TAG    1
 #define HNS3_OUTER_VLAN_TAG    2
 
+#define HNS3_MIN_TX_LEN                33U
+
 /* hns3_pci_tbl - PCI Device ID Table
  *
  * Last entry must be all 0s
@@ -1405,6 +1407,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
        int bd_num = 0;
        int ret;
 
+       /* Hardware can only handle short frames above 32 bytes */
+       if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+               return NETDEV_TX_OK;
+
        /* Prefetch the data used later */
        prefetch(skb->data);
 
index 6c51b1bad8c423ea243587446d3e5fc6705ca9ad..37a2314d3e6b187dd061793870af2fc46552829b 100644 (file)
@@ -185,13 +185,12 @@ struct e1000_phy_regs {
 
 /* board specific private data structure */
 struct e1000_adapter {
+       struct timer_list watchdog_timer;
        struct timer_list phy_info_timer;
        struct timer_list blink_timer;
 
        struct work_struct reset_task;
-       struct delayed_work watchdog_task;
-
-       struct workqueue_struct *e1000_workqueue;
+       struct work_struct watchdog_task;
 
        const struct e1000_info *ei;
 
index fe7997c18a109a2935978ca5248cd7413e0a342b..7c5b18d87b49b6fd822beb2dd254e4e0ad196759 100644 (file)
@@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
                }
                /* guard against interrupt when we're going down */
                if (!test_bit(__E1000_DOWN, &adapter->state))
-                       mod_delayed_work(adapter->e1000_workqueue,
-                                        &adapter->watchdog_task, HZ);
+                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
        /* Reset on uncorrectable ECC error */
@@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
                }
                /* guard against interrupt when we're going down */
                if (!test_bit(__E1000_DOWN, &adapter->state))
-                       mod_delayed_work(adapter->e1000_workqueue,
-                                        &adapter->watchdog_task, HZ);
+                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
        /* Reset on uncorrectable ECC error */
@@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
                hw->mac.get_link_status = true;
                /* guard against interrupt when we're going down */
                if (!test_bit(__E1000_DOWN, &adapter->state))
-                       mod_delayed_work(adapter->e1000_workqueue,
-                                        &adapter->watchdog_task, HZ);
+                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
        if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -4284,6 +4281,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
 
        napi_synchronize(&adapter->napi);
 
+       del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
        spin_lock(&adapter->stats64_lock);
@@ -5155,11 +5153,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
        }
 }
 
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void e1000_watchdog(struct timer_list *t)
+{
+       struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+
+       /* Do the rest outside of interrupt context */
+       schedule_work(&adapter->watchdog_task);
+
+       /* TODO: make this use queue_delayed_work() */
+}
+
 static void e1000_watchdog_task(struct work_struct *work)
 {
        struct e1000_adapter *adapter = container_of(work,
                                                     struct e1000_adapter,
-                                                    watchdog_task.work);
+                                                    watchdog_task);
        struct net_device *netdev = adapter->netdev;
        struct e1000_mac_info *mac = &adapter->hw.mac;
        struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -5407,9 +5419,8 @@ static void e1000_watchdog_task(struct work_struct *work)
 
        /* Reset the timer */
        if (!test_bit(__E1000_DOWN, &adapter->state))
-               queue_delayed_work(adapter->e1000_workqueue,
-                                  &adapter->watchdog_task,
-                                  round_jiffies(2 * HZ));
+               mod_timer(&adapter->watchdog_timer,
+                         round_jiffies(jiffies + 2 * HZ));
 }
 
 #define E1000_TX_FLAGS_CSUM            0x00000001
@@ -7449,21 +7460,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_eeprom;
        }
 
-       adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
-                                                  e1000e_driver_name);
-
-       if (!adapter->e1000_workqueue) {
-               err = -ENOMEM;
-               goto err_workqueue;
-       }
-
-       INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
-       queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
-                          0);
-
+       timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
        timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
 
        INIT_WORK(&adapter->reset_task, e1000_reset_task);
+       INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
        INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
        INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
        INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
@@ -7557,9 +7558,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 err_register:
-       flush_workqueue(adapter->e1000_workqueue);
-       destroy_workqueue(adapter->e1000_workqueue);
-err_workqueue:
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_release_hw_control(adapter);
 err_eeprom:
@@ -7604,17 +7602,15 @@ static void e1000_remove(struct pci_dev *pdev)
         * from being rescheduled.
         */
        set_bit(__E1000_DOWN, &adapter->state);
+       del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
        cancel_work_sync(&adapter->reset_task);
+       cancel_work_sync(&adapter->watchdog_task);
        cancel_work_sync(&adapter->downshift_task);
        cancel_work_sync(&adapter->update_phy_task);
        cancel_work_sync(&adapter->print_hang_task);
 
-       cancel_delayed_work(&adapter->watchdog_task);
-       flush_workqueue(adapter->e1000_workqueue);
-       destroy_workqueue(adapter->e1000_workqueue);
-
        if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
                cancel_work_sync(&adapter->tx_hwtstamp_work);
                if (adapter->tx_hwtstamp_skb) {
index 9f0a4e92a2317f82a6bf521ce7610b28ba6391da..37514a75f9288c94c026daee1947b191251b44b4 100644 (file)
@@ -536,6 +536,11 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
                        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+               if (aq->api_maj_ver > 1 ||
+                   (aq->api_maj_ver == 1 &&
+                    aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
+                       hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
                /* fall through */
        default:
                break;
index 6a3f0fc56c3b95406d9782ba52c8bb6b65657e7b..69523ac85639ef2204223352e86c5bedb62a9fbb 100644 (file)
@@ -2321,6 +2321,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
        return ret;
 }
 
+/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+       if ((!vqs->rx_queues && !vqs->tx_queues) ||
+           vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+           vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+               return false;
+
+       return true;
+}
+
 /**
  * i40e_vc_enable_queues_msg
  * @vf: pointer to the VF info
@@ -2346,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
                goto error_param;
        }
 
-       if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+       if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2408,9 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
                goto error_param;
        }
 
-       if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
-           vqs->rx_queues > I40E_MAX_VF_QUEUES ||
-           vqs->tx_queues > I40E_MAX_VF_QUEUES) {
+       if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
index 29de3ae96ef222d7545abead8c9004bacb312561..bd1b1ed323f4fe9e16134d31b99aad7341b5e89c 100644 (file)
@@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
 void iavf_disable_channels(struct iavf_adapter *adapter);
 void iavf_add_cloud_filter(struct iavf_adapter *adapter);
 void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+                                       const u8 *macaddr);
 #endif /* _IAVF_H_ */
index 821987da5698ac8cf0468ac90e6f0fee460916ba..8e16be960e96b64196285916e268035cedf70449 100644 (file)
@@ -743,9 +743,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
  *
  * Returns ptr to the filter object or NULL when no memory available.
  **/
-static struct
-iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
-                                const u8 *macaddr)
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+                                       const u8 *macaddr)
 {
        struct iavf_mac_filter *f;
 
@@ -2065,9 +2064,9 @@ static void iavf_reset_task(struct work_struct *work)
        struct virtchnl_vf_resource *vfres = adapter->vf_res;
        struct net_device *netdev = adapter->netdev;
        struct iavf_hw *hw = &adapter->hw;
+       struct iavf_mac_filter *f, *ftmp;
        struct iavf_vlan_filter *vlf;
        struct iavf_cloud_filter *cf;
-       struct iavf_mac_filter *f;
        u32 reg_val;
        int i = 0, err;
        bool running;
@@ -2181,6 +2180,16 @@ static void iavf_reset_task(struct work_struct *work)
 
        spin_lock_bh(&adapter->mac_vlan_list_lock);
 
+       /* Delete filter for the current MAC address, it could have
+        * been changed by the PF via administratively set MAC.
+        * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+        */
+       list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+               if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
+                       list_del(&f->list);
+                       kfree(f);
+               }
+       }
        /* re-add all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->add = true;
index c46770eba320e6b8a05d67e566db95da1636e506..1ab9cb339acb4d195c4433d9fc0254189586cc6b 100644 (file)
@@ -1359,6 +1359,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                        ether_addr_copy(netdev->perm_addr,
                                        adapter->hw.mac.addr);
                }
+               spin_lock_bh(&adapter->mac_vlan_list_lock);
+               iavf_add_filter(adapter, adapter->hw.mac.addr);
+               spin_unlock_bh(&adapter->mac_vlan_list_lock);
                iavf_process_config(adapter);
                }
                break;
index 8a6ef351412921c729db50e686201b51ff4795ec..438b42ce2cd9aa84931c9e699dd14c2158bdb35f 100644 (file)
@@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
                dev_spec->module_plugged = true;
                if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
                        hw->phy.media_type = e1000_media_type_internal_serdes;
-               } else if (eth_flags->e100_base_fx) {
+               } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
                        dev_spec->sgmii_active = true;
                        hw->phy.media_type = e1000_media_type_internal_serdes;
                } else if (eth_flags->e1000_base_t) {
@@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                        break;
                }
 
-               /* do not change link mode for 100BaseFX */
-               if (dev_spec->eth_flags.e100_base_fx)
-                       break;
-
                /* change current link mode setting */
                ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
 
-               if (hw->phy.media_type == e1000_media_type_copper)
+               if (dev_spec->sgmii_active)
                        ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
                else
                        ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
index 4690d6c87f39a52ce839e35b8983a157b373e436..445fbdce3e2592eed5fb2c2bf951dcacd714b85d 100644 (file)
@@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev,
                                advertising &= ~ADVERTISED_1000baseKX_Full;
                        }
                }
-               if (eth_flags->e100_base_fx) {
+               if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
                        supported |= SUPPORTED_100baseT_Full;
                        advertising |= ADVERTISED_100baseT_Full;
                }
index 82a30b597cf98986b921265226617aa0c1cf40db..a2b2ad1f60b111db43e1c2c161efb199e0aeafe7 100644 (file)
@@ -5239,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct hlist_node *node2;
        struct ixgbe_fdir_filter *filter;
-       u64 action;
+       u8 queue;
 
        spin_lock(&adapter->fdir_perfect_lock);
 
@@ -5248,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 
        hlist_for_each_entry_safe(filter, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
-               action = filter->action;
-               if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
-                       action =
-                       (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+               if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
+                       queue = IXGBE_FDIR_DROP_QUEUE;
+               } else {
+                       u32 ring = ethtool_get_flow_spec_ring(filter->action);
+                       u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
+
+                       if (!vf && (ring >= adapter->num_rx_queues)) {
+                               e_err(drv, "FDIR restore failed without VF, ring: %u\n",
+                                     ring);
+                               continue;
+                       } else if (vf &&
+                                  ((vf > adapter->num_vfs) ||
+                                    ring >= adapter->num_rx_queues_per_pool)) {
+                               e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
+                                     vf, ring);
+                               continue;
+                       }
+
+                       /* Map the ring onto the absolute queue index */
+                       if (!vf)
+                               queue = adapter->rx_ring[ring]->reg_idx;
+                       else
+                               queue = ((vf - 1) *
+                                       adapter->num_rx_queues_per_pool) + ring;
+               }
 
                ixgbe_fdir_write_perfect_filter_82599(hw,
-                               &filter->filter,
-                               filter->sw_idx,
-                               (action == IXGBE_FDIR_DROP_QUEUE) ?
-                               IXGBE_FDIR_DROP_QUEUE :
-                               adapter->rx_ring[action]->reg_idx);
+                               &filter->filter, filter->sw_idx, queue);
        }
 
        spin_unlock(&adapter->fdir_perfect_lock);
index 076f2da36f27825907ee817e8c919c72fde3a22d..64ec0e7c64b49504c108a1a423498e7e31808890 100644 (file)
@@ -2081,11 +2081,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
        struct ixgbe_hw *hw = &adapter->hw;
        int count = 0;
 
-       if ((netdev_uc_count(netdev)) > 10) {
-               pr_err("Too many unicast filters - No Space\n");
-               return -ENOSPC;
-       }
-
        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;
 
index 71a872d46bc487ea3a9113e03b8ccaec89f2c0f8..67ad8b8b127d0e1cfc41d851efd5e9d7e7b0c521 100644 (file)
@@ -2081,7 +2081,11 @@ static int
 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
               struct bpf_prog *prog, struct xdp_buff *xdp)
 {
-       u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+       unsigned int len;
+       u32 ret, act;
+
+       len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+       act = bpf_prog_run_xdp(prog, xdp);
 
        switch (act) {
        case XDP_PASS:
@@ -2094,9 +2098,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                if (err) {
                        ret = MVNETA_XDP_DROPPED;
                        __page_pool_put_page(rxq->page_pool,
-                                       virt_to_head_page(xdp->data),
-                                       xdp->data_end - xdp->data_hard_start,
-                                       true);
+                                            virt_to_head_page(xdp->data),
+                                            len, true);
                } else {
                        ret = MVNETA_XDP_REDIR;
                }
@@ -2106,9 +2109,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                ret = mvneta_xdp_xmit_back(pp, xdp);
                if (ret != MVNETA_XDP_TX)
                        __page_pool_put_page(rxq->page_pool,
-                                       virt_to_head_page(xdp->data),
-                                       xdp->data_end - xdp->data_hard_start,
-                                       true);
+                                            virt_to_head_page(xdp->data),
+                                            len, true);
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2119,8 +2121,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
        case XDP_DROP:
                __page_pool_put_page(rxq->page_pool,
                                     virt_to_head_page(xdp->data),
-                                    xdp->data_end - xdp->data_hard_start,
-                                    true);
+                                    len, true);
                ret = MVNETA_XDP_DROPPED;
                break;
        }
index eaf08f7ad12843b661ce6cb0571ab3ab81e0871b..64ed725aec2858b6ead1990c6cb83d0eb448a0cb 100644 (file)
@@ -182,7 +182,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
        crdump_enable_crspace_access(dev, cr_space);
 
        /* Get the available snapshot ID for the dumps */
-       id = devlink_region_shapshot_id_get(devlink);
+       id = devlink_region_snapshot_id_get(devlink);
 
        /* Try to capture dumps */
        mlx4_crdump_collect_crspace(dev, cr_space, id);
index 68d593074f6c8f9765188c192b92a0fbdadd481c..d48292ccda294b917a3ca42876b34d5b6059afe7 100644 (file)
@@ -122,6 +122,22 @@ enum {
 #endif
 };
 
+#define MLX5E_TTC_NUM_GROUPS   3
+#define MLX5E_TTC_GROUP1_SIZE  (BIT(3) + MLX5E_NUM_TUNNEL_TT)
+#define MLX5E_TTC_GROUP2_SIZE   BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE   BIT(0)
+#define MLX5E_TTC_TABLE_SIZE   (MLX5E_TTC_GROUP1_SIZE +\
+                                MLX5E_TTC_GROUP2_SIZE +\
+                                MLX5E_TTC_GROUP3_SIZE)
+
+#define MLX5E_INNER_TTC_NUM_GROUPS     3
+#define MLX5E_INNER_TTC_GROUP1_SIZE    BIT(3)
+#define MLX5E_INNER_TTC_GROUP2_SIZE    BIT(1)
+#define MLX5E_INNER_TTC_GROUP3_SIZE    BIT(0)
+#define MLX5E_INNER_TTC_TABLE_SIZE     (MLX5E_INNER_TTC_GROUP1_SIZE +\
+                                        MLX5E_INNER_TTC_GROUP2_SIZE +\
+                                        MLX5E_INNER_TTC_GROUP3_SIZE)
+
 #ifdef CONFIG_MLX5_EN_RXNFC
 
 struct mlx5e_ethtool_table {
index 1d6b58860da6d40d6b2cb0dfebe292b9bc364ade..3a975641f902adbb5da9286e39dbe3bb84c71b61 100644 (file)
@@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
                        struct devlink_health_reporter *reporter, char *err_str,
                        struct mlx5e_err_ctx *err_ctx)
 {
-       if (!reporter) {
-               netdev_err(priv->netdev, err_str);
+       netdev_err(priv->netdev, err_str);
+
+       if (!reporter)
                return err_ctx->recover(&err_ctx->ctx);
-       }
+
        return devlink_health_report(reporter, err_str, err_ctx);
 }
index 15b7f0f1427c201fe4e29e8c81304d4718e3f17c..73d3dc07331f194bccef3b32fa99d7b6e3230656 100644 (file)
@@ -904,22 +904,6 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
        return err;
 }
 
-#define MLX5E_TTC_NUM_GROUPS   3
-#define MLX5E_TTC_GROUP1_SIZE  (BIT(3) + MLX5E_NUM_TUNNEL_TT)
-#define MLX5E_TTC_GROUP2_SIZE   BIT(1)
-#define MLX5E_TTC_GROUP3_SIZE   BIT(0)
-#define MLX5E_TTC_TABLE_SIZE   (MLX5E_TTC_GROUP1_SIZE +\
-                                MLX5E_TTC_GROUP2_SIZE +\
-                                MLX5E_TTC_GROUP3_SIZE)
-
-#define MLX5E_INNER_TTC_NUM_GROUPS     3
-#define MLX5E_INNER_TTC_GROUP1_SIZE    BIT(3)
-#define MLX5E_INNER_TTC_GROUP2_SIZE    BIT(1)
-#define MLX5E_INNER_TTC_GROUP3_SIZE    BIT(0)
-#define MLX5E_INNER_TTC_TABLE_SIZE     (MLX5E_INNER_TTC_GROUP1_SIZE +\
-                                        MLX5E_INNER_TTC_GROUP2_SIZE +\
-                                        MLX5E_INNER_TTC_GROUP3_SIZE)
-
 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
                                         bool use_ipv)
 {
index 9b32a9c0f4979100949af0f1efc02a5d02e5f847..024e1cddfd0e86e1486f98ae6593fbba93e8496f 100644 (file)
@@ -592,7 +592,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
 
-       ft_attr->max_fte = MLX5E_NUM_TT;
+       ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
        ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
        ft_attr->prio = MLX5E_TC_PRIO;
 }
@@ -2999,6 +2999,25 @@ static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info
        return kmemdup(tun_info, tun_size, GFP_KERNEL);
 }
 
+static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
+                                     struct mlx5e_tc_flow *flow,
+                                     int out_index,
+                                     struct mlx5e_encap_entry *e,
+                                     struct netlink_ext_ack *extack)
+{
+       int i;
+
+       for (i = 0; i < out_index; i++) {
+               if (flow->encaps[i].e != e)
+                       continue;
+               NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
+               netdev_err(priv->netdev, "can't duplicate encap action\n");
+               return true;
+       }
+
+       return false;
+}
+
 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow,
                              struct net_device *mirred_dev,
@@ -3034,6 +3053,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 
        /* must verify if encap is valid or not */
        if (e) {
+               /* Check that entry was not already attached to this flow */
+               if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
+                       err = -EOPNOTSUPP;
+                       goto out_err;
+               }
+
                mutex_unlock(&esw->offloads.encap_tbl_lock);
                wait_for_completion(&e->res_ready);
 
@@ -3220,6 +3245,26 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
               same_hw_devs(priv, netdev_priv(out_dev));
 }
 
+static bool is_duplicated_output_device(struct net_device *dev,
+                                       struct net_device *out_dev,
+                                       int *ifindexes, int if_count,
+                                       struct netlink_ext_ack *extack)
+{
+       int i;
+
+       for (i = 0; i < if_count; i++) {
+               if (ifindexes[i] == out_dev->ifindex) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "can't duplicate output to same device");
+                       netdev_err(dev, "can't duplicate output to same device: %s\n",
+                                  out_dev->name);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                struct flow_action *flow_action,
                                struct mlx5e_tc_flow *flow,
@@ -3231,11 +3276,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
        struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        const struct ip_tunnel_info *info = NULL;
+       int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
        bool ft_flow = mlx5e_is_ft_flow(flow);
        const struct flow_action_entry *act;
+       int err, i, if_count = 0;
        bool encap = false;
        u32 action = 0;
-       int err, i;
 
        if (!flow_action_has_entries(flow_action))
                return -EINVAL;
@@ -3312,6 +3358,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
                                struct net_device *uplink_upper;
 
+                               if (is_duplicated_output_device(priv->netdev,
+                                                               out_dev,
+                                                               ifindexes,
+                                                               if_count,
+                                                               extack))
+                                       return -EOPNOTSUPP;
+
+                               ifindexes[if_count] = out_dev->ifindex;
+                               if_count++;
+
                                rcu_read_lock();
                                uplink_upper =
                                        netdev_master_upper_dev_get_rcu(uplink_dev);
index 9a48c4310887d1c15a83ac765fa439053cbf21c0..8c5df6c7d7b6b98fc73c1c9750433f11190df01c 100644 (file)
@@ -531,16 +531,9 @@ static void del_hw_fte(struct fs_node *node)
        }
 }
 
-static void del_sw_fte_rcu(struct rcu_head *head)
-{
-       struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
-       struct mlx5_flow_steering *steering = get_steering(&fte->node);
-
-       kmem_cache_free(steering->ftes_cache, fte);
-}
-
 static void del_sw_fte(struct fs_node *node)
 {
+       struct mlx5_flow_steering *steering = get_steering(node);
        struct mlx5_flow_group *fg;
        struct fs_fte *fte;
        int err;
@@ -553,8 +546,7 @@ static void del_sw_fte(struct fs_node *node)
                                     rhash_fte);
        WARN_ON(err);
        ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
-
-       call_rcu(&fte->rcu, del_sw_fte_rcu);
+       kmem_cache_free(steering->ftes_cache, fte);
 }
 
 static void del_hw_flow_group(struct fs_node *node)
@@ -1633,47 +1625,22 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
 }
 
 static struct fs_fte *
-lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
+lookup_fte_locked(struct mlx5_flow_group *g,
+                 const u32 *match_value,
+                 bool take_write)
 {
        struct fs_fte *fte_tmp;
 
-       nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-
-       fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
-       if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
-               fte_tmp = NULL;
-               goto out;
-       }
-
-       if (!fte_tmp->node.active) {
-               tree_put_node(&fte_tmp->node, false);
-               fte_tmp = NULL;
-               goto out;
-       }
-       nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
-out:
-       up_write_ref_node(&g->node, false);
-       return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
-{
-       struct fs_fte *fte_tmp;
-
-       if (!tree_get_node(&g->node))
-               return NULL;
-
-       rcu_read_lock();
-       fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
+       if (take_write)
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+       else
+               nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+       fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+                                        rhash_fte);
        if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
-               rcu_read_unlock();
                fte_tmp = NULL;
                goto out;
        }
-       rcu_read_unlock();
-
        if (!fte_tmp->node.active) {
                tree_put_node(&fte_tmp->node, false);
                fte_tmp = NULL;
@@ -1681,19 +1648,12 @@ lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
        }
 
        nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
 out:
-       tree_put_node(&g->node, false);
-       return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
-{
-       if (write)
-               return lookup_fte_for_write_locked(g, match_value);
+       if (take_write)
+               up_write_ref_node(&g->node, false);
        else
-               return lookup_fte_for_read_locked(g, match_value);
+               up_read_ref_node(&g->node);
+       return fte_tmp;
 }
 
 static struct mlx5_flow_handle *
index e8cd997f413eb86cf5f3e5e75e7a75801a8c4f12..c2621b9115633b53fe21d738bcd909f53d1bf108 100644 (file)
@@ -203,7 +203,6 @@ struct fs_fte {
        enum fs_fte_status              status;
        struct mlx5_fc                  *counter;
        struct rhash_head               hash;
-       struct rcu_head rcu;
        int                             modify_mask;
 };
 
index 173e2c12e1c782ea02099920a5857a8206e72bb9..cf7b8da0f0105ebce808c3e05853ab6fd3dd3b7c 100644 (file)
@@ -1193,6 +1193,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
        if (err)
                goto err_load;
 
+       if (boot) {
+               err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+               if (err)
+                       goto err_devlink_reg;
+       }
+
        if (mlx5_device_registered(dev)) {
                mlx5_attach_device(dev);
        } else {
@@ -1210,6 +1216,9 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
        return err;
 
 err_reg_dev:
+       if (boot)
+               mlx5_devlink_unregister(priv_to_devlink(dev));
+err_devlink_reg:
        mlx5_unload(dev);
 err_load:
        if (boot)
@@ -1347,10 +1356,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        request_module_nowait(MLX5_IB_MOD);
 
-       err = mlx5_devlink_register(devlink, &pdev->dev);
-       if (err)
-               goto clean_load;
-
        err = mlx5_crdump_enable(dev);
        if (err)
                dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
@@ -1358,9 +1363,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        pci_save_state(pdev);
        return 0;
 
-clean_load:
-       mlx5_unload_one(dev, true);
-
 err_load_one:
        mlx5_pci_close(dev);
 pci_init_err:
index 32e94d2ee5e4ab0381f4c904a0b9031f3709126e..e4cff7abb348e1087ed85bfef733a990912ac947 100644 (file)
@@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
        /* We need to copy the refcount since this ste
         * may have been traversed several times
         */
-       refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
+       new_ste->refcount = cur_ste->refcount;
 
        /* Link old STEs rule_mem list to the new ste */
        mlx5dr_rule_update_rule_member(cur_ste, new_ste);
@@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
        if (!rule_mem)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&rule_mem->list);
+       INIT_LIST_HEAD(&rule_mem->use_ste_list);
+
        rule_mem->ste = ste;
        list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
 
index a5a266983dd33395536e1b73f48fb63c19d3f020..c6c7d1defbd788ee657f45c8e3914c6e8393243e 100644 (file)
@@ -348,7 +348,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
        if (dst->next_htbl)
                dst->next_htbl->pointing_ste = dst;
 
-       refcount_set(&dst->refcount, refcount_read(&src->refcount));
+       dst->refcount = src->refcount;
 
        INIT_LIST_HEAD(&dst->rule_list);
        list_splice_tail_init(&src->rule_list, &dst->rule_list);
@@ -565,7 +565,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
 
 bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
 {
-       return !refcount_read(&ste->refcount);
+       return !ste->refcount;
 }
 
 /* Init one ste as a pattern for ste data array */
@@ -689,14 +689,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
        htbl->ste_arr = chunk->ste_arr;
        htbl->hw_ste_arr = chunk->hw_ste_arr;
        htbl->miss_list = chunk->miss_list;
-       refcount_set(&htbl->refcount, 0);
+       htbl->refcount = 0;
 
        for (i = 0; i < chunk->num_of_entries; i++) {
                struct mlx5dr_ste *ste = &htbl->ste_arr[i];
 
                ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
                ste->htbl = htbl;
-               refcount_set(&ste->refcount, 0);
+               ste->refcount = 0;
                INIT_LIST_HEAD(&ste->miss_list_node);
                INIT_LIST_HEAD(&htbl->miss_list[i]);
                INIT_LIST_HEAD(&ste->rule_list);
@@ -713,7 +713,7 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
 
 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
 {
-       if (refcount_read(&htbl->refcount))
+       if (htbl->refcount)
                return -EBUSY;
 
        mlx5dr_icm_free_chunk(htbl->chunk);
index 290fe61c33d0bc37152880c17647870c24365a59..3fdf4a5eb031b72d67f2290cf61a2d4152a8fc92 100644 (file)
@@ -123,7 +123,7 @@ struct mlx5dr_matcher_rx_tx;
 struct mlx5dr_ste {
        u8 *hw_ste;
        /* refcount: indicates the num of rules that using this ste */
-       refcount_t refcount;
+       u32 refcount;
 
        /* attached to the miss_list head at each htbl entry */
        struct list_head miss_list_node;
@@ -155,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
 struct mlx5dr_ste_htbl {
        u8 lu_type;
        u16 byte_mask;
-       refcount_t refcount;
+       u32 refcount;
        struct mlx5dr_icm_chunk *chunk;
        struct mlx5dr_ste *ste_arr;
        u8 *hw_ste_arr;
@@ -206,13 +206,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
 
 static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
 {
-       if (refcount_dec_and_test(&htbl->refcount))
+       htbl->refcount--;
+       if (!htbl->refcount)
                mlx5dr_ste_htbl_free(htbl);
 }
 
 static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
 {
-       refcount_inc(&htbl->refcount);
+       htbl->refcount++;
 }
 
 /* STE utils */
@@ -254,14 +255,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
                                  struct mlx5dr_matcher *matcher,
                                  struct mlx5dr_matcher_rx_tx *nic_matcher)
 {
-       if (refcount_dec_and_test(&ste->refcount))
+       ste->refcount--;
+       if (!ste->refcount)
                mlx5dr_ste_free(ste, matcher, nic_matcher);
 }
 
 /* initial as 0, increased only when ste appears in a new rule */
 static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
 {
-       refcount_inc(&ste->refcount);
+       ste->refcount++;
 }
 
 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
index 544344ac4894cb5d24335940afd703fe2fcd0d9a..79057af4fe997a9760d58af40b40ba0f19b9d647 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netlink.h>
+#include <linux/vmalloc.h>
 #include <linux/xz.h>
 #include "mlxfw_mfa2.h"
 #include "mlxfw_mfa2_file.h"
@@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
        comp_size = be32_to_cpu(comp->size);
        comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
 
-       comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+       comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
        if (!comp_data)
                return ERR_PTR(-ENOMEM);
        comp_data->comp.data_size = comp_size;
@@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
        comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
        return &comp_data->comp;
 err_out:
-       kfree(comp_data);
+       vfree(comp_data);
        return ERR_PTR(err);
 }
 
@@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
        const struct mlxfw_mfa2_comp_data *comp_data;
 
        comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
-       kfree(comp_data);
+       vfree(comp_data);
 }
 
 void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
index 5294a1622643ead2c662c4915bb670234cace2cb..af30e8a76682093b00d8fca0c7d3d06ca9eb0aba 100644 (file)
@@ -5472,6 +5472,7 @@ enum mlxsw_reg_htgt_trap_group {
        MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
        MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
        MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+       MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
 
        __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
        MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
index 556dca328bb514b84ce6e34f8e01f719d24b6408..8ed15199eb4f3ffba26beebf34fbe16caaa0550d 100644 (file)
@@ -860,23 +860,17 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
        u64 len;
        int err;
 
+       if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+               this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
 
        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
                return NETDEV_TX_BUSY;
 
-       if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
-               struct sk_buff *skb_orig = skb;
-
-               skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
-               if (!skb) {
-                       this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
-                       dev_kfree_skb_any(skb_orig);
-                       return NETDEV_TX_OK;
-               }
-               dev_consume_skb_any(skb_orig);
-       }
-
        if (eth_skb_pad(skb)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                return NETDEV_TX_OK;
@@ -1215,6 +1209,9 @@ static void update_stats_cache(struct work_struct *work)
                             periodic_hw_stats.update_dw.work);
 
        if (!netif_carrier_ok(mlxsw_sp_port->dev))
+               /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
+                * necessary when port goes down.
+                */
                goto out;
 
        mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
@@ -4324,6 +4321,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
        return 0;
 }
 
+static void
+mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       int i;
+
+       for (i = 0; i < TC_MAX_QUEUE; i++)
+               mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
+}
+
 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
                                     char *pude_pl, void *priv)
 {
@@ -4345,6 +4351,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
        } else {
                netdev_info(mlxsw_sp_port->dev, "link down\n");
                netif_carrier_off(mlxsw_sp_port->dev);
+               mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
        }
 }
 
@@ -4542,8 +4549,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
-       MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
-       MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+       MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
+       MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
        /* PKT Sample trap */
        MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
                  false, SP_IP2ME, DISCARD),
@@ -4626,6 +4633,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
                        rate = 19 * 1024;
                        burst_size = 12;
                        break;
+               case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
+                       rate = 360;
+                       burst_size = 7;
+                       break;
                default:
                        continue;
                }
@@ -4665,6 +4676,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
+               case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
                        priority = 5;
                        tc = 5;
                        break;
@@ -5127,6 +5139,27 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
        return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
 }
 
+static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
+                         const struct mlxsw_bus_info *mlxsw_bus_info,
+                         struct netlink_ext_ack *extack)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+       mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+       mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+       mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+       mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+       mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+       mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+       mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+       mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+       mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+       mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+       mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+
+       return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -5629,7 +5662,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
 static struct mlxsw_driver mlxsw_sp3_driver = {
        .kind                           = mlxsw_sp3_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
-       .init                           = mlxsw_sp2_init,
+       .init                           = mlxsw_sp3_init,
        .fini                           = mlxsw_sp_fini,
        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
        .port_split                     = mlxsw_sp_port_split,
index 68cc6737d45ce375af72e29f25c3a51fde6b66dc..0124bfe1963b7fc08b8b9732daa4ca410bba3427 100644 (file)
@@ -195,6 +195,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
        return -EOPNOTSUPP;
 }
 
+static u64
+mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+       return xstats->backlog[tclass_num] +
+              xstats->backlog[tclass_num + 8];
+}
+
+static u64
+mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+       return xstats->tail_drop[tclass_num] +
+              xstats->tail_drop[tclass_num + 8];
+}
+
 static void
 mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
                                       u8 prio_bitmap, u64 *tx_packets,
@@ -269,7 +283,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                                               &stats_base->tx_bytes);
        red_base->prob_mark = xstats->ecn;
        red_base->prob_drop = xstats->wred_drop[tclass_num];
-       red_base->pdrop = xstats->tail_drop[tclass_num];
+       red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
 
        stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
        stats_base->drops = red_base->prob_drop + red_base->pdrop;
@@ -370,7 +384,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
 
        early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
        marks = xstats->ecn - xstats_base->prob_mark;
-       pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+       pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
+                xstats_base->pdrop;
 
        res->pdrop += pdrops;
        res->prob_drop += early_drops;
@@ -403,9 +418,10 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 
        overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
                     stats_base->overlimits;
-       drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
+       drops = xstats->wred_drop[tclass_num] +
+               mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
                stats_base->drops;
-       backlog = xstats->backlog[tclass_num];
+       backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num);
 
        _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
        stats_ptr->qstats->overlimits += overlimits;
@@ -576,9 +592,9 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
        tx_packets = stats->tx_packets - stats_base->tx_packets;
 
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               drops += xstats->tail_drop[i];
+               drops += mlxsw_sp_xstats_tail_drop(xstats, i);
                drops += xstats->wred_drop[i];
-               backlog += xstats->backlog[i];
+               backlog += mlxsw_sp_xstats_backlog(xstats, i);
        }
        drops = drops - stats_base->drops;
 
@@ -614,7 +630,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 
        stats_base->drops = 0;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               stats_base->drops += xstats->tail_drop[i];
+               stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
                stats_base->drops += xstats->wred_drop[i];
        }
 
@@ -651,6 +667,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
            mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
                return 0;
 
+       if (!p->child_handle) {
+               /* This is an invisible FIFO replacing the original Qdisc.
+                * Ignore it--the original Qdisc's destroy will follow.
+                */
+               return 0;
+       }
+
        /* See if the grafted qdisc is already offloaded on any tclass. If so,
         * unoffload it.
         */
index 08b7e9f964daefee98e8b63bb1ec33cc76131f3e..8290e82240fc0046ed073193512dd48ad51dd859 100644 (file)
@@ -7079,6 +7079,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
 
        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
                rif = mlxsw_sp->router->rifs[i];
+               if (rif && rif->ops &&
+                   rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
+                       continue;
                if (rif && rif->dev && rif->dev != dev &&
                    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
                                             mlxsw_sp->mac_mask)) {
index de6cb22f68b1fd0a17a08d7470a6d8afac02fadf..f0e98ec8f1eeac8b48a5cebb1986e98574fe1bf3 100644 (file)
@@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
        u64 len;
        int err;
 
+       if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+               this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
 
        if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
                return NETDEV_TX_BUSY;
 
-       if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
-               struct sk_buff *skb_orig = skb;
-
-               skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
-               if (!skb) {
-                       this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
-                       dev_kfree_skb_any(skb_orig);
-                       return NETDEV_TX_OK;
-               }
-               dev_consume_skb_any(skb_orig);
-       }
        mlxsw_sx_txhdr_construct(skb, &tx_info);
        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent.
index e19b49c4013ed321e53c0bc684676697342be6b8..3591285250e19745d016d607749d6b408e8e9372 100644 (file)
@@ -2204,24 +2204,28 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
        if (cd->tsu) {
                add_tsu_reg(ARSTR);
                add_tsu_reg(TSU_CTRST);
-               add_tsu_reg(TSU_FWEN0);
-               add_tsu_reg(TSU_FWEN1);
-               add_tsu_reg(TSU_FCM);
-               add_tsu_reg(TSU_BSYSL0);
-               add_tsu_reg(TSU_BSYSL1);
-               add_tsu_reg(TSU_PRISL0);
-               add_tsu_reg(TSU_PRISL1);
-               add_tsu_reg(TSU_FWSL0);
-               add_tsu_reg(TSU_FWSL1);
+               if (cd->dual_port) {
+                       add_tsu_reg(TSU_FWEN0);
+                       add_tsu_reg(TSU_FWEN1);
+                       add_tsu_reg(TSU_FCM);
+                       add_tsu_reg(TSU_BSYSL0);
+                       add_tsu_reg(TSU_BSYSL1);
+                       add_tsu_reg(TSU_PRISL0);
+                       add_tsu_reg(TSU_PRISL1);
+                       add_tsu_reg(TSU_FWSL0);
+                       add_tsu_reg(TSU_FWSL1);
+               }
                add_tsu_reg(TSU_FWSLC);
-               add_tsu_reg(TSU_QTAGM0);
-               add_tsu_reg(TSU_QTAGM1);
-               add_tsu_reg(TSU_FWSR);
-               add_tsu_reg(TSU_FWINMK);
-               add_tsu_reg(TSU_ADQT0);
-               add_tsu_reg(TSU_ADQT1);
-               add_tsu_reg(TSU_VTAG0);
-               add_tsu_reg(TSU_VTAG1);
+               if (cd->dual_port) {
+                       add_tsu_reg(TSU_QTAGM0);
+                       add_tsu_reg(TSU_QTAGM1);
+                       add_tsu_reg(TSU_FWSR);
+                       add_tsu_reg(TSU_FWINMK);
+                       add_tsu_reg(TSU_ADQT0);
+                       add_tsu_reg(TSU_ADQT1);
+                       add_tsu_reg(TSU_VTAG0);
+                       add_tsu_reg(TSU_VTAG1);
+               }
                add_tsu_reg(TSU_ADSBSY);
                add_tsu_reg(TSU_TEN);
                add_tsu_reg(TSU_POST1);
index c56fcbb370665de398bf72bb8887a9e6874a61d8..52ed111d98f4d6a0638fd1cff1304246993eeb05 100644 (file)
@@ -2296,7 +2296,7 @@ __setup("sxgbeeth=", sxgbe_cmdline_opt);
 
 
 
-MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
+MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver");
 
 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
 MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
index f7e927ad67fac36ec5c02eaeeb0972a39b351a7b..b7032422393f69bd8c3ecd97bd8bee83258fdfff 100644 (file)
@@ -424,16 +424,22 @@ static void ave_ethtool_get_wol(struct net_device *ndev,
                phy_ethtool_get_wol(ndev->phydev, wol);
 }
 
-static int ave_ethtool_set_wol(struct net_device *ndev,
-                              struct ethtool_wolinfo *wol)
+static int __ave_ethtool_set_wol(struct net_device *ndev,
+                                struct ethtool_wolinfo *wol)
 {
-       int ret;
-
        if (!ndev->phydev ||
            (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
                return -EOPNOTSUPP;
 
-       ret = phy_ethtool_set_wol(ndev->phydev, wol);
+       return phy_ethtool_set_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+                              struct ethtool_wolinfo *wol)
+{
+       int ret;
+
+       ret = __ave_ethtool_set_wol(ndev, wol);
        if (!ret)
                device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
 
@@ -1216,7 +1222,7 @@ static int ave_init(struct net_device *ndev)
 
        /* set wol initial state disabled */
        wol.wolopts = 0;
-       ave_ethtool_set_wol(ndev, &wol);
+       __ave_ethtool_set_wol(ndev, &wol);
 
        if (!phy_interface_is_rgmii(phydev))
                phy_set_max_speed(phydev, SPEED_100);
@@ -1768,7 +1774,7 @@ static int ave_resume(struct device *dev)
 
        ave_ethtool_get_wol(ndev, &wol);
        wol.wolopts = priv->wolopts;
-       ave_ethtool_set_wol(ndev, &wol);
+       __ave_ethtool_set_wol(ndev, &wol);
 
        if (ndev->phydev) {
                ret = phy_resume(ndev->phydev);
index bd6c01004913a0853348cefd7a93d1f9285c1472..0e2fa14f142377b0c47751d2eb99fa11cb4e445d 100644 (file)
@@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
        struct device *dev = dwmac->dev;
        const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
        struct meson8b_dwmac_clk_configs *clk_configs;
+       static const struct clk_div_table div_table[] = {
+               { .div = 2, .val = 2, },
+               { .div = 3, .val = 3, },
+               { .div = 4, .val = 4, },
+               { .div = 5, .val = 5, },
+               { .div = 6, .val = 6, },
+               { .div = 7, .val = 7, },
+       };
 
        clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
        if (!clk_configs)
@@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
        clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
        clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
        clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
-       clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
-                               CLK_DIVIDER_ALLOW_ZERO |
-                               CLK_DIVIDER_ROUND_CLOSEST;
+       clk_configs->m250_div.table = div_table;
+       clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
+                                     CLK_DIVIDER_ROUND_CLOSEST;
        clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
                                         &clk_divider_ops,
                                         &clk_configs->m250_div.hw);
index 1c8d84ed841020936129aaaf83a9bd9b8a2c8364..01b484cb177e107718cd5d9f080a2293b6efa4f9 100644 (file)
@@ -957,6 +957,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                /* default */
                break;
        case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
                reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
                break;
        case PHY_INTERFACE_MODE_RMII:
index 26353ef616b8bf1329f332ce01d8ae692ab617f1..7d40760e9ba887693e5b6b2a925eee73bd7ad92d 100644 (file)
@@ -44,7 +44,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
         * rate, which then uses the auto-reparenting feature of the
         * clock driver, and enabling/disabling the clock.
         */
-       if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+       if (phy_interface_mode_is_rgmii(gmac->interface)) {
                clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
                clk_prepare_enable(gmac->tx_clk);
                gmac->clk_enabled = 1;
index 6f51a265459d7b85ed40dd68c717bc4795232eeb..80d59b77590797c6d091528676687e2a2b320455 100644 (file)
@@ -106,6 +106,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
 #ifdef CONFIG_DEBUG_FS
+static const struct net_device_ops stmmac_netdev_ops;
 static void stmmac_init_fs(struct net_device *dev);
 static void stmmac_exit_fs(struct net_device *dev);
 #endif
@@ -4256,6 +4257,34 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
 }
 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
 
+/* Use network device events to rename debugfs file entries.
+ */
+static int stmmac_device_event(struct notifier_block *unused,
+                              unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       if (dev->netdev_ops != &stmmac_netdev_ops)
+               goto done;
+
+       switch (event) {
+       case NETDEV_CHANGENAME:
+               if (priv->dbgfs_dir)
+                       priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
+                                                        priv->dbgfs_dir,
+                                                        stmmac_fs_dir,
+                                                        dev->name);
+               break;
+       }
+done:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block stmmac_notifier = {
+       .notifier_call = stmmac_device_event,
+};
+
 static void stmmac_init_fs(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
@@ -4270,12 +4299,15 @@ static void stmmac_init_fs(struct net_device *dev)
        /* Entry to report the DMA HW features */
        debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
                            &stmmac_dma_cap_fops);
+
+       register_netdevice_notifier(&stmmac_notifier);
 }
 
 static void stmmac_exit_fs(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
+       unregister_netdevice_notifier(&stmmac_notifier);
        debugfs_remove_recursive(priv->dbgfs_dir);
 }
 #endif /* CONFIG_DEBUG_FS */
index cc8d7e7bf9acc88d6b9c53dfe2c9b750b639a1bf..4775f49d7f3b716fcee15cc5401b79f1e87112cf 100644 (file)
@@ -320,7 +320,7 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
 static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
                         struct device_node *np, struct device *dev)
 {
-       bool mdio = false;
+       bool mdio = !of_phy_is_fixed_link(np);
        static const struct of_device_id need_mdio_ids[] = {
                { .compatible = "snps,dwc-qos-ethernet-4.10" },
                {},
index 13227909287c03010189e00e7878029e08010d55..450d7dac3ea6f195fdd8cf083b86022a1cfa648f 100644 (file)
@@ -80,7 +80,7 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
        if (attr->max_size && (attr->max_size > size))
                size = attr->max_size;
 
-       skb = netdev_alloc_skb_ip_align(priv->dev, size);
+       skb = netdev_alloc_skb(priv->dev, size);
        if (!skb)
                return NULL;
 
@@ -244,6 +244,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
                                         struct net_device *orig_ndev)
 {
        struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+       unsigned char *src = tpriv->packet->src;
+       unsigned char *dst = tpriv->packet->dst;
        struct stmmachdr *shdr;
        struct ethhdr *ehdr;
        struct udphdr *uhdr;
@@ -260,15 +262,15 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
                goto out;
 
        ehdr = (struct ethhdr *)skb_mac_header(skb);
-       if (tpriv->packet->dst) {
-               if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+       if (dst) {
+               if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
                        goto out;
        }
        if (tpriv->packet->sarc) {
-               if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+               if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
                        goto out;
-       } else if (tpriv->packet->src) {
-               if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
+       } else if (src) {
+               if (!ether_addr_equal_unaligned(ehdr->h_source, src))
                        goto out;
        }
 
@@ -714,7 +716,7 @@ static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
        struct ethhdr *ehdr;
 
        ehdr = (struct ethhdr *)skb_mac_header(skb);
-       if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+       if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
                goto out;
        if (ehdr->h_proto != htons(ETH_P_PAUSE))
                goto out;
@@ -851,12 +853,16 @@ static int stmmac_test_vlan_validate(struct sk_buff *skb,
        if (tpriv->vlan_id) {
                if (skb->vlan_proto != htons(proto))
                        goto out;
-               if (skb->vlan_tci != tpriv->vlan_id)
+               if (skb->vlan_tci != tpriv->vlan_id) {
+                       /* Means filter did not work. */
+                       tpriv->ok = false;
+                       complete(&tpriv->comp);
                        goto out;
+               }
        }
 
        ehdr = (struct ethhdr *)skb_mac_header(skb);
-       if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+       if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
                goto out;
 
        ihdr = ip_hdr(skb);
@@ -965,6 +971,9 @@ static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
 {
        int ret, prev_cap = priv->dma_cap.vlhash;
 
+       if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+               return -EOPNOTSUPP;
+
        priv->dma_cap.vlhash = 0;
        ret = __stmmac_test_vlanfilt(priv);
        priv->dma_cap.vlhash = prev_cap;
@@ -1057,6 +1066,9 @@ static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
 {
        int ret, prev_cap = priv->dma_cap.vlhash;
 
+       if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
+               return -EOPNOTSUPP;
+
        priv->dma_cap.vlhash = 0;
        ret = __stmmac_test_dvlanfilt(priv);
        priv->dma_cap.vlhash = prev_cap;
@@ -1323,16 +1335,19 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
        struct stmmac_packet_attrs attr = { };
        struct flow_dissector *dissector;
        struct flow_cls_offload *cls;
+       int ret, old_enable = 0;
        struct flow_rule *rule;
-       int ret;
 
        if (!tc_can_offload(priv->dev))
                return -EOPNOTSUPP;
        if (!priv->dma_cap.l3l4fnum)
                return -EOPNOTSUPP;
-       if (priv->rss.enable)
+       if (priv->rss.enable) {
+               old_enable = priv->rss.enable;
+               priv->rss.enable = false;
                stmmac_rss_configure(priv, priv->hw, NULL,
                                     priv->plat->rx_queues_to_use);
+       }
 
        dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
        if (!dissector) {
@@ -1399,7 +1414,8 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
 cleanup_dissector:
        kfree(dissector);
 cleanup_rss:
-       if (priv->rss.enable) {
+       if (old_enable) {
+               priv->rss.enable = old_enable;
                stmmac_rss_configure(priv, priv->hw, &priv->rss,
                                     priv->plat->rx_queues_to_use);
        }
@@ -1444,16 +1460,19 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
        struct stmmac_packet_attrs attr = { };
        struct flow_dissector *dissector;
        struct flow_cls_offload *cls;
+       int ret, old_enable = 0;
        struct flow_rule *rule;
-       int ret;
 
        if (!tc_can_offload(priv->dev))
                return -EOPNOTSUPP;
        if (!priv->dma_cap.l3l4fnum)
                return -EOPNOTSUPP;
-       if (priv->rss.enable)
+       if (priv->rss.enable) {
+               old_enable = priv->rss.enable;
+               priv->rss.enable = false;
                stmmac_rss_configure(priv, priv->hw, NULL,
                                     priv->plat->rx_queues_to_use);
+       }
 
        dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
        if (!dissector) {
@@ -1525,7 +1544,8 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
 cleanup_dissector:
        kfree(dissector);
 cleanup_rss:
-       if (priv->rss.enable) {
+       if (old_enable) {
+               priv->rss.enable = old_enable;
                stmmac_rss_configure(priv, priv->hw, &priv->rss,
                                     priv->plat->rx_queues_to_use);
        }
@@ -1578,7 +1598,7 @@ static int stmmac_test_arp_validate(struct sk_buff *skb,
        struct arphdr *ahdr;
 
        ehdr = (struct ethhdr *)skb_mac_header(skb);
-       if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
+       if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
                goto out;
 
        ahdr = arp_hdr(skb);
index 7d972e0fd2b04d8759a016b023e07b30381e3dd3..9ffae12a212273c8c3d85844ff0a1e0dc6efcfd3 100644 (file)
@@ -577,6 +577,10 @@ static int tc_setup_cls(struct stmmac_priv *priv,
 {
        int ret = 0;
 
+       /* When RSS is enabled, the filtering will be bypassed */
+       if (priv->rss.enable)
+               return -EBUSY;
+
        switch (cls->command) {
        case FLOW_CLS_REPLACE:
                ret = tc_add_flow(priv, cls);
index e5b7d6d2286e0c25b7bf2c4bf039f64ecd871122..f6222ada68188e8201122a02da5cb3f2f254d1d0 100644 (file)
@@ -540,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
                mtu = dst_mtu(&rt->dst);
        }
 
-       rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+       rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
 
        if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
            mtu < ntohs(iph->tot_len)) {
@@ -813,7 +813,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
        lock_sock(sock->sk);
        if (sock->sk->sk_user_data) {
                sk = ERR_PTR(-EBUSY);
-               goto out_sock;
+               goto out_rel_sock;
        }
 
        sk = sock->sk;
@@ -826,8 +826,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
 
        setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
 
-out_sock:
+out_rel_sock:
        release_sock(sock->sk);
+out_sock:
        sockfd_put(sock);
        return sk;
 }
index 857c4bea451cdb1c606a9d83b35a1c042ef48371..e66d77dc28c8cc4ab4417b355bfbafe1344db489 100644 (file)
@@ -1443,8 +1443,6 @@ void rndis_filter_device_remove(struct hv_device *dev,
        /* Halt and release the rndis device */
        rndis_filter_halt_device(net_dev, rndis_dev);
 
-       net_dev->extension = NULL;
-
        netvsc_device_remove(dev);
 }
 
index 05631d97eeb4fbfe3ca599dfdccb1c355b1feb45..c5bf61565726b15aa1ea63590d7d8627b0c20d4a 100644 (file)
@@ -513,10 +513,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
        const struct macvlan_dev *dest;
 
        if (vlan->mode == MACVLAN_MODE_BRIDGE) {
-               const struct ethhdr *eth = (void *)skb->data;
+               const struct ethhdr *eth = skb_eth_hdr(skb);
 
                /* send to other bridge ports directly */
                if (is_multicast_ether_addr(eth->h_dest)) {
+                       skb_reset_mac_header(skb);
                        macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
                        goto xmit_world;
                }
index 059711edfc61e9d8119d1a35afc7b0dc468f9ed2..4b39aba2e9c47213872129ad2e1596f9334de3a9 100644 (file)
@@ -53,7 +53,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file,
 
        get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
 
-       id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev));
+       id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev));
        err = devlink_region_snapshot_create(nsim_dev->dummy_region,
                                             dummy_data, id, kfree);
        if (err) {
index 5848219005d7918ce8f403f49a5f04904e2db01f..8dc461f7574b79c77b3128752feea30851b01a4b 100644 (file)
@@ -340,14 +340,14 @@ config DAVICOM_PHY
          Currently supports dm9161e and dm9131
 
 config DP83822_PHY
-       tristate "Texas Instruments DP83822 PHY"
+       tristate "Texas Instruments DP83822/825 PHYs"
        ---help---
-         Supports the DP83822 PHY.
+         Supports the DP83822 and DP83825I PHYs.
 
 config DP83TC811_PHY
-       tristate "Texas Instruments DP83TC822 PHY"
+       tristate "Texas Instruments DP83TC811 PHY"
        ---help---
-         Supports the DP83TC822 PHY.
+         Supports the DP83TC811 PHY.
 
 config DP83848_PHY
        tristate "Texas Instruments DP83848 PHY"
index 3b29d381116f8e88dc25dc0d17c485db2b4ef6e2..975789d9349dc5f72184af47e5c132e2a19c265b 100644 (file)
@@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = {
        .config_intr    = aqr_config_intr,
        .ack_interrupt  = aqr_ack_interrupt,
        .read_status    = aqr_read_status,
+       .suspend        = aqr107_suspend,
+       .resume         = aqr107_resume,
 },
 {
        PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
index 9cd9dcee4eb2e8621e6ad03f49ec837c993fdd30..01cf71358359a2bd4fea0d3ba0826d7017a64897 100644 (file)
@@ -97,6 +97,7 @@
 #define DP83867_PHYCR_FIFO_DEPTH_MAX           0x03
 #define DP83867_PHYCR_FIFO_DEPTH_MASK          GENMASK(15, 14)
 #define DP83867_PHYCR_RESERVED_MASK            BIT(11)
+#define DP83867_PHYCR_FORCE_LINK_GOOD          BIT(10)
 
 /* RGMIIDCTL bits */
 #define DP83867_RGMII_TX_CLK_DELAY_MAX         0xf
@@ -599,7 +600,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 
        usleep_range(10, 20);
 
-       return 0;
+       /* After reset FORCE_LINK_GOOD bit is set. Although the
+        * default value should be unset. Disable FORCE_LINK_GOOD
+        * for the phy to work properly.
+        */
+       return phy_modify(phydev, MII_DP83867_PHYCTRL,
+                        DP83867_PHYCR_FORCE_LINK_GOOD, 0);
 }
 
 static struct phy_driver dp83867_driver[] = {
index 1585eebb73feb79998731807809f522a08e2df11..ee7a718662c6b4d56da00ad947f85e50d86db537 100644 (file)
@@ -566,6 +566,9 @@ static int phylink_register_sfp(struct phylink *pl,
        struct sfp_bus *bus;
        int ret;
 
+       if (!fwnode)
+               return 0;
+
        bus = sfp_bus_find_fwnode(fwnode);
        if (IS_ERR(bus)) {
                ret = PTR_ERR(bus);
index f940dc6485e56a7e8f905082ce920f5dd83232b0..75bdfae5f3e20afef3d2880171c7c6e8511546c5 100644 (file)
@@ -2724,11 +2724,6 @@ static int lan78xx_stop(struct net_device *net)
        return 0;
 }
 
-static int lan78xx_linearize(struct sk_buff *skb)
-{
-       return skb_linearize(skb);
-}
-
 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
                                       struct sk_buff *skb, gfp_t flags)
 {
@@ -2740,8 +2735,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
                return NULL;
        }
 
-       if (lan78xx_linearize(skb) < 0)
+       if (skb_linearize(skb)) {
+               dev_kfree_skb_any(skb);
                return NULL;
+       }
 
        tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
 
@@ -3753,6 +3750,7 @@ static int lan78xx_probe(struct usb_interface *intf,
 
        /* MTU range: 68 - 9000 */
        netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+       netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
 
        dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
        dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
index 4196c0e3274036f3fb81c66b2f8921473c6d0ad3..9485c8d1de8a37c78b210dd2a9b41aea5c1c2eb7 100644 (file)
@@ -1062,6 +1062,7 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)},     /* Quectel EC25, EC20 R2.0  Mini PCIe */
        {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)},     /* Quectel EP06/EG06/EM06 */
        {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)},     /* Quectel EG12/EM12 */
+       {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)},     /* Quectel RM500Q-GL */
 
        /* 3. Combined interface devices matching on interface number */
        {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
index c5ebf35d2488423357ca170249f8caeac2b0acd6..031cb8fff9094bb1281e1aaf8306838c2784d5a7 100644 (file)
@@ -6597,6 +6597,9 @@ static int rtl8152_probe(struct usb_interface *intf,
                return -ENODEV;
        }
 
+       if (intf->cur_altsetting->desc.bNumEndpoints < 3)
+               return -ENODEV;
+
        usb_reset_device(udev);
        netdev = alloc_etherdev(sizeof(struct r8152));
        if (!netdev) {
index 3ec6b506033d8565812991f796e3ef768a969dd5..1c5159dcc72024864000b8788a012b9907cf4204 100644 (file)
@@ -2541,7 +2541,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                ndst = &rt->dst;
                skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
 
-               tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+               tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
                err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
                                      vni, md, flags, udp_sum);
@@ -2581,7 +2581,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
 
-               tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+               tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
                skb_scrub_packet(skb, xnet);
                err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
index ca0f3be2b6bf8c4c3c4ed330651dabd8bb756ff8..aef7de225783f6bd19c0ee2d9e52f5a11a7fcf81 100644 (file)
@@ -73,7 +73,7 @@ static struct ucc_tdm_info utdm_primary_info = {
        },
 };
 
-static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
 
 static int uhdlc_init(struct ucc_hdlc_private *priv)
 {
index 0f1217b506ad2cac104fd6d2d1d3262e5cc4ada9..e30d91a38cfb637b376eab0e963ab0ade09fbbb5 100644 (file)
@@ -64,7 +64,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
 {
        struct lapbethdev *lapbeth;
 
-       list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
+       list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
                if (lapbeth->ethdev == dev) 
                        return lapbeth;
        }
index e2e679a01b65a2570f8bcd88870929622f3b7264..77ccf3672ede7db77071d0cba13ad49ba8f3e4b3 100644 (file)
@@ -708,7 +708,7 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb,
 
                                        spin_lock_irqsave(&sdla_lock, flags);
                                        SDLA_WINDOW(dev, addr);
-                                       pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK));
+                                       pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK));
                                        __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
                                        SDLA_WINDOW(dev, addr);
                                        pbuf->opp_flag = 1;
index 4590fbf82dc2a486709d8904f4288fd06e395712..f5bb7ace2ff57857d88ea28dc89a4923fc033a18 100644 (file)
@@ -391,7 +391,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
                       cmd, sizeof(cmd), false);
 
        rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
-                         &transferred, 0);
+                         &transferred, 5000);
        kfree(buffer);
        if (rc || (transferred != sizeof(cmd))) {
                nfc_err(&phy->udev->dev,
index 667f18f465be1c6d35e52685f29095665efba00e..5dc32b72e7faab7875640106514ab88d4976ebf9 100644 (file)
@@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status)
        case NVME_SC_CAP_EXCEEDED:
                return BLK_STS_NOSPC;
        case NVME_SC_LBA_RANGE:
+       case NVME_SC_CMD_INTERRUPTED:
+       case NVME_SC_NS_NOT_READY:
                return BLK_STS_TARGET;
        case NVME_SC_BAD_ATTRIBUTES:
        case NVME_SC_ONCS_NOT_SUPPORTED:
index 56c21b5011852b15eaf899e625d5a15b251b14b3..72a7e41f3018acdbbb2d1203e37cfec1c3b48583 100644 (file)
@@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
        return len;
 }
 
+static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
+{
+       switch (cdw10 & 0xff) {
+       case NVME_FEAT_HOST_ID:
+               return sizeof(req->sq->ctrl->hostid);
+       default:
+               return 0;
+       }
+}
+
 u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
 {
        return le64_to_cpu(cmd->get_log_page.lpo);
@@ -778,7 +788,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;
 
-       if (!nvmet_check_data_len(req, 0))
+       if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
                return;
 
        switch (cdw10 & 0xff) {
index 982b46f0a54da967dad65ac94ae87c81787789cd..dcbcf1331bb24f33a579116261a7c1012e0f070a 100644 (file)
@@ -69,6 +69,7 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
        dev->ats_enabled = 1;
        return 0;
 }
+EXPORT_SYMBOL_GPL(pci_enable_ats);
 
 /**
  * pci_disable_ats - disable the ATS capability
@@ -87,6 +88,7 @@ void pci_disable_ats(struct pci_dev *dev)
 
        dev->ats_enabled = 0;
 }
+EXPORT_SYMBOL_GPL(pci_disable_ats);
 
 void pci_restore_ats_state(struct pci_dev *dev)
 {
index e87196cc1a7fba33d37661ad0f73ca1dd1418583..ad746d903cebeda1f7667b6e24d180075465640a 100644 (file)
@@ -131,6 +131,7 @@ bool pci_ats_disabled(void)
 {
        return pcie_ats_disabled;
 }
+EXPORT_SYMBOL_GPL(pci_ats_disabled);
 
 /* Disable bridge_d3 for all PCIe ports */
 static bool pci_bridge_d3_disable;
index ead06c6c26019a91424a7e6be990f9ed7a8dbb0f..12e71a315a2cba315cb4f1c9b28422e3cbc8b93f 100644 (file)
@@ -115,7 +115,7 @@ struct cpcap_usb_ints_state {
 enum cpcap_gpio_mode {
        CPCAP_DM_DP,
        CPCAP_MDM_RX_TX,
-       CPCAP_UNKNOWN,
+       CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */
        CPCAP_OTG_DM_DP,
 };
 
@@ -134,6 +134,8 @@ struct cpcap_phy_ddata {
        struct iio_channel *id;
        struct regulator *vusb;
        atomic_t active;
+       unsigned int vbus_provider:1;
+       unsigned int docked:1;
 };
 
 static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata)
@@ -207,6 +209,19 @@ static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata,
 static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata);
 static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata);
 
+static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata,
+                                      enum musb_vbus_id_status status)
+{
+       int error;
+
+       error = musb_mailbox(status);
+       if (!error)
+               return;
+
+       dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n",
+               __func__, error);
+}
+
 static void cpcap_usb_detect(struct work_struct *work)
 {
        struct cpcap_phy_ddata *ddata;
@@ -220,16 +235,66 @@ static void cpcap_usb_detect(struct work_struct *work)
        if (error)
                return;
 
-       if (s.id_ground) {
-               dev_dbg(ddata->dev, "id ground, USB host mode\n");
+       vbus = cpcap_usb_vbus_valid(ddata);
+
+       /* We need to kick the VBUS as USB A-host */
+       if (s.id_ground && ddata->vbus_provider) {
+               dev_dbg(ddata->dev, "still in USB A-host mode, kicking VBUS\n");
+
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+               error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
+                                          CPCAP_BIT_VBUSSTBY_EN |
+                                          CPCAP_BIT_VBUSEN_SPI,
+                                          CPCAP_BIT_VBUSEN_SPI);
+               if (error)
+                       goto out_err;
+
+               return;
+       }
+
+       if (vbus && s.id_ground && ddata->docked) {
+               dev_dbg(ddata->dev, "still docked as A-host, signal ID down\n");
+
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+               return;
+       }
+
+       /* No VBUS needed with docks */
+       if (vbus && s.id_ground && !ddata->vbus_provider) {
+               dev_dbg(ddata->dev, "connected to a dock\n");
+
+               ddata->docked = true;
+
                error = cpcap_usb_set_usb_mode(ddata);
                if (error)
                        goto out_err;
 
-               error = musb_mailbox(MUSB_ID_GROUND);
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+               /*
+                * Force check state again after musb has reoriented,
+                * otherwise devices won't enumerate after loading PHY
+                * driver.
+                */
+               schedule_delayed_work(&ddata->detect_work,
+                                     msecs_to_jiffies(1000));
+
+               return;
+       }
+
+       if (s.id_ground && !ddata->docked) {
+               dev_dbg(ddata->dev, "id ground, USB host mode\n");
+
+               ddata->vbus_provider = true;
+
+               error = cpcap_usb_set_usb_mode(ddata);
                if (error)
                        goto out_err;
 
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
                error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
                                           CPCAP_BIT_VBUSSTBY_EN |
                                           CPCAP_BIT_VBUSEN_SPI,
@@ -248,43 +313,26 @@ static void cpcap_usb_detect(struct work_struct *work)
 
        vbus = cpcap_usb_vbus_valid(ddata);
 
+       /* Otherwise assume we're connected to a USB host */
        if (vbus) {
-               /* Are we connected to a docking station with vbus? */
-               if (s.id_ground) {
-                       dev_dbg(ddata->dev, "connected to a dock\n");
-
-                       /* No VBUS needed with docks */
-                       error = cpcap_usb_set_usb_mode(ddata);
-                       if (error)
-                               goto out_err;
-                       error = musb_mailbox(MUSB_ID_GROUND);
-                       if (error)
-                               goto out_err;
-
-                       return;
-               }
-
-               /* Otherwise assume we're connected to a USB host */
                dev_dbg(ddata->dev, "connected to USB host\n");
                error = cpcap_usb_set_usb_mode(ddata);
                if (error)
                        goto out_err;
-               error = musb_mailbox(MUSB_VBUS_VALID);
-               if (error)
-                       goto out_err;
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID);
 
                return;
        }
 
+       ddata->vbus_provider = false;
+       ddata->docked = false;
+       cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
+
        /* Default to debug UART mode */
        error = cpcap_usb_set_uart_mode(ddata);
        if (error)
                goto out_err;
 
-       error = musb_mailbox(MUSB_VBUS_OFF);
-       if (error)
-               goto out_err;
-
        dev_dbg(ddata->dev, "set UART mode\n");
 
        return;
@@ -376,7 +424,8 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
 {
        int error;
 
-       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+       /* Disable lines to prevent glitches from waking up mdm6600 */
+       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
        if (error)
                goto out_err;
 
@@ -403,6 +452,11 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
        if (error)
                goto out_err;
 
+       /* Enable UART mode */
+       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+       if (error)
+               goto out_err;
+
        return 0;
 
 out_err:
@@ -415,7 +469,8 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
 {
        int error;
 
-       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+       /* Disable lines to prevent glitches from waking up mdm6600 */
+       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
        if (error)
                return error;
 
@@ -434,12 +489,6 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
        if (error)
                goto out_err;
 
-       error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
-                                  CPCAP_BIT_USBXCVREN,
-                                  CPCAP_BIT_USBXCVREN);
-       if (error)
-               goto out_err;
-
        error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
                                   CPCAP_BIT_PU_SPI |
                                   CPCAP_BIT_DMPD_SPI |
@@ -455,6 +504,11 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
        if (error)
                goto out_err;
 
+       /* Enable USB mode */
+       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+       if (error)
+               goto out_err;
+
        return 0;
 
 out_err:
@@ -649,9 +703,7 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev)
        if (error)
                dev_err(ddata->dev, "could not set UART mode\n");
 
-       error = musb_mailbox(MUSB_VBUS_OFF);
-       if (error)
-               dev_err(ddata->dev, "could not set mailbox\n");
+       cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
 
        usb_remove_phy(&ddata->phy);
        cancel_delayed_work_sync(&ddata->detect_work);
index ee184d5607bdb39240b2729b82688ef67501df06..f20524f0c21d9aea86013601ec47c381bf32db26 100644 (file)
@@ -200,7 +200,7 @@ static void phy_mdm6600_status(struct work_struct *work)
        struct phy_mdm6600 *ddata;
        struct device *dev;
        DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES);
-       int error, i, val = 0;
+       int error;
 
        ddata = container_of(work, struct phy_mdm6600, status_work.work);
        dev = ddata->dev;
@@ -212,16 +212,11 @@ static void phy_mdm6600_status(struct work_struct *work)
        if (error)
                return;
 
-       for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
-               val |= test_bit(i, values) << i;
-               dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
-                       __func__, i, test_bit(i, values), val);
-       }
-       ddata->status = values[0];
+       ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1);
 
        dev_info(dev, "modem status: %i %s\n",
                 ddata->status,
-                phy_mdm6600_status_name[ddata->status & 7]);
+                phy_mdm6600_status_name[ddata->status]);
        complete(&ddata->ack);
 }
 
index 091e20303a14d6b3569eb806eb3bf72beb56ae4e..66f91726b8b24b4ded24806e2e701ff4bc066904 100644 (file)
@@ -66,7 +66,7 @@
 /* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
 #define CLAMP_EN                               BIT(0) /* enables i/o clamp_n */
 
-#define PHY_INIT_COMPLETE_TIMEOUT              1000
+#define PHY_INIT_COMPLETE_TIMEOUT              10000
 #define POWER_DOWN_DELAY_US_MIN                        10
 #define POWER_DOWN_DELAY_US_MAX                        11
 
index 2b97fb1185a00e68b41858c15a8a99b0ae7303b6..9ca20c947283de2f5d03866aa404c0411b2eb72c 100644 (file)
@@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw,
 {
        const struct pre_pll_config *cfg = pre_pll_cfg_table;
 
+       rate = (rate / 1000) * 1000;
+
        for (; cfg->pixclock != 0; cfg++)
                if (cfg->pixclock == rate && !cfg->fracdiv)
                        break;
@@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
 {
        const struct pre_pll_config *cfg = pre_pll_cfg_table;
 
+       rate = (rate / 1000) * 1000;
+
        for (; cfg->pixclock != 0; cfg++)
                if (cfg->pixclock == rate)
                        break;
index f1806fd781a05c6b952bf685e6a577bd09715986..530426a74f75148881bbe5007e9eaac541012435 100644 (file)
@@ -2,6 +2,7 @@
 config PINCTRL_LOCHNAGAR
        tristate "Cirrus Logic Lochnagar pinctrl driver"
        depends on MFD_LOCHNAGAR
+       select GPIOLIB
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
index 3c80828a5e503bf634f281bd57b7f6031a1df453..bbc919bef2bf5d367498978e2b3955e4b7b30177 100644 (file)
@@ -441,6 +441,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc,
                return ret;
 
        meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit);
+       bit = bit << 1;
 
        ret = regmap_read(pc->reg_ds, reg, &val);
        if (ret)
index bb0edf51dfda4cf96bd5c7f5496ba213163e5468..5731d1b60e28a4d4e9b8a82b98310ff706a7cccf 100644 (file)
@@ -73,13 +73,6 @@ static int send_kbbl_msg(struct wilco_ec_device *ec,
                return ret;
        }
 
-       if (response->status) {
-               dev_err(ec->dev,
-                       "EC reported failure sending keyboard LEDs command: %d",
-                       response->status);
-               return -EIO;
-       }
-
        return 0;
 }
 
@@ -87,6 +80,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
 {
        struct wilco_keyboard_leds_msg request;
        struct wilco_keyboard_leds_msg response;
+       int ret;
 
        memset(&request, 0, sizeof(request));
        request.command = WILCO_EC_COMMAND_KBBL;
@@ -94,7 +88,18 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
        request.mode    = WILCO_KBBL_MODE_FLAG_PWM;
        request.percent = brightness;
 
-       return send_kbbl_msg(ec, &request, &response);
+       ret = send_kbbl_msg(ec, &request, &response);
+       if (ret < 0)
+               return ret;
+
+       if (response.status) {
+               dev_err(ec->dev,
+                       "EC reported failure sending keyboard LEDs command: %d",
+                       response.status);
+               return -EIO;
+       }
+
+       return 0;
 }
 
 static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
@@ -140,6 +145,13 @@ static int kbbl_init(struct wilco_ec_device *ec)
        if (ret < 0)
                return ret;
 
+       if (response.status) {
+               dev_err(ec->dev,
+                       "EC reported failure sending keyboard LEDs command: %d",
+                       response.status);
+               return -EIO;
+       }
+
        if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
                return response.percent;
 
index 9a5c9fd2dbc60cf2ab441ef48ba4d6db3cb4e119..5739a9669b29139319738c8775731f89074929df 100644 (file)
@@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info {
  * @work: work struct for deferred process
  * @timer: background timer
  * @vring: Tx/Rx ring
- * @spin_lock: spin lock
+ * @spin_lock: Tx/Rx spin lock
  * @is_ready: ready flag
  */
 struct mlxbf_tmfifo {
@@ -164,7 +164,7 @@ struct mlxbf_tmfifo {
        struct work_struct work;
        struct timer_list timer;
        struct mlxbf_tmfifo_vring *vring[2];
-       spinlock_t spin_lock;           /* spin lock */
+       spinlock_t spin_lock[2];        /* spin lock */
        bool is_ready;
 };
 
@@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
        writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
 
        /* Use spin-lock to protect the 'cons->tx_buf'. */
-       spin_lock_irqsave(&fifo->spin_lock, flags);
+       spin_lock_irqsave(&fifo->spin_lock[0], flags);
 
        while (size > 0) {
                addr = cons->tx_buf.buf + cons->tx_buf.tail;
@@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
                }
        }
 
-       spin_unlock_irqrestore(&fifo->spin_lock, flags);
+       spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
 }
 
 /* Rx/Tx one word in the descriptor buffer. */
@@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
                fifo->vring[is_rx] = NULL;
 
                /* Notify upper layer that packet is done. */
-               spin_lock_irqsave(&fifo->spin_lock, flags);
+               spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
                vring_interrupt(0, vring->vq);
-               spin_unlock_irqrestore(&fifo->spin_lock, flags);
+               spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
        }
 
 mlxbf_tmfifo_desc_done:
@@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
                 * worker handler.
                 */
                if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
-                       spin_lock_irqsave(&fifo->spin_lock, flags);
+                       spin_lock_irqsave(&fifo->spin_lock[0], flags);
                        tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
                        mlxbf_tmfifo_console_output(tm_vdev, vring);
-                       spin_unlock_irqrestore(&fifo->spin_lock, flags);
+                       spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
                } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
                                            &fifo->pend_events)) {
                        return true;
@@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
        if (!fifo)
                return -ENOMEM;
 
-       spin_lock_init(&fifo->spin_lock);
+       spin_lock_init(&fifo->spin_lock[0]);
+       spin_lock_init(&fifo->spin_lock[1]);
        INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
        mutex_init(&fifo->lock);
 
index f4d0a86c00d079557d66e8fe71e6110d762a46fa..5e77b0dc5fd6b958450915eb13507fc08f69b6fe 100644 (file)
@@ -18,7 +18,7 @@ if MIPS_PLATFORM_DEVICES
 
 config CPU_HWMON
        tristate "Loongson-3 CPU HWMon Driver"
-       depends on CONFIG_MACH_LOONGSON64
+       depends on MACH_LOONGSON64
        select HWMON
        default y
        help
index 821b08e01635760f3c38a4d1d42741358f876824..982f0cc8270ce6ea6bd6f27addf3074e1bbb43ca 100644 (file)
@@ -512,13 +512,7 @@ static void kbd_led_update(struct asus_wmi *asus)
 {
        int ctrl_param = 0;
 
-       /*
-        * bits 0-2: level
-        * bit 7: light on/off
-        */
-       if (asus->kbd_led_wk > 0)
-               ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
-
+       ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
        asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
 }
 
index be85ed966bf33bd7b809440f53dd0dac1b405273..b471b86c28fe8d2e15aca3766de871e2dfececad 100644 (file)
 
 #define MAX_SPEED 3
 
-static int temp_limits[3] = { 55000, 60000, 65000 };
+#define TEMP_LIMIT0_DEFAULT    55000
+#define TEMP_LIMIT1_DEFAULT    60000
+#define TEMP_LIMIT2_DEFAULT    65000
+
+#define HYSTERESIS_DEFAULT     3000
+
+#define SPEED_ON_AC_DEFAULT    2
+
+static int temp_limits[3] = {
+       TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT,
+};
 module_param_array(temp_limits, int, NULL, 0444);
 MODULE_PARM_DESC(temp_limits,
                 "Millicelsius values above which the fan speed increases");
 
-static int hysteresis = 3000;
+static int hysteresis = HYSTERESIS_DEFAULT;
 module_param(hysteresis, int, 0444);
 MODULE_PARM_DESC(hysteresis,
                 "Hysteresis in millicelsius before lowering the fan speed");
 
-static int speed_on_ac = 2;
+static int speed_on_ac = SPEED_ON_AC_DEFAULT;
 module_param(speed_on_ac, int, 0444);
 MODULE_PARM_DESC(speed_on_ac,
                 "minimum fan speed to allow when system is powered by AC");
@@ -117,21 +127,24 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
-               if (temp_limits[i] < 40000 || temp_limits[i] > 70000) {
+               if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
                        dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n",
                                temp_limits[i]);
-                       return -EINVAL;
+                       temp_limits[0] = TEMP_LIMIT0_DEFAULT;
+                       temp_limits[1] = TEMP_LIMIT1_DEFAULT;
+                       temp_limits[2] = TEMP_LIMIT2_DEFAULT;
+                       break;
                }
        }
        if (hysteresis < 1000 || hysteresis > 10000) {
                dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n",
                        hysteresis);
-               return -EINVAL;
+               hysteresis = HYSTERESIS_DEFAULT;
        }
        if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) {
                dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n",
                        speed_on_ac);
-               return -EINVAL;
+               speed_on_ac = SPEED_ON_AC_DEFAULT;
        }
 
        fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
index 512ad234ad0d67fccdb05d241d37fd887da164d5..35ed9711c7b97d3554dceb135432ceaa1729de66 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2010 Intel Corporation
  */
index fdee5772e5322a9112a3c40d0f7cf494841f98ee..8203ae38dc4649b73a6d88d6b1ccc85fc7be4bdb 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Intel Core SoC Power Management Controller Header File
  *
index 6fe829f30997d07e5986362ed4a94fc8ad3582d6..e1266f5c63593e958a7d9412ff2166b3556d8209 100644 (file)
@@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
        INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
        INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
        INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
+       INTEL_CPU_FAM6(COMETLAKE, pmc_core_device),
+       INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
index a67701ed93e8b065817dcdbe310772a2ce8587d1..2e5b6a6834da5d71ff768fef433a1067406e890a 100644 (file)
@@ -1295,6 +1295,9 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv)
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        int ret;
 
+       if (!rapl_defaults)
+               return ERR_PTR(-ENODEV);
+
        rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
        if (!rp)
                return ERR_PTR(-ENOMEM);
index e60eab7f8a616f90a2d0c061368610ef994100c7..b84f16bbd6f24fb03e8268dd5d23c9848fdc8381 100644 (file)
@@ -166,10 +166,11 @@ static struct posix_clock_operations ptp_clock_ops = {
        .read           = ptp_read,
 };
 
-static void delete_ptp_clock(struct posix_clock *pc)
+static void ptp_clock_release(struct device *dev)
 {
-       struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+       struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
 
+       ptp_cleanup_pin_groups(ptp);
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
        ida_simple_remove(&ptp_clocks_map, ptp->index);
@@ -213,7 +214,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        }
 
        ptp->clock.ops = ptp_clock_ops;
-       ptp->clock.release = delete_ptp_clock;
        ptp->info = info;
        ptp->devid = MKDEV(major, index);
        ptp->index = index;
@@ -236,15 +236,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        if (err)
                goto no_pin_groups;
 
-       /* Create a new device in our class. */
-       ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
-                                            ptp, ptp->pin_attr_groups,
-                                            "ptp%d", ptp->index);
-       if (IS_ERR(ptp->dev)) {
-               err = PTR_ERR(ptp->dev);
-               goto no_device;
-       }
-
        /* Register a new PPS source. */
        if (info->pps) {
                struct pps_source_info pps;
@@ -260,8 +251,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                }
        }
 
-       /* Create a posix clock. */
-       err = posix_clock_register(&ptp->clock, ptp->devid);
+       /* Initialize a new device of our class in our clock structure. */
+       device_initialize(&ptp->dev);
+       ptp->dev.devt = ptp->devid;
+       ptp->dev.class = ptp_class;
+       ptp->dev.parent = parent;
+       ptp->dev.groups = ptp->pin_attr_groups;
+       ptp->dev.release = ptp_clock_release;
+       dev_set_drvdata(&ptp->dev, ptp);
+       dev_set_name(&ptp->dev, "ptp%d", ptp->index);
+
+       /* Create a posix clock and link it to the device. */
+       err = posix_clock_register(&ptp->clock, &ptp->dev);
        if (err) {
                pr_err("failed to create posix clock\n");
                goto no_clock;
@@ -273,8 +274,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);
 no_pps:
-       device_destroy(ptp_class, ptp->devid);
-no_device:
        ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
        if (ptp->kworker)
@@ -304,10 +303,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);
 
-       device_destroy(ptp_class, ptp->devid);
-       ptp_cleanup_pin_groups(ptp);
-
        posix_clock_unregister(&ptp->clock);
+
        return 0;
 }
 EXPORT_SYMBOL(ptp_clock_unregister);
index 9171d42468fdb05321814f1bae8c239ebe22693a..6b97155148f11931b89c95e7c9d84e0f0e960470 100644 (file)
@@ -28,7 +28,7 @@ struct timestamp_event_queue {
 
 struct ptp_clock {
        struct posix_clock clock;
-       struct device *dev;
+       struct device dev;
        struct ptp_clock_info *info;
        dev_t devid;
        int index; /* index into clocks.map */
index 989506bd90b193bc696abf90096e0c4ae1a37efa..16f0c857003606958dd0f1a415d27327eb182847 100644 (file)
@@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
                int i;
 
                for (i = 0; i < rate_count; i++) {
-                       if (ramp <= slew_rates[i])
-                               cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
-                       else
+                       if (ramp > slew_rates[i])
                                break;
+
+                       if (id == AXP20X_DCDC2)
+                               cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i);
+                       else
+                               cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
                }
 
                if (cfg == 0xff) {
@@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = {
                 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
        AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
                 AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
-                AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
+                AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
        AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
                 AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
                 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
index ec764022621f194ce41045eaddaf6203f71e3ef4..5bf8a2dc5fe778302d3e79525e1ef4d4f70f3282 100644 (file)
@@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = {
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_time_sel = regulator_set_voltage_time_sel,
-       .set_ramp_delay = bd70528_set_ramp_delay,
 };
 
 static const struct regulator_ops bd70528_led_ops = {
index df2829dd55ad6a3fe5e6934cd1614c20b790198b..2ecd8752b088b6cfe1bdb97fd57511d72778c562 100644 (file)
@@ -172,20 +172,7 @@ int mc146818_set_time(struct rtc_time *time)
        save_control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
        save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-
-#ifdef CONFIG_X86
-       if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-            boot_cpu_data.x86 == 0x17) ||
-            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
-               CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)),
-                       RTC_FREQ_SELECT);
-               save_freq_select &= ~RTC_DIV_RESET2;
-       } else
-               CMOS_WRITE((save_freq_select | RTC_DIV_RESET2),
-                       RTC_FREQ_SELECT);
-#else
-       CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
-#endif
+       CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 
 #ifdef CONFIG_MACH_DECSTATION
        CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
index 5249fc99fd5fb7b2eadbdf9a14c3ef2c7b94c96f..9135e21017523959d257484f272b493232bb7b28 100644 (file)
@@ -47,7 +47,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data)
                irqen = irqsta & ~RTC_IRQ_EN_AL;
                mutex_lock(&rtc->lock);
                if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN,
-                                irqen) < 0)
+                                irqen) == 0)
                        mtk_rtc_write_trigger(rtc);
                mutex_unlock(&rtc->lock);
 
@@ -169,12 +169,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
        alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM);
        mutex_unlock(&rtc->lock);
 
-       tm->tm_sec = data[RTC_OFFSET_SEC];
-       tm->tm_min = data[RTC_OFFSET_MIN];
-       tm->tm_hour = data[RTC_OFFSET_HOUR];
-       tm->tm_mday = data[RTC_OFFSET_DOM];
-       tm->tm_mon = data[RTC_OFFSET_MTH];
-       tm->tm_year = data[RTC_OFFSET_YEAR];
+       tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK;
+       tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK;
+       tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK;
+       tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK;
+       tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK;
+       tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK;
 
        tm->tm_year += RTC_MIN_YEAR_OFFSET;
        tm->tm_mon--;
@@ -195,14 +195,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
        tm->tm_year -= RTC_MIN_YEAR_OFFSET;
        tm->tm_mon++;
 
-       data[RTC_OFFSET_SEC] = tm->tm_sec;
-       data[RTC_OFFSET_MIN] = tm->tm_min;
-       data[RTC_OFFSET_HOUR] = tm->tm_hour;
-       data[RTC_OFFSET_DOM] = tm->tm_mday;
-       data[RTC_OFFSET_MTH] = tm->tm_mon;
-       data[RTC_OFFSET_YEAR] = tm->tm_year;
-
        mutex_lock(&rtc->lock);
+       ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
+                              data, RTC_OFFSET_COUNT);
+       if (ret < 0)
+               goto exit;
+
+       data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) |
+                               (tm->tm_sec & RTC_AL_SEC_MASK));
+       data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) |
+                               (tm->tm_min & RTC_AL_MIN_MASK));
+       data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) |
+                               (tm->tm_hour & RTC_AL_HOU_MASK));
+       data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) |
+                               (tm->tm_mday & RTC_AL_DOM_MASK));
+       data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) |
+                               (tm->tm_mon & RTC_AL_MTH_MASK));
+       data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) |
+                               (tm->tm_year & RTC_AL_YEA_MASK));
+
        if (alm->enabled) {
                ret = regmap_bulk_write(rtc->regmap,
                                        rtc->addr_base + RTC_AL_SEC,
index 8dcd20b34dde3d15601596d230dc0030c71b6c98..852f5f3b359283a0e57c0f6f7f4494caadb41c57 100644 (file)
@@ -379,6 +379,22 @@ static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
 CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
                      sun50i_h6_rtc_clk_init);
 
+/*
+ * The R40 user manual is self-conflicting on whether the prescaler is
+ * fixed or configurable. The clock diagram shows it as fixed, but there
+ * is also a configurable divider in the RTC block.
+ */
+static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = {
+       .rc_osc_rate = 16000000,
+       .fixed_prescaler = 512,
+};
+static void __init sun8i_r40_rtc_clk_init(struct device_node *node)
+{
+       sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data);
+}
+CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
+                     sun8i_r40_rtc_clk_init);
+
 static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
        .rc_osc_rate = 32000,
        .has_out_clk = 1,
index a1915061932eb0325d95a644c6e8d6249bd09d47..5256e3ce84e56a5144368d9017c17da5bb336c12 100644 (file)
@@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev)
                drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
                if (!!devres != !!drvres)
                        return -ENODEV;
-               /* (re-)init queue's state machine */
-               ap_queue_reinit_state(to_ap_queue(dev));
        }
 
        /* Add queue/card to list of active queues/cards */
index 433b7b64368d6710523c468eaac5f3270cc5701f..bb35ba4a8d243955545596908170fbdd10620400 100644 (file)
@@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
-void ap_queue_reinit_state(struct ap_queue *aq);
+void ap_queue_init_state(struct ap_queue *aq);
 
 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
                               int comp_device_type, unsigned int functions);
index dad2be333d826fd5d49601e6b9c11f11b118b755..37c3bdc3642dc6756dbcf10a6a34f2d9974fb930 100644 (file)
@@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
        aq->ap_dev.device.type = &ap_queue_type;
        aq->ap_dev.device_type = device_type;
        aq->qid = qid;
-       aq->state = AP_STATE_RESET_START;
+       aq->state = AP_STATE_UNBOUND;
        aq->interrupt = AP_INTR_DISABLED;
        spin_lock_init(&aq->lock);
        INIT_LIST_HEAD(&aq->list);
@@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq)
        spin_unlock_bh(&aq->lock);
 }
 
-void ap_queue_reinit_state(struct ap_queue *aq)
+void ap_queue_init_state(struct ap_queue *aq)
 {
        spin_lock_bh(&aq->lock);
        aq->state = AP_STATE_RESET_START;
        ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
        spin_unlock_bh(&aq->lock);
 }
+EXPORT_SYMBOL(ap_queue_init_state);
index c1db64a2db21b92e4f44b475a828a8357a097206..110fe9d0cb91090b4879021a81c24e86f4a8fa59 100644 (file)
@@ -1037,8 +1037,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
        prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
 
        /* do some plausibility checks on the key block */
-       if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
-           prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+       if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
+           prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
                DEBUG_ERR("%s reply with invalid or unknown key block\n",
                          __func__);
                rc = -EIO;
index c50f3e86cc7487377e09211264a4d4cc43ec4b61..7cbb384ec5352d618d15f0015f7e60f87c9e4a09 100644 (file)
@@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
        zq->queue = aq;
        zq->online = 1;
        atomic_set(&zq->load, 0);
+       ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2A_CLEANUP_TIME,
        aq->private = zq;
index 35c7c6672713b70b33959b48322de5c0254c4a29..c78c0d119806f17784a0a38dac53f6a48b616b6d 100644 (file)
@@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
        zq->queue = aq;
        zq->online = 1;
        atomic_set(&zq->load, 0);
+       ap_rapq(aq->qid);
        rc = zcrypt_cex2c_rng_supported(aq);
        if (rc < 0) {
                zcrypt_queue_free(zq);
@@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
        else
                zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
                                         MSGTYPE06_VARIANT_NORNG);
+       ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2C_CLEANUP_TIME;
        aq->private = zq;
index 442e3d6162f761645d5f1df2849d28c3b486c855..6fabc906114c0e3b9de976bc6a5670923662de20 100644 (file)
@@ -381,6 +381,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
        zq->queue = aq;
        zq->online = 1;
        atomic_set(&zq->load, 0);
+       ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX4_CLEANUP_TIME,
        aq->private = zq;
index bc4158888af96e19e72dbe6cc2065c83a08ea149..29facb9136715638719bb271f86888ec120616f6 100644 (file)
@@ -2482,50 +2482,46 @@ static int qeth_mpc_initialize(struct qeth_card *card)
        rc = qeth_cm_enable(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "2err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_cm_setup(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "3err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_ulp_enable(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "4err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_ulp_setup(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "5err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_alloc_qdio_queues(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "5err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_qdio_establish(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "6err%d", rc);
                qeth_free_qdio_queues(card);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_qdio_activate(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "7err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_dm_act(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "8err%d", rc);
-               goto out_qdio;
+               return rc;
        }
 
        return 0;
-out_qdio:
-       qeth_qdio_clear_card(card, !IS_IQD(card));
-       qdio_free(CARD_DDEV(card));
-       return rc;
 }
 
 void qeth_print_status_message(struct qeth_card *card)
@@ -3429,11 +3425,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
                        goto out;
                }
 
-               if (card->state != CARD_STATE_DOWN) {
-                       rc = -1;
-                       goto out;
-               }
-
                qeth_free_qdio_queues(card);
                card->options.cq = cq;
                rc = 0;
@@ -5035,10 +5026,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
        }
        if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
                rc = qeth_query_setdiagass(card);
-               if (rc < 0) {
+               if (rc)
                        QETH_CARD_TEXT_(card, 2, "8err%d", rc);
-                       goto out;
-               }
        }
 
        if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
index 8c95e6019bacd3f036ba25ecb26453c04af110de..47d37e75dda68530800fdf09c3a9e2617ea2a936 100644 (file)
@@ -287,12 +287,12 @@ static void qeth_l2_stop_card(struct qeth_card *card)
                card->state = CARD_STATE_HARDSETUP;
        }
        if (card->state == CARD_STATE_HARDSETUP) {
-               qeth_qdio_clear_card(card, 0);
                qeth_drain_output_queues(card);
                qeth_clear_working_pool_list(card);
                card->state = CARD_STATE_DOWN;
        }
 
+       qeth_qdio_clear_card(card, 0);
        flush_workqueue(card->event_wq);
        card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
        card->info.promisc_mode = 0;
@@ -1952,8 +1952,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
 /* check if VNICC is currently enabled */
 bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
 {
-       /* if everything is turned off, VNICC is not active */
-       if (!card->options.vnicc.cur_chars)
+       if (!card->options.vnicc.sup_chars)
                return false;
        /* default values are only OK if rx_bcast was not enabled by user
         * or the card is offline.
@@ -2040,8 +2039,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
        /* enforce assumed default values and recover settings, if changed  */
        error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
                                               timeout);
-       chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
-       chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
+       /* Change chars, if necessary  */
+       chars_tmp = card->options.vnicc.wanted_chars ^
+                   card->options.vnicc.cur_chars;
        chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
        for_each_set_bit(i, &chars_tmp, chars_len) {
                vnicc = BIT(i);
index 04e301de376f0957796100e6fbadc8a022c7ba80..5508ab89b518dd306a7a63c399d69ff7dba64b9f 100644 (file)
@@ -1307,12 +1307,12 @@ static void qeth_l3_stop_card(struct qeth_card *card)
                card->state = CARD_STATE_HARDSETUP;
        }
        if (card->state == CARD_STATE_HARDSETUP) {
-               qeth_qdio_clear_card(card, 0);
                qeth_drain_output_queues(card);
                qeth_clear_working_pool_list(card);
                card->state = CARD_STATE_DOWN;
        }
 
+       qeth_qdio_clear_card(card, 0);
        flush_workqueue(card->event_wq);
        card->info.promisc_mode = 0;
 }
index f9067ed6c7d32168997b33d529128a1f953ff7b5..e8c848f72c6d2ccd3c6c278b6f4fe4d3c18b1b1e 100644 (file)
@@ -242,21 +242,33 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       int rc = 0;
        char *tmp;
-       int rc;
 
        if (!IS_IQD(card))
                return -EPERM;
-       if (card->state != CARD_STATE_DOWN)
-               return -EPERM;
-       if (card->options.sniffer)
-               return -EPERM;
-       if (card->options.cq == QETH_CQ_NOTAVAILABLE)
-               return -EPERM;
+
+       mutex_lock(&card->conf_mutex);
+       if (card->state != CARD_STATE_DOWN) {
+               rc = -EPERM;
+               goto out;
+       }
+
+       if (card->options.sniffer) {
+               rc = -EPERM;
+               goto out;
+       }
+
+       if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+               rc = -EPERM;
+               goto out;
+       }
 
        tmp = strsep((char **)&buf, "\n");
-       if (strlen(tmp) > 8)
-               return -EINVAL;
+       if (strlen(tmp) > 8) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        if (card->options.hsuid[0])
                /* delete old ip address */
@@ -267,11 +279,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                card->options.hsuid[0] = '\0';
                memcpy(card->dev->perm_addr, card->options.hsuid, 9);
                qeth_configure_cq(card, QETH_CQ_DISABLED);
-               return count;
+               goto out;
        }
 
-       if (qeth_configure_cq(card, QETH_CQ_ENABLED))
-               return -EPERM;
+       if (qeth_configure_cq(card, QETH_CQ_ENABLED)) {
+               rc = -EPERM;
+               goto out;
+       }
 
        snprintf(card->options.hsuid, sizeof(card->options.hsuid),
                 "%-8s", tmp);
@@ -280,6 +294,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 
        rc = qeth_l3_modify_hsuid(card, true);
 
+out:
+       mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
 
index c4e4b0136f86e7d86e784c831af1106af846d615..4bc794d2f51c96dd5da708d8c5ef10dcbd14978d 100644 (file)
@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
                "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
        cxgbi_hbas_remove(cdev);
        cxgbi_device_portmap_cleanup(cdev);
-       cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+       if (cdev->cdev2ppm)
+               cxgbi_ppm_release(cdev->cdev2ppm(cdev));
        if (cdev->pmap.max_connect)
                cxgbi_free_big_mem(cdev->pmap.port_csk);
        kfree(cdev);
index 1f55b9e4e74ab63262c0a1ce9e4598243e533cb2..1b88a3b53eee2e4f06c0e41aad1de7f75db6bad4 100644 (file)
@@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
 
 int vnic_dev_hang_notify(struct vnic_dev *vdev)
 {
-       u64 a0, a1;
+       u64 a0 = 0, a1 = 0;
        int wait = 1000;
        return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
 }
 
 int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
 {
-       u64 a0, a1;
+       u64 a[2] = {};
        int wait = 1000;
        int err, i;
 
        for (i = 0; i < ETH_ALEN; i++)
                mac_addr[i] = 0;
 
-       err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+       err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
        if (err)
                return err;
 
        for (i = 0; i < ETH_ALEN; i++)
-               mac_addr[i] = ((u8 *)&a0)[i];
+               mac_addr[i] = ((u8 *)&a)[i];
 
        return 0;
 }
@@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
 
 void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
 {
-       u64 a0 = 0, a1 = 0;
+       u64 a[2] = {};
        int wait = 1000;
        int err;
        int i;
 
        for (i = 0; i < ETH_ALEN; i++)
-               ((u8 *)&a0)[i] = addr[i];
+               ((u8 *)&a)[i] = addr[i];
 
-       err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+       err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
        if (err)
                pr_err("Can't add addr [%pM], %d\n", addr, err);
 }
 
 void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
 {
-       u64 a0 = 0, a1 = 0;
+       u64 a[2] = {};
        int wait = 1000;
        int err;
        int i;
 
        for (i = 0; i < ETH_ALEN; i++)
-               ((u8 *)&a0)[i] = addr[i];
+               ((u8 *)&a)[i] = addr[i];
 
-       err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+       err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
        if (err)
                pr_err("Can't del addr [%pM], %d\n", addr, err);
 }
index 2e6a68d9ea4feaef041a2ed1851d1a2fcb6676af..a5ecbce4eda238a4376ef2230f966ae7eb2c5cb2 100644 (file)
@@ -5385,7 +5385,6 @@ static const struct file_operations lpfc_debugfs_ras_log = {
        .read =         lpfc_debugfs_read,
        .release =      lpfc_debugfs_ras_log_release,
 };
-#endif
 
 #undef lpfc_debugfs_op_dumpHBASlim
 static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
@@ -5557,7 +5556,7 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
        .write =        lpfc_idiag_extacc_write,
        .release =      lpfc_idiag_cmd_release,
 };
-
+#endif
 
 /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
  * @phba: Pointer to HBA context object.
index 6298b17290989d1f04458864756b0952aa131572..6a04fdb3fbf219a073d1ac65a97dd0d33eb7ecf9 100644 (file)
@@ -5883,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
                        break;
                default:
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                                       "1804 Invalid asynchrous event code: "
+                                       "1804 Invalid asynchronous event code: "
                                        "x%x\n", bf_get(lpfc_trailer_code,
                                        &cq_event->cqe.mcqe_cmpl));
                        break;
index c82b5792da98ce5ca09627ab984338d58189a824..625c046ac4efae6778dc665e80395d4a886d7827 100644 (file)
@@ -8555,7 +8555,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
        psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
        spin_unlock_irq(&phba->hbalock);
 
-       /* wake up worker thread to post asynchronlous mailbox command */
+       /* wake up worker thread to post asynchronous mailbox command */
        lpfc_worker_wake_up(phba);
 }
 
@@ -8823,7 +8823,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
                return rc;
        }
 
-       /* Now, interrupt mode asynchrous mailbox command */
+       /* Now, interrupt mode asynchronous mailbox command */
        rc = lpfc_mbox_cmd_check(phba, mboxq);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -13112,11 +13112,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
 }
 
 /**
- * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
  * @phba: Pointer to HBA context object.
  * @cqe: Pointer to mailbox completion queue entry.
  *
- * This routine process a mailbox completion queue entry with asynchrous
+ * This routine process a mailbox completion queue entry with asynchronous
  * event.
  *
  * Return: true if work posted to worker thread, otherwise false.
@@ -13270,7 +13270,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
  * @cqe: Pointer to mailbox completion queue entry.
  *
  * This routine process a mailbox completion queue entry, it invokes the
- * proper mailbox complete handling or asynchrous event handling routine
+ * proper mailbox complete handling or asynchronous event handling routine
  * according to the MCQE's async bit.
  *
  * Return: true if work posted to worker thread, otherwise false.
index 848fbec7bda6a27f56ccdce67434fef98c04137a..45fd8dfb7c4056fba801fe880e0647ef1d40f09a 100644 (file)
@@ -5248,7 +5248,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                                        &ct->chain_buffer_dma);
                        if (!ct->chain_buffer) {
                                ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
-                               _base_release_memory_pools(ioc);
                                goto out;
                        }
                }
index cea625906440ab6f50f16006e4056196b7cb4f99..65ce10c7989ca3a36fab578042b6281e566d47f1 100644 (file)
@@ -2211,8 +2211,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
        u8 type;
        int ret = 0;
 
-       if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+       if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
+               sdkp->protection_type = 0;
                return ret;
+       }
 
        type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
 
index f8faf8b3d9652feca0867387045d433e84a45cb0..fb41636519ee80a6dd92723311783b84346e7c63 100644 (file)
@@ -1842,9 +1842,11 @@ static int storvsc_probe(struct hv_device *device,
         */
        host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
        /*
+        * For non-IDE disks, the host supports multiple channels.
         * Set the number of HW queues we are supporting.
         */
-       host->nr_hw_queues = num_present_cpus();
+       if (!dev_is_ide)
+               host->nr_hw_queues = num_present_cpus();
 
        /*
         * Set the error handler work queue.
index 5823f5b67d1619988451ab1d097f2a46881ac7a9..3f0261d53ad9528b6d2d012a6b4eca49c422b33f 100644 (file)
@@ -323,6 +323,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
                                     struct meson_ee_pwrc *pwrc,
                                     struct meson_ee_pwrc_domain *dom)
 {
+       int ret;
+
        dom->pwrc = pwrc;
        dom->num_rstc = dom->desc.reset_names_count;
        dom->num_clks = dom->desc.clk_names_count;
@@ -368,15 +370,21 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
          * prepare/enable counters won't be in sync.
          */
        if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) {
-               int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
+               ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
                if (ret)
                        return ret;
 
-               pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false);
-       } else
-               pm_genpd_init(&dom->base, NULL,
-                             (dom->desc.get_power ?
-                              dom->desc.get_power(dom) : true));
+               ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov,
+                                   false);
+               if (ret)
+                       return ret;
+       } else {
+               ret = pm_genpd_init(&dom->base, NULL,
+                                   (dom->desc.get_power ?
+                                    dom->desc.get_power(dom) : true));
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
@@ -441,9 +449,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
                pwrc->xlate.domains[i] = &dom->base;
        }
 
-       of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
-
-       return 0;
+       return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
 }
 
 static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
index a9ffff3277c77fa7b1847e45b890689aed909d7c..a5069394cd617771681cc7372167f87e2ac9a9de 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
-#include <asm/sifive_l2_cache.h>
+#include <soc/sifive/sifive_l2_cache.h>
 
 #define SIFIVE_L2_DIRECCFIX_LOW 0x100
 #define SIFIVE_L2_DIRECCFIX_HIGH 0x104
index 378369d9364ae4d6c4f24cdf40f5e4aa5b4fcc04..e9ece45d7a3334e7612cc4f61c616873472e312c 100644 (file)
@@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
        ret = rproc_boot(m3_ipc->rproc);
        if (ret)
                dev_err(dev, "rproc_boot failed\n");
+       else
+               m3_ipc_state = m3_ipc;
 
        do_exit(0);
 }
@@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
                goto err_put_rproc;
        }
 
-       m3_ipc_state = m3_ipc;
-
        return 0;
 
 err_put_rproc:
index 76d6b94a7597ff9b0686f3ac81c989f1fac711d2..5a25da3771192b0a69a1b04b7ff7d21608b22229 100644 (file)
@@ -172,9 +172,11 @@ static inline u32 rx_max(struct dw_spi *dws)
 
 static void dw_writer(struct dw_spi *dws)
 {
-       u32 max = tx_max(dws);
+       u32 max;
        u16 txw = 0;
 
+       spin_lock(&dws->buf_lock);
+       max = tx_max(dws);
        while (max--) {
                /* Set the tx word if the transfer's original "tx" is not null */
                if (dws->tx_end - dws->len) {
@@ -186,13 +188,16 @@ static void dw_writer(struct dw_spi *dws)
                dw_write_io_reg(dws, DW_SPI_DR, txw);
                dws->tx += dws->n_bytes;
        }
+       spin_unlock(&dws->buf_lock);
 }
 
 static void dw_reader(struct dw_spi *dws)
 {
-       u32 max = rx_max(dws);
+       u32 max;
        u16 rxw;
 
+       spin_lock(&dws->buf_lock);
+       max = rx_max(dws);
        while (max--) {
                rxw = dw_read_io_reg(dws, DW_SPI_DR);
                /* Care rx only if the transfer's original "rx" is not null */
@@ -204,6 +209,7 @@ static void dw_reader(struct dw_spi *dws)
                }
                dws->rx += dws->n_bytes;
        }
+       spin_unlock(&dws->buf_lock);
 }
 
 static void int_error_stop(struct dw_spi *dws, const char *msg)
@@ -276,18 +282,20 @@ static int dw_spi_transfer_one(struct spi_controller *master,
 {
        struct dw_spi *dws = spi_controller_get_devdata(master);
        struct chip_data *chip = spi_get_ctldata(spi);
+       unsigned long flags;
        u8 imask = 0;
        u16 txlevel = 0;
        u32 cr0;
        int ret;
 
        dws->dma_mapped = 0;
-
+       spin_lock_irqsave(&dws->buf_lock, flags);
        dws->tx = (void *)transfer->tx_buf;
        dws->tx_end = dws->tx + transfer->len;
        dws->rx = transfer->rx_buf;
        dws->rx_end = dws->rx + transfer->len;
        dws->len = transfer->len;
+       spin_unlock_irqrestore(&dws->buf_lock, flags);
 
        spi_enable_chip(dws, 0);
 
@@ -471,6 +479,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        dws->type = SSI_MOTO_SPI;
        dws->dma_inited = 0;
        dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+       spin_lock_init(&dws->buf_lock);
 
        spi_controller_set_devdata(master, dws);
 
index 38c7de1f0aa948c0fff3af98d596f53c3bfddd18..1bf5713e047d35c6f44fd329860fa0db490a3e61 100644 (file)
@@ -119,6 +119,7 @@ struct dw_spi {
        size_t                  len;
        void                    *tx;
        void                    *tx_end;
+       spinlock_t              buf_lock;
        void                    *rx;
        void                    *rx_end;
        int                     dma_mapped;
index 442cff71a0d2e95afdc378b0b1ad9d2d1b71653d..8428b69c858bc960db75b80279003020c7a681fd 100644 (file)
@@ -185,6 +185,7 @@ struct fsl_dspi {
        struct spi_transfer                     *cur_transfer;
        struct spi_message                      *cur_msg;
        struct chip_data                        *cur_chip;
+       size_t                                  progress;
        size_t                                  len;
        const void                              *tx;
        void                                    *rx;
@@ -586,21 +587,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi)
        dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
 
        if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
-               /* Write two TX FIFO entries first, and then the corresponding
-                * CMD FIFO entry.
+               /* Write the CMD FIFO entry first, and then the two
+                * corresponding TX FIFO entries.
                 */
                u32 data = dspi_pop_tx(dspi);
 
-               if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
-                       /* LSB */
-                       tx_fifo_write(dspi, data & 0xFFFF);
-                       tx_fifo_write(dspi, data >> 16);
-               } else {
-                       /* MSB */
-                       tx_fifo_write(dspi, data >> 16);
-                       tx_fifo_write(dspi, data & 0xFFFF);
-               }
                cmd_fifo_write(dspi);
+               tx_fifo_write(dspi, data & 0xFFFF);
+               tx_fifo_write(dspi, data >> 16);
        } else {
                /* Write one entry to both TX FIFO and CMD FIFO
                 * simultaneously.
@@ -658,7 +652,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
        u32 spi_tcr;
 
        spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
-                               dspi->tx - dspi->bytes_per_word, !dspi->irq);
+                               dspi->progress, !dspi->irq);
 
        /* Get transfer counter (in number of SPI transfers). It was
         * reset to 0 when transfer(s) were started.
@@ -667,6 +661,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
        spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
        /* Update total number of bytes that were transferred */
        msg->actual_length += spi_tcnt * dspi->bytes_per_word;
+       dspi->progress += spi_tcnt;
 
        trans_mode = dspi->devtype_data->trans_mode;
        if (trans_mode == DSPI_EOQ_MODE)
@@ -679,7 +674,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
                return 0;
 
        spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
-                              dspi->tx, !dspi->irq);
+                              dspi->progress, !dspi->irq);
 
        if (trans_mode == DSPI_EOQ_MODE)
                dspi_eoq_write(dspi);
@@ -768,6 +763,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
                dspi->rx = transfer->rx_buf;
                dspi->rx_end = dspi->rx + transfer->len;
                dspi->len = transfer->len;
+               dspi->progress = 0;
                /* Validated transfer specific frame size (defaults applied) */
                dspi->bits_per_word = transfer->bits_per_word;
                if (transfer->bits_per_word <= 8)
@@ -789,7 +785,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
                                     SPI_CTARE_DTCP(1));
 
                spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
-                                      dspi->tx, !dspi->irq);
+                                      dspi->progress, !dspi->irq);
 
                trans_mode = dspi->devtype_data->trans_mode;
                switch (trans_mode) {
index 47cde1864630e02b5e3c8d253bb636c698cebdb9..ce9b30112e260e5ee8bb550907f5ef52d209f63a 100644 (file)
@@ -290,25 +290,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
        }
 }
 
-static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
+                                           unsigned int threshold)
 {
-       unsigned int fifo_threshold, fill_bytes;
        u32 val;
 
-       fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
-                               bytes_per_word(priv->bits_per_word));
-       fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
-
-       fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);
-
-       /* set fifo threshold */
        val = readl(priv->base + SSI_FC);
        val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
-       val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
-       val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
+       val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
+       val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
        writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+       unsigned int fifo_threshold, fill_words;
+       unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+       fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
+       fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+       uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
+
+       fill_words = fifo_threshold -
+               DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
 
-       while (fill_bytes--)
+       while (fill_words--)
                uniphier_spi_send(priv);
 }
 
index 5e4c4532f7f326f74dc7d816db7699ea6d03e09c..8994545367a2da7571b6a9707e291e0b857ab186 100644 (file)
@@ -1499,8 +1499,7 @@ static void spi_pump_messages(struct kthread_work *work)
  *                         advances its @tx buffer pointer monotonically.
  * @ctlr: Pointer to the spi_controller structure of the driver
  * @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver is
- *     preparing to transmit right now.
+ * @progress: How many words (not bytes) have been transferred so far
  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
  *           transfer, for less jitter in time measurement. Only compatible
  *           with PIO drivers. If true, must follow up with
@@ -1510,21 +1509,19 @@ static void spi_pump_messages(struct kthread_work *work)
  */
 void spi_take_timestamp_pre(struct spi_controller *ctlr,
                            struct spi_transfer *xfer,
-                           const void *tx, bool irqs_off)
+                           size_t progress, bool irqs_off)
 {
-       u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
        if (!xfer->ptp_sts)
                return;
 
        if (xfer->timestamped_pre)
                return;
 
-       if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word))
+       if (progress < xfer->ptp_sts_word_pre)
                return;
 
        /* Capture the resolution of the timestamp */
-       xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word;
+       xfer->ptp_sts_word_pre = progress;
 
        xfer->timestamped_pre = true;
 
@@ -1546,23 +1543,20 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
  *                          timestamped.
  * @ctlr: Pointer to the spi_controller structure of the driver
  * @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver has
- *     just transmitted.
+ * @progress: How many words (not bytes) have been transferred so far
  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
  */
 void spi_take_timestamp_post(struct spi_controller *ctlr,
                             struct spi_transfer *xfer,
-                            const void *tx, bool irqs_off)
+                            size_t progress, bool irqs_off)
 {
-       u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
        if (!xfer->ptp_sts)
                return;
 
        if (xfer->timestamped_post)
                return;
 
-       if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word))
+       if (progress < xfer->ptp_sts_word_post)
                return;
 
        ptp_read_system_postts(xfer->ptp_sts);
@@ -1573,7 +1567,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
        }
 
        /* Capture the resolution of the timestamp */
-       xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word;
+       xfer->ptp_sts_word_post = progress;
 
        xfer->timestamped_post = true;
 }
index dbff0f7e7cf5272212c2d16bfffcad9a3aff8ca8..ddc0dc93d08b64182cc9d2b56325bde0e3af0931 100644 (file)
@@ -46,8 +46,8 @@
 #define PCI171X_RANGE_UNI      BIT(4)
 #define PCI171X_RANGE_GAIN(x)  (((x) & 0x7) << 0)
 #define PCI171X_MUX_REG                0x04    /* W:   A/D multiplexor control */
-#define PCI171X_MUX_CHANH(x)   (((x) & 0xf) << 8)
-#define PCI171X_MUX_CHANL(x)   (((x) & 0xf) << 0)
+#define PCI171X_MUX_CHANH(x)   (((x) & 0xff) << 8)
+#define PCI171X_MUX_CHANL(x)   (((x) & 0xff) << 0)
 #define PCI171X_MUX_CHAN(x)    (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x))
 #define PCI171X_STATUS_REG     0x06    /* R:   status register */
 #define PCI171X_STATUS_IRQ     BIT(11) /* 1=IRQ occurred */
index 673d732dcb8faefe70039b08e4fa39cf342ef365..8f398b30f5bf9d6a4572519073e3de81548b5bff 100644 (file)
@@ -72,9 +72,6 @@ static int ni_find_device_routes(const char *device_family,
                }
        }
 
-       if (!rv)
-               return -ENODATA;
-
        /* Second, find the set of routes valid for this device. */
        for (i = 0; ni_device_routes_list[i]; ++i) {
                if (memcmp(ni_device_routes_list[i]->device, board_name,
@@ -84,12 +81,12 @@ static int ni_find_device_routes(const char *device_family,
                }
        }
 
-       if (!dr)
-               return -ENODATA;
-
        tables->route_values = rv;
        tables->valid_routes = dr;
 
+       if (!rv || !dr)
+               return -ENODATA;
+
        return 0;
 }
 
@@ -487,6 +484,9 @@ int ni_find_route_source(const u8 src_sel_reg_value, int dest,
 {
        int src;
 
+       if (!tables->route_values)
+               return -EINVAL;
+
        dest = B(dest); /* subtract NI names offset */
        /* ensure we are not going to under/over run the route value table */
        if (dest < 0 || dest >= NI_NUM_NAMES)
index 08eaa0bad0de66405f3b716f239a9fdb107d39ea..1c9c3ba4d518dae30d63e387e0b97b9316dd887e 100644 (file)
@@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s {
        __u16 reserved1;
        __u32 bayer_sign;
        __u8 bayer_nf;
-       __u8 reserved2[3];
+       __u8 reserved2[7];
 } __attribute__((aligned(32))) __packed;
 
 /**
index a7cac0719b8bcb322ed6af3ffa2e925352bd9572..b5d42f411dd810bd2670c6a34c9217609bbdbbd3 100644 (file)
@@ -37,6 +37,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
        {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
        {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
        {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+       {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
        {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
        {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
        {}      /* Terminating entry */
index 8d19ae71e7cc9124f666f1807e8752e379a222b3..4e651b698617f1a025b7704ead8c1f1e275728df 100644 (file)
@@ -449,8 +449,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
 
        memcpy(array, addr, length);
 
-       ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
-                             MESSAGE_REQUEST_BBREG, length, array);
+       ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+                                    MESSAGE_REQUEST_BBREG, length, array);
        if (ret)
                goto end;
 
index 56cd77fd9ea021d9849b0e4ed5a1abc26bc9c7a6..7958fc165462fd0b951183e8b2a5bb481a113389 100644 (file)
@@ -719,7 +719,7 @@ int vnt_radio_power_off(struct vnt_private *priv)
  */
 int vnt_radio_power_on(struct vnt_private *priv)
 {
-       int ret = true;
+       int ret = 0;
 
        vnt_exit_deep_sleep(priv);
 
index 6074ceda78bfc1c0b3b7f85d947c9efa5351abc6..50e1c89180409cc50f6e96633dedc46bd3b8ae95 100644 (file)
@@ -259,6 +259,7 @@ struct vnt_private {
        u8 mac_hw;
        /* netdev */
        struct usb_device *usb;
+       struct usb_interface *intf;
 
        u64 tsf_time;
        u8 rx_rate;
index 4ac85ecb0921df0b1066aa46b7b2c05ea0115b0d..9cb924c545719bd5c49cccc3f86cdd41916147c2 100644 (file)
@@ -949,7 +949,7 @@ static const struct ieee80211_ops vnt_mac_ops = {
 
 int vnt_init(struct vnt_private *priv)
 {
-       if (!(vnt_init_registers(priv)))
+       if (vnt_init_registers(priv))
                return -EAGAIN;
 
        SET_IEEE80211_PERM_ADDR(priv->hw, priv->permanent_net_addr);
@@ -992,6 +992,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
        priv = hw->priv;
        priv->hw = hw;
        priv->usb = udev;
+       priv->intf = intf;
 
        vnt_set_options(priv);
 
index d3304df6bd53de0a6db5c41f903cce5ee7561146..d977d4777e4f672889f886223b41ea3db07cb00c 100644 (file)
@@ -59,7 +59,9 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
 
        kfree(usb_buffer);
 
-       if (ret >= 0 && ret < (int)length)
+       if (ret == (int)length)
+               ret = 0;
+       else
                ret = -EIO;
 
 end_unlock:
@@ -74,6 +76,23 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data)
                               reg_off, reg, sizeof(u8), &data);
 }
 
+int vnt_control_out_blocks(struct vnt_private *priv,
+                          u16 block, u8 reg, u16 length, u8 *data)
+{
+       int ret = 0, i;
+
+       for (i = 0; i < length; i += block) {
+               u16 len = min_t(int, length - i, block);
+
+               ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE,
+                                     i, reg, len, data + i);
+               if (ret)
+                       goto end;
+       }
+end:
+       return ret;
+}
+
 int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
                   u16 index, u16 length, u8 *buffer)
 {
@@ -103,7 +122,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
 
        kfree(usb_buffer);
 
-       if (ret >= 0 && ret < (int)length)
+       if (ret == (int)length)
+               ret = 0;
+       else
                ret = -EIO;
 
 end_unlock:
index 95147ec7b96ae8e63af958a7453f231e6a084825..b65d9c01a211da4dd2204fca2feab873b6a2a5ac 100644 (file)
@@ -18,6 +18,8 @@
 
 #include "device.h"
 
+#define VNT_REG_BLOCK_SIZE     64
+
 int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
                    u16 index, u16 length, u8 *buffer);
 int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
@@ -26,6 +28,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
 int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data);
 int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data);
 
+int vnt_control_out_blocks(struct vnt_private *priv,
+                          u16 block, u8 reg, u16 len, u8 *data);
+
 int vnt_start_interrupt_urb(struct vnt_private *priv);
 int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb);
 int vnt_tx_context(struct vnt_private *priv,
index 3eb2f11a5de13676247c8daf4d2ac7407ecfe555..2c5250ca2801d455aa65408f3f07571a9c280e86 100644 (file)
@@ -99,6 +99,7 @@ void vnt_run_command(struct work_struct *work)
                if (vnt_init(priv)) {
                        /* If fail all ends TODO retry */
                        dev_err(&priv->usb->dev, "failed to start\n");
+                       usb_set_intfdata(priv->intf, NULL);
                        ieee80211_free_hw(priv->hw);
                        return;
                }
index 6949ea8bc387c7acac2571862d36f0c3f2e0ca04..51ffd5c002dee2da58281ce0e1c2032dac1ad9e2 100644 (file)
@@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
        }
 
        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
-       bip_set_seed(bip, bio->bi_iter.bi_sector);
+       /* virtual start sector must be in integrity interval units */
+       bip_set_seed(bip, bio->bi_iter.bi_sector >>
+                                 (bi->interval_exp - SECTOR_SHIFT));
 
        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);
index 0332a5301d6136d83f6cd101fd7be666efc9dd17..d767eebf30bdd5625b10350904b019edbe8166ca 100644 (file)
@@ -28,9 +28,22 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
        shm->size = PAGE_SIZE << order;
 
        if (shm->flags & TEE_SHM_DMA_BUF) {
+               unsigned int nr_pages = 1 << order, i;
+               struct page **pages;
+
+               pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
+               if (!pages)
+                       return -ENOMEM;
+
+               for (i = 0; i < nr_pages; i++) {
+                       pages[i] = page;
+                       page++;
+               }
+
                shm->flags |= TEE_SHM_REGISTER;
-               rc = optee_shm_register(shm->ctx, shm, &page, 1 << order,
+               rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
                                        (unsigned long)shm->kaddr);
+               kfree(pages);
        }
 
        return rc;
index 015e7d2015985343edab8f139312706da4e29d1a..0e7cf52369326af5d814c9874c35faf207a98e57 100644 (file)
@@ -110,6 +110,9 @@ static int tsens_register(struct tsens_priv *priv)
        irq = platform_get_irq_byname(pdev, "uplow");
        if (irq < 0) {
                ret = irq;
+               /* For old DTs with no IRQ defined */
+               if (irq == -ENXIO)
+                       ret = 0;
                goto err_put_device;
        }
 
index 226adeec2aedc0b231924a3ddaa5b743a5c850c8..ce5309d002805ef6f4f05220204c339a723965d3 100644 (file)
@@ -663,6 +663,12 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
        return AE_OK;
 }
 
+static const struct acpi_device_id serdev_acpi_devices_blacklist[] = {
+       { "INT3511", 0 },
+       { "INT3512", 0 },
+       { },
+};
+
 static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
                                          void *data, void **return_value)
 {
@@ -675,6 +681,10 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
        if (acpi_device_enumerated(adev))
                return AE_OK;
 
+       /* Skip if black listed */
+       if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist))
+               return AE_OK;
+
        if (acpi_serdev_check_resources(ctrl, adev))
                return AE_OK;
 
index 5023c85ebc6e0002d9984ad8e24090b8531a7c8b..044c3cbdcfa40664497d13bd00e607584eff99c7 100644 (file)
@@ -89,8 +89,7 @@ void tty_port_link_device(struct tty_port *port,
 {
        if (WARN_ON(index >= driver->num))
                return;
-       if (!driver->ports[index])
-               driver->ports[index] = port;
+       driver->ports[index] = port;
 }
 EXPORT_SYMBOL_GPL(tty_port_link_device);
 
index 4c1e755093039d0582ca30922d620009441aed0c..02f6ca2cb1ba1c99d26efd2e225889b0f9c3908d 100644 (file)
@@ -1375,13 +1375,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
  */
 static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
 {
-       struct cdns3_device *priv_dev;
-       struct cdns3 *cdns = data;
+       struct cdns3_device *priv_dev = data;
        irqreturn_t ret = IRQ_NONE;
        u32 reg;
 
-       priv_dev = cdns->gadget_dev;
-
        /* check USB device interrupt */
        reg = readl(&priv_dev->regs->usb_ists);
        if (reg) {
@@ -1419,14 +1416,12 @@ static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
  */
 static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
 {
-       struct cdns3_device *priv_dev;
-       struct cdns3 *cdns = data;
+       struct cdns3_device *priv_dev = data;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;
        int bit;
        u32 reg;
 
-       priv_dev = cdns->gadget_dev;
        spin_lock_irqsave(&priv_dev->lock, flags);
 
        reg = readl(&priv_dev->regs->usb_ists);
@@ -2539,7 +2534,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
 
        priv_dev = cdns->gadget_dev;
 
-       devm_free_irq(cdns->dev, cdns->dev_irq, cdns);
+       devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
 
        pm_runtime_mark_last_busy(cdns->dev);
        pm_runtime_put_autosuspend(cdns->dev);
@@ -2710,7 +2705,8 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
        ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
                                        cdns3_device_irq_handler,
                                        cdns3_device_thread_irq_handler,
-                                       IRQF_SHARED, dev_name(cdns->dev), cdns);
+                                       IRQF_SHARED, dev_name(cdns->dev),
+                                       cdns->gadget_dev);
 
        if (ret)
                goto err0;
index b45ceb91c735e71a6f4db81e5dd24aa3a77819e8..48e4a5ca183591fd577b9e3cc9e0174a3b739beb 100644 (file)
@@ -26,6 +26,7 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd);
 
 struct ehci_ci_priv {
        struct regulator *reg_vbus;
+       bool enabled;
 };
 
 static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
@@ -37,7 +38,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
        int ret = 0;
        int port = HCS_N_PORTS(ehci->hcs_params);
 
-       if (priv->reg_vbus) {
+       if (priv->reg_vbus && enable != priv->enabled) {
                if (port > 1) {
                        dev_warn(dev,
                                "Not support multi-port regulator control\n");
@@ -53,6 +54,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
                                enable ? "enable" : "disable", ret);
                        return ret;
                }
+               priv->enabled = enable;
        }
 
        if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
index 5f40117e68e76e07949d95182abfe39af7936a21..26bc05e48d8a7414121dd348e7dfc06e6916cedc 100644 (file)
@@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
        [USB_ENDPOINT_XFER_INT] = 1024,
 };
 
-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
-    int asnum, struct usb_host_interface *ifp, int num_ep,
-    unsigned char *buffer, int size)
+static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
+               struct usb_endpoint_descriptor *e2)
+{
+       if (e1->bEndpointAddress == e2->bEndpointAddress)
+               return true;
+
+       if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
+               if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
+                       return true;
+       }
+
+       return false;
+}
+
+/*
+ * Check for duplicate endpoint addresses in other interfaces and in the
+ * altsetting currently being parsed.
+ */
+static bool config_endpoint_is_duplicate(struct usb_host_config *config,
+               int inum, int asnum, struct usb_endpoint_descriptor *d)
+{
+       struct usb_endpoint_descriptor *epd;
+       struct usb_interface_cache *intfc;
+       struct usb_host_interface *alt;
+       int i, j, k;
+
+       for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+               intfc = config->intf_cache[i];
+
+               for (j = 0; j < intfc->num_altsetting; ++j) {
+                       alt = &intfc->altsetting[j];
+
+                       if (alt->desc.bInterfaceNumber == inum &&
+                                       alt->desc.bAlternateSetting != asnum)
+                               continue;
+
+                       for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
+                               epd = &alt->endpoint[k].desc;
+
+                               if (endpoint_is_duplicate(epd, d))
+                                       return true;
+                       }
+               }
+       }
+
+       return false;
+}
+
+static int usb_parse_endpoint(struct device *ddev, int cfgno,
+               struct usb_host_config *config, int inum, int asnum,
+               struct usb_host_interface *ifp, int num_ep,
+               unsigned char *buffer, int size)
 {
        unsigned char *buffer0 = buffer;
        struct usb_endpoint_descriptor *d;
@@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                goto skip_to_next_endpoint_or_interface_descriptor;
 
        /* Check for duplicate endpoint addresses */
-       for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
-               if (ifp->endpoint[i].desc.bEndpointAddress ==
-                   d->bEndpointAddress) {
-                       dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
-                           cfgno, inum, asnum, d->bEndpointAddress);
-                       goto skip_to_next_endpoint_or_interface_descriptor;
-               }
+       if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+               dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+                               cfgno, inum, asnum, d->bEndpointAddress);
+               goto skip_to_next_endpoint_or_interface_descriptor;
        }
 
        endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
@@ -346,12 +392,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                        endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
        }
 
-       /* Validate the wMaxPacketSize field */
+       /*
+        * Validate the wMaxPacketSize field.
+        * Some devices have isochronous endpoints in altsetting 0;
+        * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
+        * (see the end of section 5.6.3), so don't warn about them.
+        */
        maxp = usb_endpoint_maxp(&endpoint->desc);
-       if (maxp == 0) {
-               dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+       if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+               dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
                    cfgno, inum, asnum, d->bEndpointAddress);
-               goto skip_to_next_endpoint_or_interface_descriptor;
        }
 
        /* Find the highest legal maxpacket size for this endpoint */
@@ -522,8 +572,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
                if (((struct usb_descriptor_header *) buffer)->bDescriptorType
                     == USB_DT_INTERFACE)
                        break;
-               retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
-                   num_ep, buffer, size);
+               retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
+                               alt, num_ep, buffer, size);
                if (retval < 0)
                        return retval;
                ++n;
index f229ad6952c0a941a9952fe619d847cf5ccab58b..3405b146edc94f3e6fa153a769fcccccb7ee4465 100644 (file)
@@ -1192,6 +1192,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                         * PORT_OVER_CURRENT is not. So check for any of them.
                         */
                        if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+                           (portchange & USB_PORT_STAT_C_CONNECTION) ||
                            (portstatus & USB_PORT_STAT_OVERCURRENT) ||
                            (portchange & USB_PORT_STAT_C_OVERCURRENT))
                                set_bit(port1, hub->change_bits);
@@ -2692,7 +2693,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
 #define SET_ADDRESS_TRIES      2
 #define GET_DESCRIPTOR_TRIES   2
 #define SET_CONFIG_TRIES       (2 * (use_both_schemes + 1))
-#define USE_NEW_SCHEME(i, scheme)      ((i) / 2 == (int)scheme)
+#define USE_NEW_SCHEME(i, scheme)      ((i) / 2 == (int)(scheme))
 
 #define HUB_ROOT_RESET_TIME    60      /* times are in msec */
 #define HUB_SHORT_RESET_TIME   10
index 0c960a97ea0214ed73e5fe018587aafb6c8d3d8f..154f3f3e8cff849754904721e2f0730f25fff7cd 100644 (file)
@@ -2467,6 +2467,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
 
 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
 {
+       /*
+        * For OUT direction, host may send less than the setup
+        * length. Return true for all OUT requests.
+        */
+       if (!req->direction)
+               return true;
+
        return req->request.actual == req->request.length;
 }
 
index ae70ce29d5e42d8e3a4e280de8a9b5ecc969adc9..797d6ace89943a0015a3814bef336b7dde724524 100644 (file)
@@ -445,6 +445,7 @@ config USB_TEGRA_XUDC
        tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
        depends on ARCH_TEGRA || COMPILE_TEST
        depends on PHY_TEGRA_XUSB
+       select USB_ROLE_SWITCH
        help
         Enables NVIDIA Tegra USB 3.0 device mode controller driver.
 
index 38183ac438c673871b08ecb485bcf6b20893bb18..1371b0c249ece0d4985222e74f91ea6caf637128 100644 (file)
@@ -415,13 +415,17 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
        }
 
        da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
-       if (IS_ERR(da8xx_ohci->oc_gpio))
+       if (IS_ERR(da8xx_ohci->oc_gpio)) {
+               error = PTR_ERR(da8xx_ohci->oc_gpio);
                goto err;
+       }
 
        if (da8xx_ohci->oc_gpio) {
                oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio);
-               if (oc_irq < 0)
+               if (oc_irq < 0) {
+                       error = oc_irq;
                        goto err;
+               }
 
                error = devm_request_threaded_irq(dev, oc_irq, NULL,
                                ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
index 5261f8dfedecd4c6fa4b7da20b43edce50cd0d62..e3b8c84ccdb80c6087e14dae2e3fca5e3c0122fd 100644 (file)
@@ -75,14 +75,17 @@ static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
 static int jz4740_musb_init(struct musb *musb)
 {
        struct device *dev = musb->controller->parent;
+       int err;
 
        if (dev->of_node)
                musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
        else
                musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
        if (IS_ERR(musb->xceiv)) {
-               dev_err(dev, "No transceiver configured\n");
-               return PTR_ERR(musb->xceiv);
+               err = PTR_ERR(musb->xceiv);
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev, "No transceiver configured: %d", err);
+               return err;
        }
 
        /* Silicon does not implement ConfigData register.
index 15cca912c53e3e5849ac3f54b9f4ab8d57340d6a..5ebf30bd61bd1f4da6b45e0935d988f261a09f83 100644 (file)
@@ -1840,6 +1840,9 @@ ATTRIBUTE_GROUPS(musb);
 #define MUSB_QUIRK_B_INVALID_VBUS_91   (MUSB_DEVCTL_BDEVICE | \
                                         (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
                                         MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_B_DISCONNECT_99     (MUSB_DEVCTL_BDEVICE | \
+                                        (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+                                        MUSB_DEVCTL_SESSION)
 #define MUSB_QUIRK_A_DISCONNECT_19     ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
                                         MUSB_DEVCTL_SESSION)
 
@@ -1862,6 +1865,11 @@ static void musb_pm_runtime_check_session(struct musb *musb)
        s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
                MUSB_DEVCTL_HR;
        switch (devctl & ~s) {
+       case MUSB_QUIRK_B_DISCONNECT_99:
+               musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+               schedule_delayed_work(&musb->irq_work,
+                                     msecs_to_jiffies(1000));
+               break;
        case MUSB_QUIRK_B_INVALID_VBUS_91:
                if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
@@ -2310,6 +2318,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        musb_disable_interrupts(musb);
        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 
+       /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
+       musb_writeb(musb->mregs, MUSB_POWER, 0);
+
        /* Init IRQ workqueue before request_irq */
        INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
        INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
index 5fc6825745f21bd6246d71b99fd98d7313396f01..2d3751d885b429cb008e46dbfab5a295b815b794 100644 (file)
@@ -425,7 +425,7 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
        controller->controller.channel_abort = dma_channel_abort;
 
        if (request_irq(irq, dma_controller_irq, 0,
-                       dev_name(musb->controller), &controller->controller)) {
+                       dev_name(musb->controller), controller)) {
                dev_err(dev, "request_irq %d failed!\n", irq);
                musb_dma_controller_destroy(&controller->controller);
 
index df582fe855f06b727aa517487703177ccfbf8d79..d3f420f3a083570d4d3372514c39430caa9fc8d8 100644 (file)
@@ -642,9 +642,13 @@ static int ch341_tiocmget(struct tty_struct *tty)
 static int ch341_reset_resume(struct usb_serial *serial)
 {
        struct usb_serial_port *port = serial->port[0];
-       struct ch341_private *priv = usb_get_serial_port_data(port);
+       struct ch341_private *priv;
        int ret;
 
+       priv = usb_get_serial_port_data(port);
+       if (!priv)
+               return 0;
+
        /* reconfigure ch341 serial port after bus-reset */
        ch341_configure(serial->dev, priv);
 
index 9690a5f4b9d611b26680f767310e5bcc34892b5c..5737add6a2a436b440792a14112288c4c4801c02 100644 (file)
@@ -716,7 +716,7 @@ static void edge_interrupt_callback(struct urb *urb)
                        if (txCredits) {
                                port = edge_serial->serial->port[portNumber];
                                edge_port = usb_get_serial_port_data(port);
-                               if (edge_port->open) {
+                               if (edge_port && edge_port->open) {
                                        spin_lock_irqsave(&edge_port->ep_lock,
                                                          flags);
                                        edge_port->txCredits += txCredits;
@@ -1725,7 +1725,8 @@ static void edge_break(struct tty_struct *tty, int break_state)
 static void process_rcvd_data(struct edgeport_serial *edge_serial,
                                unsigned char *buffer, __u16 bufferLength)
 {
-       struct device *dev = &edge_serial->serial->dev->dev;
+       struct usb_serial *serial = edge_serial->serial;
+       struct device *dev = &serial->dev->dev;
        struct usb_serial_port *port;
        struct edgeport_port *edge_port;
        __u16 lastBufferLength;
@@ -1821,11 +1822,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
 
                        /* spit this data back into the tty driver if this
                           port is open */
-                       if (rxLen) {
-                               port = edge_serial->serial->port[
-                                                       edge_serial->rxPort];
+                       if (rxLen && edge_serial->rxPort < serial->num_ports) {
+                               port = serial->port[edge_serial->rxPort];
                                edge_port = usb_get_serial_port_data(port);
-                               if (edge_port->open) {
+                               if (edge_port && edge_port->open) {
                                        dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
                                                __func__, rxLen,
                                                edge_serial->rxPort);
@@ -1833,8 +1833,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
                                                        rxLen);
                                        edge_port->port->icount.rx += rxLen;
                                }
-                               buffer += rxLen;
                        }
+                       buffer += rxLen;
                        break;
 
                case EXPECT_HDR3:       /* Expect 3rd byte of status header */
@@ -1869,6 +1869,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
        __u8 code = edge_serial->rxStatusCode;
 
        /* switch the port pointer to the one being currently talked about */
+       if (edge_serial->rxPort >= edge_serial->serial->num_ports)
+               return;
        port = edge_serial->serial->port[edge_serial->rxPort];
        edge_port = usb_get_serial_port_data(port);
        if (edge_port == NULL) {
index e66a59ef43a1cb59cfa0d41b8af8c7316ededa69..aa3dbce22cfbe857af446804f28440c691729cb7 100644 (file)
@@ -1058,6 +1058,8 @@ static void       usa49_glocont_callback(struct urb *urb)
        for (i = 0; i < serial->num_ports; ++i) {
                port = serial->port[i];
                p_priv = usb_get_serial_port_data(port);
+               if (!p_priv)
+                       continue;
 
                if (p_priv->resend_cont) {
                        dev_dbg(&port->dev, "%s - sending setup\n", __func__);
@@ -1459,6 +1461,8 @@ static void usa67_glocont_callback(struct urb *urb)
        for (i = 0; i < serial->num_ports; ++i) {
                port = serial->port[i];
                p_priv = usb_get_serial_port_data(port);
+               if (!p_priv)
+                       continue;
 
                if (p_priv->resend_cont) {
                        dev_dbg(&port->dev, "%s - sending setup\n", __func__);
index cb7aac9cd9e72aadac7bc474ffecc4b57e88e828..ed2b4e6dca385bf4d6ae234e83bf11dd3e76df85 100644 (file)
@@ -113,7 +113,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
        retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
                                requesttype,
                                USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
-                               0, 0, buffer, 1, 0);
+                               0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT);
        kfree(buffer);
 
        if (retval < 0)
index e9491d400a24fae06c90387f585a3c88e38a71bd..084cc2fff3ae318f4819bc495f865c7952d79983 100644 (file)
@@ -248,6 +248,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_BG96                   0x0296
 #define QUECTEL_PRODUCT_EP06                   0x0306
 #define QUECTEL_PRODUCT_EM12                   0x0512
+#define QUECTEL_PRODUCT_RM500Q                 0x0800
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -567,6 +568,9 @@ static void option_instat_callback(struct urb *urb);
 /* Interface must have two endpoints */
 #define NUMEP2         BIT(16)
 
+/* Device needs ZLP */
+#define ZLP            BIT(17)
+
 
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1101,6 +1105,11 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+         .driver_info = ZLP },
+
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1172,6 +1181,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff),    /* Telit ME910 (ECM) */
          .driver_info = NCTRL(0) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff),    /* Telit ME910G1 */
+         .driver_info = NCTRL(0) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -1196,6 +1207,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),    /* Telit LN940 (MBIM) */
          .driver_info = NCTRL(0) },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x9010),                          /* Telit SBL FN980 flashing device */
+         .driver_info = NCTRL(0) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) },
@@ -2097,6 +2110,9 @@ static int option_attach(struct usb_serial *serial)
        if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
                data->use_send_setup = 1;
 
+       if (device_flags & ZLP)
+               data->use_zlp = 1;
+
        spin_lock_init(&data->susp_lock);
 
        usb_set_serial_data(serial, data);
index a62981ca7a73557bbc788a994ed99df82d669a34..f93b81a297d6798f6b10da299945e60533c8d56f 100644 (file)
@@ -841,7 +841,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
        u8 newMSR = (u8) *ch;
        unsigned long flags;
 
+       /* May be called from qt2_process_read_urb() for an unbound port. */
        port_priv = usb_get_serial_port_data(port);
+       if (!port_priv)
+               return;
 
        spin_lock_irqsave(&port_priv->lock, flags);
        port_priv->shadowMSR = newMSR;
@@ -869,7 +872,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch)
        unsigned long flags;
        u8 newLSR = (u8) *ch;
 
+       /* May be called from qt2_process_read_urb() for an unbound port. */
        port_priv = usb_get_serial_port_data(port);
+       if (!port_priv)
+               return;
 
        if (newLSR & UART_LSR_BI)
                newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI);
index edbbb13d6de6ee39285fef25268be3d08f4e3b0e..bd23a7cb1be2bceaa8422e88bc3cdb5255dd566c 100644 (file)
@@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS);
 #define MOTOROLA_TETRA_IDS()                   \
        { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
        { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+       { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \
+       { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \
        { USB_DEVICE(0x0cad, 0x9016) }  /* TPG2200 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
index 8f066bb55d7d33eb18b8f43ba845c632ed100e2a..dc7a65b9ec982113f5b40dc1fb23c0179b655dac 100644 (file)
@@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver)
                return -EINVAL;
        }
 
+       /* Prevent individual ports from being unbound. */
+       driver->driver.suppress_bind_attrs = true;
+
        usb_serial_operations_init(driver);
 
        /* Add this device to our list of devices */
index 1c120eaf4091c05c4f279d5e703177a906074965..934e9361cf6bb47b0c5f2bcf327597c321589c20 100644 (file)
@@ -38,6 +38,7 @@ struct usb_wwan_intf_private {
        spinlock_t susp_lock;
        unsigned int suspended:1;
        unsigned int use_send_setup:1;
+       unsigned int use_zlp:1;
        int in_flight;
        unsigned int open_ports;
        void *private;
index 7e855c87e4f7bb1a1364923cacf9a5f6ae970285..13be21aad2f40c7eefd5588a105364d38eeed4e9 100644 (file)
@@ -461,6 +461,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
                                      void (*callback) (struct urb *))
 {
        struct usb_serial *serial = port->serial;
+       struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
        struct urb *urb;
 
        urb = usb_alloc_urb(0, GFP_KERNEL);     /* No ISO */
@@ -471,6 +472,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
                          usb_sndbulkpipe(serial->dev, endpoint) | dir,
                          buf, len, callback, ctx);
 
+       if (intfdata->use_zlp && dir == USB_DIR_OUT)
+               urb->transfer_flags |= URB_ZERO_PACKET;
+
        return urb;
 }
 
index c1f7073a56de7329adafc115655a96be7740ff88..8b4ff9fff340c9a3a4f06c33a6446dfb778c6a53 100644 (file)
@@ -432,20 +432,30 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
 
        if (status & TCPC_ALERT_RX_STATUS) {
                struct pd_message msg;
-               unsigned int cnt;
+               unsigned int cnt, payload_cnt;
                u16 header;
 
                regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
+               /*
+                * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
+                * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
+                * defined in table 4-36 as one greater than the number of
+                * bytes received. And that number includes the header. So:
+                */
+               if (cnt > 3)
+                       payload_cnt = cnt - (1 + sizeof(msg.header));
+               else
+                       payload_cnt = 0;
 
                tcpci_read16(tcpci, TCPC_RX_HDR, &header);
                msg.header = cpu_to_le16(header);
 
-               if (WARN_ON(cnt > sizeof(msg.payload)))
-                       cnt = sizeof(msg.payload);
+               if (WARN_ON(payload_cnt > sizeof(msg.payload)))
+                       payload_cnt = sizeof(msg.payload);
 
-               if (cnt > 0)
+               if (payload_cnt > 0)
                        regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
-                                       &msg.payload, cnt);
+                                       &msg.payload, payload_cnt);
 
                /* Read complete, clear RX status alert bit */
                tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
index 8569bbd3762fdfde81e28a2b7cf7a36388f19fe0..831c9470bdc1f21227a662e06782c36ffd5c8afa 100644 (file)
@@ -94,15 +94,15 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
 #define UCSI_ENABLE_NTFY_CMD_COMPLETE          BIT(16)
 #define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE    BIT(17)
 #define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE     BIT(18)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE            BIT(19)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE      BIT(20)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE     BIT(21)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE            BIT(22)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE     BIT(23)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE                BIT(24)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE                BIT(25)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE      BIT(26)
-#define UCSI_ENABLE_NTFY_ERROR                 BIT(27)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE            BIT(21)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE      BIT(22)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE     BIT(23)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE            BIT(24)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE     BIT(25)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE                BIT(27)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE                BIT(28)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE      BIT(30)
+#define UCSI_ENABLE_NTFY_ERROR                 BIT(31)
 #define UCSI_ENABLE_NTFY_ALL                   0xdbe70000
 
 /* SET_UOR command bits */
index 1679e0dc869b049b1282a2db04a31ad946bc9426..cec868f8db3f9645ac9bd7741f7f60e28c86bbb6 100644 (file)
@@ -687,6 +687,7 @@ config MAX63XX_WATCHDOG
 config MAX77620_WATCHDOG
        tristate "Maxim Max77620 Watchdog Timer"
        depends on MFD_MAX77620 || COMPILE_TEST
+       select WATCHDOG_CORE
        help
         This is the driver for the Max77620 watchdog timer.
         Say 'Y' here to enable the watchdog timer support for
@@ -1444,6 +1445,7 @@ config SMSC37B787_WDT
 config TQMX86_WDT
        tristate "TQ-Systems TQMX86 Watchdog Timer"
        depends on X86
+       select WATCHDOG_CORE
        help
        This is the driver for the hardware watchdog timer in the TQMX86 IO
        controller found on some of their ComExpress Modules.
index 0a87c6f4bab222928106b9091639cf0b55d84831..11b9e7c6b7f596a3b5a28c4bfe7acf0e02bece85 100644 (file)
@@ -112,7 +112,7 @@ static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
 {
        struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
 
-       imx7ulp_wdt_enable(wdt->base, true);
+       imx7ulp_wdt_enable(wdog, true);
        imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
 
        /* wait for wdog to fire */
index 1cccf8eb1c5d4ed4cdad590027cc181b89dfb146..8e6dfe76f9c9d48affaa01a1f927eb25308acd2b 100644 (file)
@@ -602,7 +602,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
                set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
 
        /* Request the IRQ only after the watchdog is disabled */
-       irq = platform_get_irq(pdev, 0);
+       irq = platform_get_irq_optional(pdev, 0);
        if (irq > 0) {
                /*
                 * Not all supported platforms specify an interrupt for the
@@ -617,7 +617,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
        }
 
        /* Optional 2nd interrupt for pretimeout */
-       irq = platform_get_irq(pdev, 1);
+       irq = platform_get_irq_optional(pdev, 1);
        if (irq > 0) {
                orion_wdt_info.options |= WDIOF_PRETIMEOUT;
                ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
index 2348760474317e6fd8a33104bab713bbc5c09d05..6e524c8e26a8f06a39ac0bf18aa2019c03c88fb2 100644 (file)
@@ -188,6 +188,7 @@ static struct platform_driver rn5t618_wdt_driver = {
 
 module_platform_driver(rn5t618_wdt_driver);
 
+MODULE_ALIAS("platform:rn5t618-wdt");
 MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
 MODULE_DESCRIPTION("RN5T618 watchdog driver");
 MODULE_LICENSE("GPL v2");
index fdf533fe0bb21d8c72a00cee7d40110686d458ba..56a4a4030ca9650b902f8ac55511b3c71ff4c6a5 100644 (file)
@@ -420,7 +420,7 @@ static int wdt_find(int addr)
                cr_wdt_csr = NCT6102D_WDT_CSR;
                break;
        case NCT6116_ID:
-               ret = nct6102;
+               ret = nct6116;
                cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
                cr_wdt_control = NCT6102D_WDT_CONTROL;
                cr_wdt_csr = NCT6102D_WDT_CSR;
index 497f979018c2f5f38ee5d69c6a9f999acfa6a1e5..5c794f4b051afcc953f97ceff3b562402a31b4e0 100644 (file)
@@ -908,6 +908,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
                                 unsigned int flags)
 {
        struct afs_vnode *dvnode = AFS_FS_I(dir);
+       struct afs_fid fid = {};
        struct inode *inode;
        struct dentry *d;
        struct key *key;
@@ -951,21 +952,18 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
        afs_stat_v(dvnode, n_lookup);
        inode = afs_do_lookup(dir, dentry, key);
        key_put(key);
-       if (inode == ERR_PTR(-ENOENT)) {
+       if (inode == ERR_PTR(-ENOENT))
                inode = afs_try_auto_mntpt(dentry, dir);
-       } else {
-               dentry->d_fsdata =
-                       (void *)(unsigned long)dvnode->status.data_version;
-       }
+
+       if (!IS_ERR_OR_NULL(inode))
+               fid = AFS_FS_I(inode)->fid;
+
        d = d_splice_alias(inode, dentry);
        if (!IS_ERR_OR_NULL(d)) {
                d->d_fsdata = dentry->d_fsdata;
-               trace_afs_lookup(dvnode, &d->d_name,
-                                inode ? AFS_FS_I(inode) : NULL);
+               trace_afs_lookup(dvnode, &d->d_name, &fid);
        } else {
-               trace_afs_lookup(dvnode, &dentry->d_name,
-                                IS_ERR_OR_NULL(inode) ? NULL
-                                : AFS_FS_I(inode));
+               trace_afs_lookup(dvnode, &dentry->d_name, &fid);
        }
        return d;
 }
index ee834ef7beb4a375884f91c260c69246412f012e..43e1660f450f01ba8b6326d54e1bfdc6162a8080 100644 (file)
@@ -447,7 +447,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
        if (blkcg_css) {
                bio->bi_opf |= REQ_CGROUP_PUNT;
-               bio_associate_blkg_from_css(bio, blkcg_css);
+               kthread_associate_blkcg(blkcg_css);
        }
        refcount_set(&cb->pending_bios, 1);
 
@@ -491,6 +491,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                        bio->bi_opf = REQ_OP_WRITE | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
+                       if (blkcg_css)
+                               bio->bi_opf |= REQ_CGROUP_PUNT;
                        bio_add_page(bio, page, PAGE_SIZE, 0);
                }
                if (bytes_left < PAGE_SIZE) {
@@ -517,6 +519,9 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                bio_endio(bio);
        }
 
+       if (blkcg_css)
+               kthread_associate_blkcg(NULL);
+
        return 0;
 }
 
index e3c76645cad76ecdf324b9613cbba36e185a0522..c70baafb2a3920ced47005c3d60b11c07c89af88 100644 (file)
@@ -1479,10 +1479,10 @@ static noinline int run_delalloc_nocow(struct inode *inode,
                        disk_num_bytes =
                                btrfs_file_extent_disk_num_bytes(leaf, fi);
                        /*
-                        * If extent we got ends before our range starts, skip
-                        * to next extent
+                        * If the extent we got ends before our current offset,
+                        * skip to the next extent.
                         */
-                       if (extent_end <= start) {
+                       if (extent_end <= cur_offset) {
                                path->slots[0]++;
                                goto next_slot;
                        }
@@ -4238,18 +4238,30 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 }
 
 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
-                              struct inode *dir, u64 objectid,
-                              const char *name, int name_len)
+                              struct inode *dir, struct dentry *dentry)
 {
        struct btrfs_root *root = BTRFS_I(dir)->root;
+       struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_dir_item *di;
        struct btrfs_key key;
+       const char *name = dentry->d_name.name;
+       int name_len = dentry->d_name.len;
        u64 index;
        int ret;
+       u64 objectid;
        u64 dir_ino = btrfs_ino(BTRFS_I(dir));
 
+       if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
+               objectid = inode->root->root_key.objectid;
+       } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
+               objectid = inode->location.objectid;
+       } else {
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -4271,13 +4283,16 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
        }
        btrfs_release_path(path);
 
-       ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid,
-                                dir_ino, &index, name, name_len);
-       if (ret < 0) {
-               if (ret != -ENOENT) {
-                       btrfs_abort_transaction(trans, ret);
-                       goto out;
-               }
+       /*
+        * This is a placeholder inode for a subvolume we didn't have a
+        * reference to at the time of the snapshot creation.  In the meantime
+        * we could have renamed the real subvol link into our snapshot, so
+        * depending on btrfs_del_root_ref to return -ENOENT here is incorret.
+        * Instead simply lookup the dir_index_item for this entry so we can
+        * remove it.  Otherwise we know we have a ref to the root and we can
+        * call btrfs_del_root_ref, and it _shouldn't_ fail.
+        */
+       if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
                di = btrfs_search_dir_index_item(root, path, dir_ino,
                                                 name, name_len);
                if (IS_ERR_OR_NULL(di)) {
@@ -4292,8 +4307,16 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                index = key.offset;
+               btrfs_release_path(path);
+       } else {
+               ret = btrfs_del_root_ref(trans, objectid,
+                                        root->root_key.objectid, dir_ino,
+                                        &index, name, name_len);
+               if (ret) {
+                       btrfs_abort_transaction(trans, ret);
+                       goto out;
+               }
        }
-       btrfs_release_path(path);
 
        ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
        if (ret) {
@@ -4487,8 +4510,7 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
 
        btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
 
-       ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid,
-                                 dentry->d_name.name, dentry->d_name.len);
+       ret = btrfs_unlink_subvol(trans, dir, dentry);
        if (ret) {
                err = ret;
                btrfs_abort_transaction(trans, ret);
@@ -4583,10 +4605,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
                return PTR_ERR(trans);
 
        if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
-               err = btrfs_unlink_subvol(trans, dir,
-                                         BTRFS_I(inode)->location.objectid,
-                                         dentry->d_name.name,
-                                         dentry->d_name.len);
+               err = btrfs_unlink_subvol(trans, dir, dentry);
                goto out;
        }
 
@@ -9536,7 +9555,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
        u64 old_idx = 0;
        u64 new_idx = 0;
-       u64 root_objectid;
        int ret;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
@@ -9642,10 +9660,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 
        /* src is a subvolume */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
-               root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
-               ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
-                                         old_dentry->d_name.name,
-                                         old_dentry->d_name.len);
+               ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
        } else { /* src is an inode */
                ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
                                           BTRFS_I(old_dentry->d_inode),
@@ -9661,10 +9676,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 
        /* dest is a subvolume */
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
-               root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
-               ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
-                                         new_dentry->d_name.name,
-                                         new_dentry->d_name.len);
+               ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
        } else { /* dest is an inode */
                ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
                                           BTRFS_I(new_dentry->d_inode),
@@ -9862,7 +9874,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct inode *new_inode = d_inode(new_dentry);
        struct inode *old_inode = d_inode(old_dentry);
        u64 index = 0;
-       u64 root_objectid;
        int ret;
        u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
        bool log_pinned = false;
@@ -9970,10 +9981,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                                BTRFS_I(old_inode), 1);
 
        if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
-               root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
-               ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
-                                       old_dentry->d_name.name,
-                                       old_dentry->d_name.len);
+               ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
        } else {
                ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
                                        BTRFS_I(d_inode(old_dentry)),
@@ -9992,10 +10000,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_inode->i_ctime = current_time(new_inode);
                if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
                             BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
-                       root_objectid = BTRFS_I(new_inode)->location.objectid;
-                       ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
-                                               new_dentry->d_name.name,
-                                               new_dentry->d_name.len);
+                       ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
                        BUG_ON(new_inode->i_nlink == 0);
                } else {
                        ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
index 18e328ce4b54964a6dbbddbf36450e24bf77460d..12ae31e1813e4e0a02079faa48daa7be86ab8dea 100644 (file)
@@ -4252,7 +4252,19 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
                              &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
                              0);
 
-       if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
+       /*
+        * Copy scrub args to user space even if btrfs_scrub_dev() returned an
+        * error. This is important as it allows user space to know how much
+        * progress scrub has done. For example, if scrub is canceled we get
+        * -ECANCELED from btrfs_scrub_dev() and return that error back to user
+        * space. Later user space can inspect the progress from the structure
+        * btrfs_ioctl_scrub_args and resume scrub from where it left off
+        * previously (btrfs-progs does this).
+        * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
+        * then return -EFAULT to signal the structure was not copied or it may
+        * be corrupt and unreliable due to a partial copy.
+        */
+       if (copy_to_user(arg, sa, sizeof(*sa)))
                ret = -EFAULT;
 
        if (!(sa->flags & BTRFS_SCRUB_READONLY))
index d4282e12f2a64a2e3c1fa696d00c9862baff6861..39fc8c3d3a75df2387e43c7dec3aae9d90e59350 100644 (file)
@@ -2423,8 +2423,12 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
        u64 nr_old_roots = 0;
        int ret = 0;
 
+       /*
+        * If quotas get disabled meanwhile, the resouces need to be freed and
+        * we can't just exit here.
+        */
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
-               return 0;
+               goto out_free;
 
        if (new_roots) {
                if (!maybe_fs_roots(new_roots))
index c58245797f30af0c87b12e2c81fa98e4cbbe5a30..da5abd62db2232d48e128e6b37123f4bbfbd9072 100644 (file)
@@ -517,6 +517,34 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
        return 1;
 }
 
+static bool reloc_root_is_dead(struct btrfs_root *root)
+{
+       /*
+        * Pair with set_bit/clear_bit in clean_dirty_subvols and
+        * btrfs_update_reloc_root. We need to see the updated bit before
+        * trying to access reloc_root
+        */
+       smp_rmb();
+       if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
+               return true;
+       return false;
+}
+
+/*
+ * Check if this subvolume tree has valid reloc tree.
+ *
+ * Reloc tree after swap is considered dead, thus not considered as valid.
+ * This is enough for most callers, as they don't distinguish dead reloc root
+ * from no reloc root.  But should_ignore_root() below is a special case.
+ */
+static bool have_reloc_root(struct btrfs_root *root)
+{
+       if (reloc_root_is_dead(root))
+               return false;
+       if (!root->reloc_root)
+               return false;
+       return true;
+}
 
 static int should_ignore_root(struct btrfs_root *root)
 {
@@ -525,6 +553,10 @@ static int should_ignore_root(struct btrfs_root *root)
        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return 0;
 
+       /* This root has been merged with its reloc tree, we can ignore it */
+       if (reloc_root_is_dead(root))
+               return 1;
+
        reloc_root = root->reloc_root;
        if (!reloc_root)
                return 0;
@@ -1439,7 +1471,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
         * The subvolume has reloc tree but the swap is finished, no need to
         * create/update the dead reloc tree
         */
-       if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
+       if (reloc_root_is_dead(root))
                return 0;
 
        if (root->reloc_root) {
@@ -1478,8 +1510,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
        struct btrfs_root_item *root_item;
        int ret;
 
-       if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) ||
-           !root->reloc_root)
+       if (!have_reloc_root(root))
                goto out;
 
        reloc_root = root->reloc_root;
@@ -1489,6 +1520,11 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
        if (fs_info->reloc_ctl->merge_reloc_tree &&
            btrfs_root_refs(root_item) == 0) {
                set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
+               /*
+                * Mark the tree as dead before we change reloc_root so
+                * have_reloc_root will not touch it from now on.
+                */
+               smp_wmb();
                __del_reloc_root(reloc_root);
        }
 
@@ -2201,6 +2237,11 @@ static int clean_dirty_subvols(struct reloc_control *rc)
                                if (ret2 < 0 && !ret)
                                        ret = ret2;
                        }
+                       /*
+                        * Need barrier to ensure clear_bit() only happens after
+                        * root->reloc_root = NULL. Pairs with have_reloc_root.
+                        */
+                       smp_wmb();
                        clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
                        btrfs_put_fs_root(root);
                } else {
@@ -4718,7 +4759,7 @@ void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
        struct btrfs_root *root = pending->root;
        struct reloc_control *rc = root->fs_info->reloc_ctl;
 
-       if (!root->reloc_root || !rc)
+       if (!rc || !have_reloc_root(root))
                return;
 
        if (!rc->merge_reloc_tree)
@@ -4752,7 +4793,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
        struct reloc_control *rc = root->fs_info->reloc_ctl;
        int ret;
 
-       if (!root->reloc_root || !rc)
+       if (!rc || !have_reloc_root(root))
                return 0;
 
        rc = root->fs_info->reloc_ctl;
index 3b17b647d002f2c5cb747ade373500a0921f08b4..612411c74550f5d1776c318269249abb0b7953fc 100644 (file)
@@ -376,11 +376,13 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
                leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_root_ref);
-
-               WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid);
-               WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len);
                ptr = (unsigned long)(ref + 1);
-               WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len));
+               if ((btrfs_root_ref_dirid(leaf, ref) != dirid) ||
+                   (btrfs_root_ref_name_len(leaf, ref) != name_len) ||
+                   memcmp_extent_buffer(leaf, name, ptr, name_len)) {
+                       err = -ENOENT;
+                       goto out;
+               }
                *sequence = btrfs_root_ref_sequence(leaf, ref);
 
                ret = btrfs_del_item(trans, tree_root, path);
index a6d3f08bfff37bae0676ee7f0a12892dddfb48c0..9b78e720c6973c6e141fff069e212cd388fd58f8 100644 (file)
@@ -3881,7 +3881,11 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
                }
        }
 
-       num_devices = btrfs_num_devices(fs_info);
+       /*
+        * rw_devices will not change at the moment, device add/delete/replace
+        * are excluded by EXCL_OP
+        */
+       num_devices = fs_info->fs_devices->rw_devices;
 
        /*
         * SINGLE profile on-disk has no profile bit, but in-memory we have a
index d8c7242426bb349782c9c7b7fd106ea4b8f12eb5..18a87ec8a465bbf1dccee2cd59e29a3c180fcb5c 100644 (file)
@@ -3031,11 +3031,9 @@ static void end_bio_bh_io_sync(struct bio *bio)
  * errors, this only handles the "we need to be able to
  * do IO at the final sector" case.
  */
-void guard_bio_eod(int op, struct bio *bio)
+void guard_bio_eod(struct bio *bio)
 {
        sector_t maxsector;
-       struct bio_vec *bvec = bio_last_bvec_all(bio);
-       unsigned truncated_bytes;
        struct hd_struct *part;
 
        rcu_read_lock();
@@ -3061,28 +3059,7 @@ void guard_bio_eod(int op, struct bio *bio)
        if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
                return;
 
-       /* Uhhuh. We've got a bio that straddles the device size! */
-       truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
-
-       /*
-        * The bio contains more than one segment which spans EOD, just return
-        * and let IO layer turn it into an EIO
-        */
-       if (truncated_bytes > bvec->bv_len)
-               return;
-
-       /* Truncate the bio.. */
-       bio->bi_iter.bi_size -= truncated_bytes;
-       bvec->bv_len -= truncated_bytes;
-
-       /* ..and clear the end of the buffer for reads */
-       if (op == REQ_OP_READ) {
-               struct bio_vec bv;
-
-               mp_bvec_last_segment(bvec, &bv);
-               zero_user(bv.bv_page, bv.bv_offset + bv.bv_len,
-                               truncated_bytes);
-       }
+       bio_truncate(bio, maxsector << 9);
 }
 
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
@@ -3118,15 +3095,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
 
-       /* Take care of bh's that straddle the end of the device */
-       guard_bio_eod(op, bio);
-
        if (buffer_meta(bh))
                op_flags |= REQ_META;
        if (buffer_prio(bh))
                op_flags |= REQ_PRIO;
        bio_set_op_attrs(bio, op, op_flags);
 
+       /* Take care of bh's that straddle the end of the device */
+       guard_bio_eod(bio);
+
        if (wbc) {
                wbc_init_bio(wbc, bio);
                wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
index 00dfe17871ac2fa4c98f23db35010d89967de96d..c5e6eff5a38164e2dda4b20e1918c287233be1b3 100644 (file)
@@ -352,7 +352,7 @@ static struct kobject *cdev_get(struct cdev *p)
 
        if (owner && !try_module_get(owner))
                return NULL;
-       kobj = kobject_get(&p->kobj);
+       kobj = kobject_get_unless_zero(&p->kobj);
        if (!kobj)
                module_put(owner);
        return kobj;
index ce9bac756c2a1a5a36ba0b3d8a4715fded4d3bff..40705e86245190474322c86241311e487560b15c 100644 (file)
@@ -1693,6 +1693,7 @@ struct cifs_fattr {
        struct timespec64 cf_atime;
        struct timespec64 cf_mtime;
        struct timespec64 cf_ctime;
+       u32             cf_cifstag;
 };
 
 static inline void free_dfs_info_param(struct dfs_info3_param *param)
index 3925a7bfc74d61c6c85f8ab6537498f064512870..d17587c2c4abb67ed9586715ab9313041bb58932 100644 (file)
@@ -139,6 +139,28 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
        dput(dentry);
 }
 
+static bool reparse_file_needs_reval(const struct cifs_fattr *fattr)
+{
+       if (!(fattr->cf_cifsattrs & ATTR_REPARSE))
+               return false;
+       /*
+        * The DFS tags should be only intepreted by server side as per
+        * MS-FSCC 2.1.2.1, but let's include them anyway.
+        *
+        * Besides, if cf_cifstag is unset (0), then we still need it to be
+        * revalidated to know exactly what reparse point it is.
+        */
+       switch (fattr->cf_cifstag) {
+       case IO_REPARSE_TAG_DFS:
+       case IO_REPARSE_TAG_DFSR:
+       case IO_REPARSE_TAG_SYMLINK:
+       case IO_REPARSE_TAG_NFS:
+       case 0:
+               return true;
+       }
+       return false;
+}
+
 static void
 cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
 {
@@ -158,7 +180,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
         * is a symbolic link, DFS referral or a reparse point with a direct
         * access like junctions, deduplicated files, NFS symlinks.
         */
-       if (fattr->cf_cifsattrs & ATTR_REPARSE)
+       if (reparse_file_needs_reval(fattr))
                fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
 
        /* non-unix readdir doesn't provide nlink */
@@ -194,19 +216,37 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
        }
 }
 
+static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
+{
+       const FILE_DIRECTORY_INFO *fi = info;
+
+       memset(fattr, 0, sizeof(*fattr));
+       fattr->cf_cifsattrs = le32_to_cpu(fi->ExtFileAttributes);
+       fattr->cf_eof = le64_to_cpu(fi->EndOfFile);
+       fattr->cf_bytes = le64_to_cpu(fi->AllocationSize);
+       fattr->cf_createtime = le64_to_cpu(fi->CreationTime);
+       fattr->cf_atime = cifs_NTtimeToUnix(fi->LastAccessTime);
+       fattr->cf_ctime = cifs_NTtimeToUnix(fi->ChangeTime);
+       fattr->cf_mtime = cifs_NTtimeToUnix(fi->LastWriteTime);
+}
+
 void
 cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
                       struct cifs_sb_info *cifs_sb)
 {
-       memset(fattr, 0, sizeof(*fattr));
-       fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes);
-       fattr->cf_eof = le64_to_cpu(info->EndOfFile);
-       fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
-       fattr->cf_createtime = le64_to_cpu(info->CreationTime);
-       fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
-       fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
-       fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+       __dir_info_to_fattr(fattr, info);
+       cifs_fill_common_info(fattr, cifs_sb);
+}
 
+static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
+                                      SEARCH_ID_FULL_DIR_INFO *info,
+                                      struct cifs_sb_info *cifs_sb)
+{
+       __dir_info_to_fattr(fattr, info);
+
+       /* See MS-FSCC 2.4.18 FileIdFullDirectoryInformation */
+       if (fattr->cf_cifsattrs & ATTR_REPARSE)
+               fattr->cf_cifstag = le32_to_cpu(info->EaSize);
        cifs_fill_common_info(fattr, cifs_sb);
 }
 
@@ -755,6 +795,11 @@ static int cifs_filldir(char *find_entry, struct file *file,
                                       (FIND_FILE_STANDARD_INFO *)find_entry,
                                       cifs_sb);
                break;
+       case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+               cifs_fulldir_info_to_fattr(&fattr,
+                                          (SEARCH_ID_FULL_DIR_INFO *)find_entry,
+                                          cifs_sb);
+               break;
        default:
                cifs_dir_info_to_fattr(&fattr,
                                       (FILE_DIRECTORY_INFO *)find_entry,
index 8b0b512c57920cebd5a022c2357a31e376cf87ed..afe1f03aabe386cab41a9a4f502115b1d5ded406 100644 (file)
@@ -67,7 +67,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                goto out;
 
 
-        if (oparms->tcon->use_resilient) {
+       if (oparms->tcon->use_resilient) {
                /* default timeout is 0, servers pick default (120 seconds) */
                nr_ioctl_req.Timeout =
                        cpu_to_le32(oparms->tcon->handle_timeout);
index 0ec4f270139f6774d97700a79bd6d0be52752ff1..00b4d15bb811af2506a493d3f889bbee7dd179b5 100644 (file)
@@ -39,6 +39,8 @@
 #include <linux/atomic.h>
 #include <linux/prefetch.h>
 
+#include "internal.h"
+
 /*
  * How many user pages to map in one call to get_user_pages().  This determines
  * the size of a structure in the slab cache
index 2f4fcf985079d52400e7d58b732b7b189133b36c..3da91a112babe874af392635a32e971d8885937f 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -960,7 +960,7 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
        return ksys_dup3(oldfd, newfd, 0);
 }
 
-SYSCALL_DEFINE1(dup, unsigned int, fildes)
+int ksys_dup(unsigned int fildes)
 {
        int ret = -EBADF;
        struct file *file = fget_raw(fildes);
@@ -975,6 +975,11 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
        return ret;
 }
 
+SYSCALL_DEFINE1(dup, unsigned int, fildes)
+{
+       return ksys_dup(fildes);
+}
+
 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
 {
        int err;
index a63d779eac10409b6e44a00c70ed7bf8d6acb832..ce715380143cd890057605575323ecddc01706d1 100644 (file)
@@ -882,6 +882,7 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
        struct fuse_args_pages *ap = &ia->ap;
        loff_t pos = page_offset(ap->pages[0]);
        size_t count = ap->num_pages << PAGE_SHIFT;
+       ssize_t res;
        int err;
 
        ap->args.out_pages = true;
@@ -896,7 +897,8 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
                if (!err)
                        return;
        } else {
-               err = fuse_simple_request(fc, &ap->args);
+               res = fuse_simple_request(fc, &ap->args);
+               err = res < 0 ? res : 0;
        }
        fuse_readpages_end(fc, &ap->args, err);
 }
index d5c2a315861064b459a0adef1d6d3e490d05dbff..a66e425884d14e000aa2f85b85c776cae0d347c0 100644 (file)
@@ -1498,8 +1498,10 @@ static int __init init_hugetlbfs_fs(void)
        /* other hstates are optional */
        i = 0;
        for_each_hstate(h) {
-               if (i == default_hstate_idx)
+               if (i == default_hstate_idx) {
+                       i++;
                        continue;
+               }
 
                mnt = mount_one_hugetlbfs(h);
                if (IS_ERR(mnt))
index 4a7da1df573da9070fa352c1ba5d538b61aaecfe..e3fa69544b66c0bd18e99a023ccf0f32f7b8e009 100644 (file)
@@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
 /*
  * buffer.c
  */
-extern void guard_bio_eod(int rw, struct bio *bio);
+extern void guard_bio_eod(struct bio *bio);
 extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
                get_block_t *get_block, struct iomap *iomap);
 
index 11e80b7252a82cefaf12d56e792f9b679e2f5a13..5147d2213b019f9b80841a0e14a1709ba719bf69 100644 (file)
@@ -92,7 +92,6 @@ struct io_wqe {
        struct io_wqe_acct acct[2];
 
        struct hlist_nulls_head free_list;
-       struct hlist_nulls_head busy_list;
        struct list_head all_list;
 
        struct io_wq *wq;
@@ -327,7 +326,6 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
        if (worker->flags & IO_WORKER_F_FREE) {
                worker->flags &= ~IO_WORKER_F_FREE;
                hlist_nulls_del_init_rcu(&worker->nulls_node);
-               hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
        }
 
        /*
@@ -365,7 +363,6 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 {
        if (!(worker->flags & IO_WORKER_F_FREE)) {
                worker->flags |= IO_WORKER_F_FREE;
-               hlist_nulls_del_init_rcu(&worker->nulls_node);
                hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
        }
 
@@ -432,6 +429,8 @@ static void io_worker_handle_work(struct io_worker *worker)
                if (signal_pending(current))
                        flush_signals(current);
 
+               cond_resched();
+
                spin_lock_irq(&worker->lock);
                worker->cur_work = work;
                spin_unlock_irq(&worker->lock);
@@ -446,10 +445,14 @@ static void io_worker_handle_work(struct io_worker *worker)
                        task_unlock(current);
                }
                if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
-                   wq->mm && mmget_not_zero(wq->mm)) {
-                       use_mm(wq->mm);
-                       set_fs(USER_DS);
-                       worker->mm = wq->mm;
+                   wq->mm) {
+                       if (mmget_not_zero(wq->mm)) {
+                               use_mm(wq->mm);
+                               set_fs(USER_DS);
+                               worker->mm = wq->mm;
+                       } else {
+                               work->flags |= IO_WQ_WORK_CANCEL;
+                       }
                }
                if (!worker->creds)
                        worker->creds = override_creds(wq->creds);
@@ -798,10 +801,6 @@ void io_wq_cancel_all(struct io_wq *wq)
 
        set_bit(IO_WQ_BIT_CANCEL, &wq->state);
 
-       /*
-        * Browse both lists, as there's a gap between handing work off
-        * to a worker and the worker putting itself on the busy_list
-        */
        rcu_read_lock();
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
@@ -1049,7 +1048,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
                spin_lock_init(&wqe->lock);
                INIT_WQ_LIST(&wqe->work_list);
                INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
-               INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
                INIT_LIST_HEAD(&wqe->all_list);
        }
 
index 6f084e3cf835124e4c138abbfcf5c217a0893693..187dd94fd6b124a15a45c16ee68142b08b7f52c9 100644 (file)
@@ -330,6 +330,26 @@ struct io_timeout {
        struct file                     *file;
        u64                             addr;
        int                             flags;
+       unsigned                        count;
+};
+
+struct io_rw {
+       /* NOTE: kiocb has the file as the first member, so don't do it here */
+       struct kiocb                    kiocb;
+       u64                             addr;
+       u64                             len;
+};
+
+struct io_connect {
+       struct file                     *file;
+       struct sockaddr __user          *addr;
+       int                             addr_len;
+};
+
+struct io_sr_msg {
+       struct file                     *file;
+       struct user_msghdr __user       *msg;
+       int                             msg_flags;
 };
 
 struct io_async_connect {
@@ -351,7 +371,6 @@ struct io_async_rw {
 };
 
 struct io_async_ctx {
-       struct io_uring_sqe             sqe;
        union {
                struct io_async_rw      rw;
                struct io_async_msghdr  msg;
@@ -369,15 +388,16 @@ struct io_async_ctx {
 struct io_kiocb {
        union {
                struct file             *file;
-               struct kiocb            rw;
+               struct io_rw            rw;
                struct io_poll_iocb     poll;
                struct io_accept        accept;
                struct io_sync          sync;
                struct io_cancel        cancel;
                struct io_timeout       timeout;
+               struct io_connect       connect;
+               struct io_sr_msg        sr_msg;
        };
 
-       const struct io_uring_sqe       *sqe;
        struct io_async_ctx             *io;
        struct file                     *ring_file;
        int                             ring_fd;
@@ -411,7 +431,6 @@ struct io_kiocb {
 #define REQ_F_INFLIGHT         16384   /* on inflight list */
 #define REQ_F_COMP_LOCKED      32768   /* completion under lock */
 #define REQ_F_HARDLINK         65536   /* doesn't sever on completion < 0 */
-#define REQ_F_PREPPED          131072  /* request already opcode prepared */
        u64                     user_data;
        u32                     result;
        u32                     sequence;
@@ -609,33 +628,31 @@ static inline bool io_prep_async_work(struct io_kiocb *req,
 {
        bool do_hashed = false;
 
-       if (req->sqe) {
-               switch (req->opcode) {
-               case IORING_OP_WRITEV:
-               case IORING_OP_WRITE_FIXED:
-                       /* only regular files should be hashed for writes */
-                       if (req->flags & REQ_F_ISREG)
-                               do_hashed = true;
-                       /* fall-through */
-               case IORING_OP_READV:
-               case IORING_OP_READ_FIXED:
-               case IORING_OP_SENDMSG:
-               case IORING_OP_RECVMSG:
-               case IORING_OP_ACCEPT:
-               case IORING_OP_POLL_ADD:
-               case IORING_OP_CONNECT:
-                       /*
-                        * We know REQ_F_ISREG is not set on some of these
-                        * opcodes, but this enables us to keep the check in
-                        * just one place.
-                        */
-                       if (!(req->flags & REQ_F_ISREG))
-                               req->work.flags |= IO_WQ_WORK_UNBOUND;
-                       break;
-               }
-               if (io_req_needs_user(req))
-                       req->work.flags |= IO_WQ_WORK_NEEDS_USER;
+       switch (req->opcode) {
+       case IORING_OP_WRITEV:
+       case IORING_OP_WRITE_FIXED:
+               /* only regular files should be hashed for writes */
+               if (req->flags & REQ_F_ISREG)
+                       do_hashed = true;
+               /* fall-through */
+       case IORING_OP_READV:
+       case IORING_OP_READ_FIXED:
+       case IORING_OP_SENDMSG:
+       case IORING_OP_RECVMSG:
+       case IORING_OP_ACCEPT:
+       case IORING_OP_POLL_ADD:
+       case IORING_OP_CONNECT:
+               /*
+                * We know REQ_F_ISREG is not set on some of these
+                * opcodes, but this enables us to keep the check in
+                * just one place.
+                */
+               if (!(req->flags & REQ_F_ISREG))
+                       req->work.flags |= IO_WQ_WORK_UNBOUND;
+               break;
        }
+       if (io_req_needs_user(req))
+               req->work.flags |= IO_WQ_WORK_NEEDS_USER;
 
        *link = io_prep_linked_timeout(req);
        return do_hashed;
@@ -1180,7 +1197,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 
        ret = 0;
        list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
-               struct kiocb *kiocb = &req->rw;
+               struct kiocb *kiocb = &req->rw.kiocb;
 
                /*
                 * Move completed entries to our local list. If we find a
@@ -1335,7 +1352,7 @@ static inline void req_set_fail_links(struct io_kiocb *req)
 
 static void io_complete_rw_common(struct kiocb *kiocb, long res)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
@@ -1347,7 +1364,7 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
        io_complete_rw_common(kiocb, res);
        io_put_req(req);
@@ -1355,7 +1372,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 
 static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_kiocb *nxt = NULL;
 
        io_complete_rw_common(kiocb, res);
@@ -1366,7 +1383,7 @@ static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
@@ -1400,7 +1417,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 
                list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
                                                list);
-               if (list_req->rw.ki_filp != req->rw.ki_filp)
+               if (list_req->file != req->file)
                        ctx->poll_multi_file = true;
        }
 
@@ -1471,11 +1488,11 @@ static bool io_file_supports_async(struct file *file)
        return false;
 }
 
-static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                     bool force_nonblock)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
-       struct kiocb *kiocb = &req->rw;
+       struct kiocb *kiocb = &req->rw.kiocb;
        unsigned ioprio;
        int ret;
 
@@ -1524,6 +1541,12 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
                        return -EINVAL;
                kiocb->ki_complete = io_complete_rw;
        }
+
+       req->rw.addr = READ_ONCE(sqe->addr);
+       req->rw.len = READ_ONCE(sqe->len);
+       /* we own ->private, reuse it for the buffer index */
+       req->rw.kiocb.private = (void *) (unsigned long)
+                                       READ_ONCE(sqe->buf_index);
        return 0;
 }
 
@@ -1557,11 +1580,11 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
                io_rw_done(kiocb, ret);
 }
 
-static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
-                              const struct io_uring_sqe *sqe,
+static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
                               struct iov_iter *iter)
 {
-       size_t len = READ_ONCE(sqe->len);
+       struct io_ring_ctx *ctx = req->ctx;
+       size_t len = req->rw.len;
        struct io_mapped_ubuf *imu;
        unsigned index, buf_index;
        size_t offset;
@@ -1571,13 +1594,13 @@ static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
        if (unlikely(!ctx->user_bufs))
                return -EFAULT;
 
-       buf_index = READ_ONCE(sqe->buf_index);
+       buf_index = (unsigned long) req->rw.kiocb.private;
        if (unlikely(buf_index >= ctx->nr_user_bufs))
                return -EFAULT;
 
        index = array_index_nospec(buf_index, ctx->nr_user_bufs);
        imu = &ctx->user_bufs[index];
-       buf_addr = READ_ONCE(sqe->addr);
+       buf_addr = req->rw.addr;
 
        /* overflow */
        if (buf_addr + len < buf_addr)
@@ -1634,25 +1657,20 @@ static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
                               struct iovec **iovec, struct iov_iter *iter)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-       void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
-       size_t sqe_len = READ_ONCE(sqe->len);
+       void __user *buf = u64_to_user_ptr(req->rw.addr);
+       size_t sqe_len = req->rw.len;
        u8 opcode;
 
-       /*
-        * We're reading ->opcode for the second time, but the first read
-        * doesn't care whether it's _FIXED or not, so it doesn't matter
-        * whether ->opcode changes concurrently. The first read does care
-        * about whether it is a READ or a WRITE, so we don't trust this read
-        * for that purpose and instead let the caller pass in the read/write
-        * flag.
-        */
        opcode = req->opcode;
        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
                *iovec = NULL;
-               return io_import_fixed(req->ctx, rw, sqe, iter);
+               return io_import_fixed(req, rw, iter);
        }
 
+       /* buffer index only valid with fixed read/write */
+       if (req->rw.kiocb.private)
+               return -EINVAL;
+
        if (req->io) {
                struct io_async_rw *iorw = &req->io->rw;
 
@@ -1750,13 +1768,7 @@ static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
 static int io_alloc_async_ctx(struct io_kiocb *req)
 {
        req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
-       if (req->io) {
-               memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe));
-               req->sqe = &req->io->sqe;
-               return 0;
-       }
-
-       return 1;
+       return req->io == NULL;
 }
 
 static void io_rw_async(struct io_wq_work **workptr)
@@ -1774,6 +1786,9 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
                             struct iovec *iovec, struct iovec *fast_iov,
                             struct iov_iter *iter)
 {
+       if (req->opcode == IORING_OP_READ_FIXED ||
+           req->opcode == IORING_OP_WRITE_FIXED)
+               return 0;
        if (!req->io && io_alloc_async_ctx(req))
                return -ENOMEM;
 
@@ -1782,46 +1797,53 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
        return 0;
 }
 
-static int io_read_prep(struct io_kiocb *req, struct iovec **iovec,
-                       struct iov_iter *iter, bool force_nonblock)
+static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                       bool force_nonblock)
 {
+       struct io_async_ctx *io;
+       struct iov_iter iter;
        ssize_t ret;
 
-       ret = io_prep_rw(req, force_nonblock);
+       ret = io_prep_rw(req, sqe, force_nonblock);
        if (ret)
                return ret;
 
        if (unlikely(!(req->file->f_mode & FMODE_READ)))
                return -EBADF;
 
-       return io_import_iovec(READ, req, iovec, iter);
+       if (!req->io)
+               return 0;
+
+       io = req->io;
+       io->rw.iov = io->rw.fast_iov;
+       req->io = NULL;
+       ret = io_import_iovec(READ, req, &io->rw.iov, &iter);
+       req->io = io;
+       if (ret < 0)
+               return ret;
+
+       io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+       return 0;
 }
 
 static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
                   bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct kiocb *kiocb = &req->rw;
+       struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter iter;
-       struct file *file;
        size_t iov_count;
        ssize_t io_size, ret;
 
-       if (!req->io) {
-               ret = io_read_prep(req, &iovec, &iter, force_nonblock);
-               if (ret < 0)
-                       return ret;
-       } else {
-               ret = io_import_iovec(READ, req, &iovec, &iter);
-               if (ret < 0)
-                       return ret;
-       }
+       ret = io_import_iovec(READ, req, &iovec, &iter);
+       if (ret < 0)
+               return ret;
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
-               req->rw.ki_flags &= ~IOCB_NOWAIT;
+               req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
-       file = req->file;
+       req->result = 0;
        io_size = ret;
        if (req->flags & REQ_F_LINK)
                req->result = io_size;
@@ -1830,33 +1852,21 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
         * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
         * we know to async punt it even if it was opened O_NONBLOCK
         */
-       if (force_nonblock && !io_file_supports_async(file)) {
+       if (force_nonblock && !io_file_supports_async(req->file)) {
                req->flags |= REQ_F_MUST_PUNT;
                goto copy_iov;
        }
 
        iov_count = iov_iter_count(&iter);
-       ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
+       ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
        if (!ret) {
                ssize_t ret2;
 
-               if (file->f_op->read_iter)
-                       ret2 = call_read_iter(file, kiocb, &iter);
+               if (req->file->f_op->read_iter)
+                       ret2 = call_read_iter(req->file, kiocb, &iter);
                else
-                       ret2 = loop_rw_iter(READ, file, kiocb, &iter);
+                       ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
 
-               /*
-                * In case of a short read, punt to async. This can happen
-                * if we have data partially cached. Alternatively we can
-                * return the short read, in which case the application will
-                * need to issue another SQE and wait for it. That SQE will
-                * need async punt anyway, so it's more efficient to do it
-                * here.
-                */
-               if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
-                   (req->flags & REQ_F_ISREG) &&
-                   ret2 > 0 && ret2 < io_size)
-                       ret2 = -EAGAIN;
                /* Catch -EAGAIN return for forced non-blocking submission */
                if (!force_nonblock || ret2 != -EAGAIN) {
                        kiocb_done(kiocb, ret2, nxt, req->in_async);
@@ -1875,46 +1885,53 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
        return ret;
 }
 
-static int io_write_prep(struct io_kiocb *req, struct iovec **iovec,
-                        struct iov_iter *iter, bool force_nonblock)
+static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                        bool force_nonblock)
 {
+       struct io_async_ctx *io;
+       struct iov_iter iter;
        ssize_t ret;
 
-       ret = io_prep_rw(req, force_nonblock);
+       ret = io_prep_rw(req, sqe, force_nonblock);
        if (ret)
                return ret;
 
        if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
                return -EBADF;
 
-       return io_import_iovec(WRITE, req, iovec, iter);
+       if (!req->io)
+               return 0;
+
+       io = req->io;
+       io->rw.iov = io->rw.fast_iov;
+       req->io = NULL;
+       ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter);
+       req->io = io;
+       if (ret < 0)
+               return ret;
+
+       io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+       return 0;
 }
 
 static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
                    bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct kiocb *kiocb = &req->rw;
+       struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter iter;
-       struct file *file;
        size_t iov_count;
        ssize_t ret, io_size;
 
-       if (!req->io) {
-               ret = io_write_prep(req, &iovec, &iter, force_nonblock);
-               if (ret < 0)
-                       return ret;
-       } else {
-               ret = io_import_iovec(WRITE, req, &iovec, &iter);
-               if (ret < 0)
-                       return ret;
-       }
+       ret = io_import_iovec(WRITE, req, &iovec, &iter);
+       if (ret < 0)
+               return ret;
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
-               req->rw.ki_flags &= ~IOCB_NOWAIT;
+               req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
 
-       file = kiocb->ki_filp;
+       req->result = 0;
        io_size = ret;
        if (req->flags & REQ_F_LINK)
                req->result = io_size;
@@ -1934,7 +1951,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
                goto copy_iov;
 
        iov_count = iov_iter_count(&iter);
-       ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
+       ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
        if (!ret) {
                ssize_t ret2;
 
@@ -1946,17 +1963,17 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
                 * we return to userspace.
                 */
                if (req->flags & REQ_F_ISREG) {
-                       __sb_start_write(file_inode(file)->i_sb,
+                       __sb_start_write(file_inode(req->file)->i_sb,
                                                SB_FREEZE_WRITE, true);
-                       __sb_writers_release(file_inode(file)->i_sb,
+                       __sb_writers_release(file_inode(req->file)->i_sb,
                                                SB_FREEZE_WRITE);
                }
                kiocb->ki_flags |= IOCB_WRITE;
 
-               if (file->f_op->write_iter)
-                       ret2 = call_write_iter(file, kiocb, &iter);
+               if (req->file->f_op->write_iter)
+                       ret2 = call_write_iter(req->file, kiocb, &iter);
                else
-                       ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
+                       ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
                if (!force_nonblock || ret2 != -EAGAIN) {
                        kiocb_done(kiocb, ret2, nxt, req->in_async);
                } else {
@@ -1989,13 +2006,10 @@ static int io_nop(struct io_kiocb *req)
        return 0;
 }
 
-static int io_prep_fsync(struct io_kiocb *req)
+static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (!req->file)
                return -EBADF;
 
@@ -2010,7 +2024,6 @@ static int io_prep_fsync(struct io_kiocb *req)
 
        req->sync.off = READ_ONCE(sqe->off);
        req->sync.len = READ_ONCE(sqe->len);
-       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -2026,6 +2039,28 @@ static bool io_req_cancelled(struct io_kiocb *req)
        return false;
 }
 
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+       struct io_wq_work *work = *workptr;
+       struct io_kiocb *link = work->data;
+
+       io_queue_linked_timeout(link);
+       work->func = io_wq_submit_work;
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+{
+       struct io_kiocb *link;
+
+       io_prep_async_work(nxt, &link);
+       *workptr = &nxt->work;
+       if (link) {
+               nxt->work.flags |= IO_WQ_WORK_CB;
+               nxt->work.func = io_link_work_cb;
+               nxt->work.data = link;
+       }
+}
+
 static void io_fsync_finish(struct io_wq_work **workptr)
 {
        struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
@@ -2036,7 +2071,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
        if (io_req_cancelled(req))
                return;
 
-       ret = vfs_fsync_range(req->rw.ki_filp, req->sync.off,
+       ret = vfs_fsync_range(req->file, req->sync.off,
                                end > 0 ? end : LLONG_MAX,
                                req->sync.flags & IORING_FSYNC_DATASYNC);
        if (ret < 0)
@@ -2044,18 +2079,13 @@ static void io_fsync_finish(struct io_wq_work **workptr)
        io_cqring_add_event(req, ret);
        io_put_req_find_next(req, &nxt);
        if (nxt)
-               *workptr = &nxt->work;
+               io_wq_assign_next(workptr, nxt);
 }
 
 static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
                    bool force_nonblock)
 {
        struct io_wq_work *work, *old_work;
-       int ret;
-
-       ret = io_prep_fsync(req);
-       if (ret)
-               return ret;
 
        /* fsync always requires a blocking context */
        if (force_nonblock) {
@@ -2071,13 +2101,10 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
        return 0;
 }
 
-static int io_prep_sfr(struct io_kiocb *req)
+static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (!req->file)
                return -EBADF;
 
@@ -2089,7 +2116,6 @@ static int io_prep_sfr(struct io_kiocb *req)
        req->sync.off = READ_ONCE(sqe->off);
        req->sync.len = READ_ONCE(sqe->len);
        req->sync.flags = READ_ONCE(sqe->sync_range_flags);
-       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -2102,25 +2128,20 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
        if (io_req_cancelled(req))
                return;
 
-       ret = sync_file_range(req->rw.ki_filp, req->sync.off, req->sync.len,
+       ret = sync_file_range(req->file, req->sync.off, req->sync.len,
                                req->sync.flags);
        if (ret < 0)
                req_set_fail_links(req);
        io_cqring_add_event(req, ret);
        io_put_req_find_next(req, &nxt);
        if (nxt)
-               *workptr = &nxt->work;
+               io_wq_assign_next(workptr, nxt);
 }
 
 static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
                              bool force_nonblock)
 {
        struct io_wq_work *work, *old_work;
-       int ret;
-
-       ret = io_prep_sfr(req);
-       if (ret)
-               return ret;
 
        /* sync_file_range always requires a blocking context */
        if (force_nonblock) {
@@ -2149,19 +2170,23 @@ static void io_sendrecv_async(struct io_wq_work **workptr)
 }
 #endif
 
-static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       struct user_msghdr __user *msg;
-       unsigned flags;
+       struct io_sr_msg *sr = &req->sr_msg;
+       struct io_async_ctx *io = req->io;
+
+       sr->msg_flags = READ_ONCE(sqe->msg_flags);
+       sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+
+       if (!io)
+               return 0;
 
-       flags = READ_ONCE(sqe->msg_flags);
-       msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
        io->msg.iov = io->msg.fast_iov;
-       return sendmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.iov);
+       return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+                                       &io->msg.iov);
 #else
-       return 0;
+       return -EOPNOTSUPP;
 #endif
 }
 
@@ -2169,7 +2194,6 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                      bool force_nonblock)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_async_msghdr *kmsg = NULL;
        struct socket *sock;
        int ret;
@@ -2183,12 +2207,6 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                struct sockaddr_storage addr;
                unsigned flags;
 
-               flags = READ_ONCE(sqe->msg_flags);
-               if (flags & MSG_DONTWAIT)
-                       req->flags |= REQ_F_NOWAIT;
-               else if (force_nonblock)
-                       flags |= MSG_DONTWAIT;
-
                if (req->io) {
                        kmsg = &req->io->msg;
                        kmsg->msg.msg_name = &addr;
@@ -2197,13 +2215,24 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                                kmsg->iov = kmsg->fast_iov;
                        kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
+                       struct io_sr_msg *sr = &req->sr_msg;
+
                        kmsg = &io.msg;
                        kmsg->msg.msg_name = &addr;
-                       ret = io_sendmsg_prep(req, &io);
+
+                       io.msg.iov = io.msg.fast_iov;
+                       ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
+                                       sr->msg_flags, &io.msg.iov);
                        if (ret)
-                               goto out;
+                               return ret;
                }
 
+               flags = req->sr_msg.msg_flags;
+               if (flags & MSG_DONTWAIT)
+                       req->flags |= REQ_F_NOWAIT;
+               else if (force_nonblock)
+                       flags |= MSG_DONTWAIT;
+
                ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
                if (force_nonblock && ret == -EAGAIN) {
                        if (req->io)
@@ -2218,7 +2247,6 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                        ret = -EINTR;
        }
 
-out:
        if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
                kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
@@ -2231,20 +2259,24 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 #endif
 }
 
-static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_recvmsg_prep(struct io_kiocb *req,
+                          const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       struct user_msghdr __user *msg;
-       unsigned flags;
+       struct io_sr_msg *sr = &req->sr_msg;
+       struct io_async_ctx *io = req->io;
+
+       sr->msg_flags = READ_ONCE(sqe->msg_flags);
+       sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+
+       if (!io)
+               return 0;
 
-       flags = READ_ONCE(sqe->msg_flags);
-       msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
        io->msg.iov = io->msg.fast_iov;
-       return recvmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.uaddr,
-                                       &io->msg.iov);
+       return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+                                       &io->msg.uaddr, &io->msg.iov);
 #else
-       return 0;
+       return -EOPNOTSUPP;
 #endif
 }
 
@@ -2252,7 +2284,6 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                      bool force_nonblock)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_async_msghdr *kmsg = NULL;
        struct socket *sock;
        int ret;
@@ -2262,19 +2293,10 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 
        sock = sock_from_file(req->file, &ret);
        if (sock) {
-               struct user_msghdr __user *msg;
                struct io_async_ctx io;
                struct sockaddr_storage addr;
                unsigned flags;
 
-               flags = READ_ONCE(sqe->msg_flags);
-               if (flags & MSG_DONTWAIT)
-                       req->flags |= REQ_F_NOWAIT;
-               else if (force_nonblock)
-                       flags |= MSG_DONTWAIT;
-
-               msg = (struct user_msghdr __user *) (unsigned long)
-                       READ_ONCE(sqe->addr);
                if (req->io) {
                        kmsg = &req->io->msg;
                        kmsg->msg.msg_name = &addr;
@@ -2283,14 +2305,27 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                                kmsg->iov = kmsg->fast_iov;
                        kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
+                       struct io_sr_msg *sr = &req->sr_msg;
+
                        kmsg = &io.msg;
                        kmsg->msg.msg_name = &addr;
-                       ret = io_recvmsg_prep(req, &io);
+
+                       io.msg.iov = io.msg.fast_iov;
+                       ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
+                                       sr->msg_flags, &io.msg.uaddr,
+                                       &io.msg.iov);
                        if (ret)
-                               goto out;
+                               return ret;
                }
 
-               ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr, flags);
+               flags = req->sr_msg.msg_flags;
+               if (flags & MSG_DONTWAIT)
+                       req->flags |= REQ_F_NOWAIT;
+               else if (force_nonblock)
+                       flags |= MSG_DONTWAIT;
+
+               ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
+                                               kmsg->uaddr, flags);
                if (force_nonblock && ret == -EAGAIN) {
                        if (req->io)
                                return -EAGAIN;
@@ -2304,7 +2339,6 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
                        ret = -EINTR;
        }
 
-out:
        if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
                kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
@@ -2317,25 +2351,19 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
 #endif
 }
 
-static int io_accept_prep(struct io_kiocb *req)
+static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_accept *accept = &req->accept;
 
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
-
        if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
                return -EINVAL;
        if (sqe->ioprio || sqe->len || sqe->buf_index)
                return -EINVAL;
 
-       accept->addr = (struct sockaddr __user *)
-                               (unsigned long) READ_ONCE(sqe->addr);
-       accept->addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
+       accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
-       req->flags |= REQ_F_PREPPED;
        return 0;
 #else
        return -EOPNOTSUPP;
@@ -2373,7 +2401,7 @@ static void io_accept_finish(struct io_wq_work **workptr)
                return;
        __io_accept(req, &nxt, false);
        if (nxt)
-               *workptr = &nxt->work;
+               io_wq_assign_next(workptr, nxt);
 }
 #endif
 
@@ -2383,10 +2411,6 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
 #if defined(CONFIG_NET)
        int ret;
 
-       ret = io_accept_prep(req);
-       if (ret)
-               return ret;
-
        ret = __io_accept(req, nxt, force_nonblock);
        if (ret == -EAGAIN && force_nonblock) {
                req->work.func = io_accept_finish;
@@ -2400,18 +2424,27 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
 #endif
 }
 
-static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       struct sockaddr __user *addr;
-       int addr_len;
+       struct io_connect *conn = &req->connect;
+       struct io_async_ctx *io = req->io;
 
-       addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
-       addr_len = READ_ONCE(sqe->addr2);
-       return move_addr_to_kernel(addr, addr_len, &io->connect.address);
+       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
+               return -EINVAL;
+
+       conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       conn->addr_len =  READ_ONCE(sqe->addr2);
+
+       if (!io)
+               return 0;
+
+       return move_addr_to_kernel(conn->addr, conn->addr_len,
+                                       &io->connect.address);
 #else
-       return 0;
+       return -EOPNOTSUPP;
 #endif
 }
 
@@ -2419,30 +2452,25 @@ static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
                      bool force_nonblock)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_async_ctx __io, *io;
        unsigned file_flags;
-       int addr_len, ret;
-
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
-               return -EINVAL;
-
-       addr_len = READ_ONCE(sqe->addr2);
-       file_flags = force_nonblock ? O_NONBLOCK : 0;
+       int ret;
 
        if (req->io) {
                io = req->io;
        } else {
-               ret = io_connect_prep(req, &__io);
+               ret = move_addr_to_kernel(req->connect.addr,
+                                               req->connect.addr_len,
+                                               &__io.connect.address);
                if (ret)
                        goto out;
                io = &__io;
        }
 
-       ret = __sys_connect_file(req->file, &io->connect.address, addr_len,
-                                       file_flags);
+       file_flags = force_nonblock ? O_NONBLOCK : 0;
+
+       ret = __sys_connect_file(req->file, &io->connect.address,
+                                       req->connect.addr_len, file_flags);
        if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
                if (req->io)
                        return -EAGAIN;
@@ -2513,12 +2541,9 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
        return -ENOENT;
 }
 
-static int io_poll_remove_prep(struct io_kiocb *req)
+static int io_poll_remove_prep(struct io_kiocb *req,
+                              const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
@@ -2526,7 +2551,6 @@ static int io_poll_remove_prep(struct io_kiocb *req)
                return -EINVAL;
 
        req->poll.addr = READ_ONCE(sqe->addr);
-       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -2540,10 +2564,6 @@ static int io_poll_remove(struct io_kiocb *req)
        u64 addr;
        int ret;
 
-       ret = io_poll_remove_prep(req);
-       if (ret)
-               return ret;
-
        addr = req->poll.addr;
        spin_lock_irq(&ctx->completion_lock);
        ret = io_poll_cancel(ctx, addr);
@@ -2612,7 +2632,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
                req_set_fail_links(req);
        io_put_req_find_next(req, &nxt);
        if (nxt)
-               *workptr = &nxt->work;
+               io_wq_assign_next(workptr, nxt);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -2681,14 +2701,11 @@ static void io_poll_req_insert(struct io_kiocb *req)
        hlist_add_head(&req->hash_node, list);
 }
 
-static int io_poll_add_prep(struct io_kiocb *req)
+static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_poll_iocb *poll = &req->poll;
        u16 events;
 
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
@@ -2696,7 +2713,6 @@ static int io_poll_add_prep(struct io_kiocb *req)
        if (!poll->file)
                return -EBADF;
 
-       req->flags |= REQ_F_PREPPED;
        events = READ_ONCE(sqe->poll_events);
        poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
        return 0;
@@ -2709,11 +2725,6 @@ static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
        struct io_poll_table ipt;
        bool cancel = false;
        __poll_t mask;
-       int ret;
-
-       ret = io_poll_add_prep(req);
-       if (ret)
-               return ret;
 
        INIT_IO_WORK(&req->work, io_poll_complete_work);
        INIT_HLIST_NODE(&req->hash_node);
@@ -2832,12 +2843,9 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
        return 0;
 }
 
-static int io_timeout_remove_prep(struct io_kiocb *req)
+static int io_timeout_remove_prep(struct io_kiocb *req,
+                                 const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
@@ -2848,7 +2856,6 @@ static int io_timeout_remove_prep(struct io_kiocb *req)
        if (req->timeout.flags)
                return -EINVAL;
 
-       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -2860,10 +2867,6 @@ static int io_timeout_remove(struct io_kiocb *req)
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       ret = io_timeout_remove_prep(req);
-       if (ret)
-               return ret;
-
        spin_lock_irq(&ctx->completion_lock);
        ret = io_timeout_cancel(ctx, req->timeout.addr);
 
@@ -2877,10 +2880,9 @@ static int io_timeout_remove(struct io_kiocb *req)
        return 0;
 }
 
-static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
+static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                           bool is_timeout_link)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_timeout_data *data;
        unsigned flags;
 
@@ -2894,7 +2896,12 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
        if (flags & ~IORING_TIMEOUT_ABS)
                return -EINVAL;
 
-       data = &io->timeout;
+       req->timeout.count = READ_ONCE(sqe->off);
+
+       if (!req->io && io_alloc_async_ctx(req))
+               return -ENOMEM;
+
+       data = &req->io->timeout;
        data->req = req;
        req->flags |= REQ_F_TIMEOUT;
 
@@ -2912,21 +2919,12 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
 
 static int io_timeout(struct io_kiocb *req)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        unsigned count;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_timeout_data *data;
        struct list_head *entry;
        unsigned span = 0;
-       int ret;
 
-       if (!req->io) {
-               if (io_alloc_async_ctx(req))
-                       return -ENOMEM;
-               ret = io_timeout_prep(req, req->io, false);
-               if (ret)
-                       return ret;
-       }
        data = &req->io->timeout;
 
        /*
@@ -2934,7 +2932,7 @@ static int io_timeout(struct io_kiocb *req)
         * timeout event to be satisfied. If it isn't set, then this is
         * a pure timeout request, sequence isn't used.
         */
-       count = READ_ONCE(sqe->off);
+       count = req->timeout.count;
        if (!count) {
                req->flags |= REQ_F_TIMEOUT_NOSEQ;
                spin_lock_irq(&ctx->completion_lock);
@@ -3052,19 +3050,15 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
        io_put_req_find_next(req, nxt);
 }
 
-static int io_async_cancel_prep(struct io_kiocb *req)
+static int io_async_cancel_prep(struct io_kiocb *req,
+                               const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-
-       if (req->flags & REQ_F_PREPPED)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
            sqe->cancel_flags)
                return -EINVAL;
 
-       req->flags |= REQ_F_PREPPED;
        req->cancel.addr = READ_ONCE(sqe->addr);
        return 0;
 }
@@ -3072,21 +3066,14 @@ static int io_async_cancel_prep(struct io_kiocb *req)
 static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       int ret;
-
-       ret = io_async_cancel_prep(req);
-       if (ret)
-               return ret;
 
        io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
        return 0;
 }
 
-static int io_req_defer_prep(struct io_kiocb *req)
+static int io_req_defer_prep(struct io_kiocb *req,
+                            const struct io_uring_sqe *sqe)
 {
-       struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct io_async_ctx *io = req->io;
-       struct iov_iter iter;
        ssize_t ret = 0;
 
        switch (req->opcode) {
@@ -3094,61 +3081,47 @@ static int io_req_defer_prep(struct io_kiocb *req)
                break;
        case IORING_OP_READV:
        case IORING_OP_READ_FIXED:
-               /* ensure prep does right import */
-               req->io = NULL;
-               ret = io_read_prep(req, &iovec, &iter, true);
-               req->io = io;
-               if (ret < 0)
-                       break;
-               io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
-               ret = 0;
+               ret = io_read_prep(req, sqe, true);
                break;
        case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
-               /* ensure prep does right import */
-               req->io = NULL;
-               ret = io_write_prep(req, &iovec, &iter, true);
-               req->io = io;
-               if (ret < 0)
-                       break;
-               io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
-               ret = 0;
+               ret = io_write_prep(req, sqe, true);
                break;
        case IORING_OP_POLL_ADD:
-               ret = io_poll_add_prep(req);
+               ret = io_poll_add_prep(req, sqe);
                break;
        case IORING_OP_POLL_REMOVE:
-               ret = io_poll_remove_prep(req);
+               ret = io_poll_remove_prep(req, sqe);
                break;
        case IORING_OP_FSYNC:
-               ret = io_prep_fsync(req);
+               ret = io_prep_fsync(req, sqe);
                break;
        case IORING_OP_SYNC_FILE_RANGE:
-               ret = io_prep_sfr(req);
+               ret = io_prep_sfr(req, sqe);
                break;
        case IORING_OP_SENDMSG:
-               ret = io_sendmsg_prep(req, io);
+               ret = io_sendmsg_prep(req, sqe);
                break;
        case IORING_OP_RECVMSG:
-               ret = io_recvmsg_prep(req, io);
+               ret = io_recvmsg_prep(req, sqe);
                break;
        case IORING_OP_CONNECT:
-               ret = io_connect_prep(req, io);
+               ret = io_connect_prep(req, sqe);
                break;
        case IORING_OP_TIMEOUT:
-               ret = io_timeout_prep(req, io, false);
+               ret = io_timeout_prep(req, sqe, false);
                break;
        case IORING_OP_TIMEOUT_REMOVE:
-               ret = io_timeout_remove_prep(req);
+               ret = io_timeout_remove_prep(req, sqe);
                break;
        case IORING_OP_ASYNC_CANCEL:
-               ret = io_async_cancel_prep(req);
+               ret = io_async_cancel_prep(req, sqe);
                break;
        case IORING_OP_LINK_TIMEOUT:
-               ret = io_timeout_prep(req, io, true);
+               ret = io_timeout_prep(req, sqe, true);
                break;
        case IORING_OP_ACCEPT:
-               ret = io_accept_prep(req);
+               ret = io_accept_prep(req, sqe);
                break;
        default:
                printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
@@ -3160,7 +3133,7 @@ static int io_req_defer_prep(struct io_kiocb *req)
        return ret;
 }
 
-static int io_req_defer(struct io_kiocb *req)
+static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
@@ -3169,10 +3142,10 @@ static int io_req_defer(struct io_kiocb *req)
        if (!req_need_defer(req) && list_empty(&ctx->defer_list))
                return 0;
 
-       if (io_alloc_async_ctx(req))
+       if (!req->io && io_alloc_async_ctx(req))
                return -EAGAIN;
 
-       ret = io_req_defer_prep(req);
+       ret = io_req_defer_prep(req, sqe);
        if (ret < 0)
                return ret;
 
@@ -3188,9 +3161,8 @@ static int io_req_defer(struct io_kiocb *req)
        return -EIOCBQUEUED;
 }
 
-__attribute__((nonnull))
-static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
-                       bool force_nonblock)
+static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                       struct io_kiocb **nxt, bool force_nonblock)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
@@ -3200,52 +3172,109 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
                ret = io_nop(req);
                break;
        case IORING_OP_READV:
-               if (unlikely(req->sqe->buf_index))
-                       return -EINVAL;
-               ret = io_read(req, nxt, force_nonblock);
-               break;
-       case IORING_OP_WRITEV:
-               if (unlikely(req->sqe->buf_index))
-                       return -EINVAL;
-               ret = io_write(req, nxt, force_nonblock);
-               break;
        case IORING_OP_READ_FIXED:
+               if (sqe) {
+                       ret = io_read_prep(req, sqe, force_nonblock);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_read(req, nxt, force_nonblock);
                break;
+       case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
+               if (sqe) {
+                       ret = io_write_prep(req, sqe, force_nonblock);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_write(req, nxt, force_nonblock);
                break;
        case IORING_OP_FSYNC:
+               if (sqe) {
+                       ret = io_prep_fsync(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_fsync(req, nxt, force_nonblock);
                break;
        case IORING_OP_POLL_ADD:
+               if (sqe) {
+                       ret = io_poll_add_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_poll_add(req, nxt);
                break;
        case IORING_OP_POLL_REMOVE:
+               if (sqe) {
+                       ret = io_poll_remove_prep(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_poll_remove(req);
                break;
        case IORING_OP_SYNC_FILE_RANGE:
+               if (sqe) {
+                       ret = io_prep_sfr(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_sync_file_range(req, nxt, force_nonblock);
                break;
        case IORING_OP_SENDMSG:
+               if (sqe) {
+                       ret = io_sendmsg_prep(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_sendmsg(req, nxt, force_nonblock);
                break;
        case IORING_OP_RECVMSG:
+               if (sqe) {
+                       ret = io_recvmsg_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_recvmsg(req, nxt, force_nonblock);
                break;
        case IORING_OP_TIMEOUT:
+               if (sqe) {
+                       ret = io_timeout_prep(req, sqe, false);
+                       if (ret)
+                               break;
+               }
                ret = io_timeout(req);
                break;
        case IORING_OP_TIMEOUT_REMOVE:
+               if (sqe) {
+                       ret = io_timeout_remove_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_timeout_remove(req);
                break;
        case IORING_OP_ACCEPT:
+               if (sqe) {
+                       ret = io_accept_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_accept(req, nxt, force_nonblock);
                break;
        case IORING_OP_CONNECT:
+               if (sqe) {
+                       ret = io_connect_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_connect(req, nxt, force_nonblock);
                break;
        case IORING_OP_ASYNC_CANCEL:
+               if (sqe) {
+                       ret = io_async_cancel_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_async_cancel(req, nxt);
                break;
        default:
@@ -3257,24 +3286,24 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
                return ret;
 
        if (ctx->flags & IORING_SETUP_IOPOLL) {
+               const bool in_async = io_wq_current_is_worker();
+
                if (req->result == -EAGAIN)
                        return -EAGAIN;
 
+               /* workqueue context doesn't hold uring_lock, grab it now */
+               if (in_async)
+                       mutex_lock(&ctx->uring_lock);
+
                io_iopoll_req_issued(req);
+
+               if (in_async)
+                       mutex_unlock(&ctx->uring_lock);
        }
 
        return 0;
 }
 
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
-       struct io_wq_work *work = *workptr;
-       struct io_kiocb *link = work->data;
-
-       io_queue_linked_timeout(link);
-       work->func = io_wq_submit_work;
-}
-
 static void io_wq_submit_work(struct io_wq_work **workptr)
 {
        struct io_wq_work *work = *workptr;
@@ -3289,7 +3318,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
                req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
                req->in_async = true;
                do {
-                       ret = io_issue_sqe(req, &nxt, false);
+                       ret = io_issue_sqe(req, NULL, &nxt, false);
                        /*
                         * We can get EAGAIN for polled IO even though we're
                         * forcing a sync submission from here, since we can't
@@ -3311,17 +3340,8 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
        }
 
        /* if a dependent link is ready, pass it back */
-       if (!ret && nxt) {
-               struct io_kiocb *link;
-
-               io_prep_async_work(nxt, &link);
-               *workptr = &nxt->work;
-               if (link) {
-                       nxt->work.flags |= IO_WQ_WORK_CB;
-                       nxt->work.func = io_link_work_cb;
-                       nxt->work.data = link;
-               }
-       }
+       if (!ret && nxt)
+               io_wq_assign_next(workptr, nxt);
 }
 
 static bool io_req_op_valid(int op)
@@ -3355,14 +3375,15 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
        return table->files[index & IORING_FILE_TABLE_MASK];
 }
 
-static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
+                          const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
        unsigned flags;
        int fd, ret;
 
-       flags = READ_ONCE(req->sqe->flags);
-       fd = READ_ONCE(req->sqe->fd);
+       flags = READ_ONCE(sqe->flags);
+       fd = READ_ONCE(sqe->fd);
 
        if (flags & IOSQE_IO_DRAIN)
                req->flags |= REQ_F_IO_DRAIN;
@@ -3494,7 +3515,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
        return nxt;
 }
 
-static void __io_queue_sqe(struct io_kiocb *req)
+static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_kiocb *linked_timeout;
        struct io_kiocb *nxt = NULL;
@@ -3503,7 +3524,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
 again:
        linked_timeout = io_prep_linked_timeout(req);
 
-       ret = io_issue_sqe(req, &nxt, true);
+       ret = io_issue_sqe(req, sqe, &nxt, true);
 
        /*
         * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -3550,7 +3571,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
        }
 }
 
-static void io_queue_sqe(struct io_kiocb *req)
+static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        int ret;
 
@@ -3560,7 +3581,7 @@ static void io_queue_sqe(struct io_kiocb *req)
        }
        req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
 
-       ret = io_req_defer(req);
+       ret = io_req_defer(req, sqe);
        if (ret) {
                if (ret != -EIOCBQUEUED) {
                        io_cqring_add_event(req, ret);
@@ -3568,7 +3589,7 @@ static void io_queue_sqe(struct io_kiocb *req)
                        io_double_put_req(req);
                }
        } else
-               __io_queue_sqe(req);
+               __io_queue_sqe(req, sqe);
 }
 
 static inline void io_queue_link_head(struct io_kiocb *req)
@@ -3577,25 +3598,25 @@ static inline void io_queue_link_head(struct io_kiocb *req)
                io_cqring_add_event(req, -ECANCELED);
                io_double_put_req(req);
        } else
-               io_queue_sqe(req);
+               io_queue_sqe(req, NULL);
 }
 
 #define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
                                IOSQE_IO_HARDLINK)
 
-static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
-                         struct io_kiocb **link)
+static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                         struct io_submit_state *state, struct io_kiocb **link)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
        /* enforce forwards compatibility on users */
-       if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) {
+       if (unlikely(sqe->flags & ~SQE_VALID_FLAGS)) {
                ret = -EINVAL;
                goto err_req;
        }
 
-       ret = io_req_set_file(state, req);
+       ret = io_req_set_file(state, req, sqe);
        if (unlikely(ret)) {
 err_req:
                io_cqring_add_event(req, ret);
@@ -3613,10 +3634,10 @@ static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
        if (*link) {
                struct io_kiocb *prev = *link;
 
-               if (req->sqe->flags & IOSQE_IO_DRAIN)
+               if (sqe->flags & IOSQE_IO_DRAIN)
                        (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
 
-               if (req->sqe->flags & IOSQE_IO_HARDLINK)
+               if (sqe->flags & IOSQE_IO_HARDLINK)
                        req->flags |= REQ_F_HARDLINK;
 
                if (io_alloc_async_ctx(req)) {
@@ -3624,7 +3645,7 @@ static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
                        goto err_req;
                }
 
-               ret = io_req_defer_prep(req);
+               ret = io_req_defer_prep(req, sqe);
                if (ret) {
                        /* fail even hard links since we don't submit */
                        prev->flags |= REQ_F_FAIL_LINK;
@@ -3632,15 +3653,18 @@ static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
                }
                trace_io_uring_link(ctx, req, prev);
                list_add_tail(&req->link_list, &prev->link_list);
-       } else if (req->sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
+       } else if (sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
                req->flags |= REQ_F_LINK;
-               if (req->sqe->flags & IOSQE_IO_HARDLINK)
+               if (sqe->flags & IOSQE_IO_HARDLINK)
                        req->flags |= REQ_F_HARDLINK;
 
                INIT_LIST_HEAD(&req->link_list);
+               ret = io_req_defer_prep(req, sqe);
+               if (ret)
+                       req->flags |= REQ_F_FAIL_LINK;
                *link = req;
        } else {
-               io_queue_sqe(req);
+               io_queue_sqe(req, sqe);
        }
 
        return true;
@@ -3685,14 +3709,15 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
 }
 
 /*
- * Fetch an sqe, if one is available. Note that req->sqe will point to memory
+ * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
  * that is mapped by userspace. This means that care needs to be taken to
  * ensure that reads are stable, as we cannot rely on userspace always
  * being a good citizen. If members of the sqe are validated and then later
  * used, it's important that those reads are done through READ_ONCE() to
  * prevent a re-load down the line.
  */
-static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
+                         const struct io_uring_sqe **sqe_ptr)
 {
        struct io_rings *rings = ctx->rings;
        u32 *sq_array = ctx->sq_array;
@@ -3719,9 +3744,9 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
                 * link list.
                 */
                req->sequence = ctx->cached_sq_head;
-               req->sqe = &ctx->sq_sqes[head];
-               req->opcode = READ_ONCE(req->sqe->opcode);
-               req->user_data = READ_ONCE(req->sqe->user_data);
+               *sqe_ptr = &ctx->sq_sqes[head];
+               req->opcode = READ_ONCE((*sqe_ptr)->opcode);
+               req->user_data = READ_ONCE((*sqe_ptr)->user_data);
                ctx->cached_sq_head++;
                return true;
        }
@@ -3753,6 +3778,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
        }
 
        for (i = 0; i < nr; i++) {
+               const struct io_uring_sqe *sqe;
                struct io_kiocb *req;
                unsigned int sqe_flags;
 
@@ -3762,7 +3788,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                                submitted = -EAGAIN;
                        break;
                }
-               if (!io_get_sqring(ctx, req)) {
+               if (!io_get_sqring(ctx, req, &sqe)) {
                        __io_free_req(req);
                        break;
                }
@@ -3776,7 +3802,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                }
 
                submitted++;
-               sqe_flags = req->sqe->flags;
+               sqe_flags = sqe->flags;
 
                req->ring_file = ring_file;
                req->ring_fd = ring_fd;
@@ -3784,7 +3810,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                req->in_async = async;
                req->needs_fixed_file = async;
                trace_io_uring_submit_sqe(ctx, req->user_data, true, async);
-               if (!io_submit_sqe(req, statep, &link))
+               if (!io_submit_sqe(req, sqe, statep, &link))
                        break;
                /*
                 * If previous wasn't linked and we have a linked command,
@@ -4702,7 +4728,7 @@ static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
                if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
                        return -EFAULT;
 
-               dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
+               dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
                dst->iov_len = ciov.iov_len;
                return 0;
        }
@@ -5133,6 +5159,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
        } else if (to_submit) {
                struct mm_struct *cur_mm;
 
+               if (current->mm != ctx->sqo_mm ||
+                   current_cred() != ctx->creds) {
+                       ret = -EPERM;
+                       goto out;
+               }
+
                to_submit = min(to_submit, ctx->sq_entries);
                mutex_lock(&ctx->uring_lock);
                /* already have mm, so io_submit_sqes() won't try to grab it */
index 6970f55daf54341f307a5052eb44b9e2d039ead8..44b6da0328426ba0f133733114973f7d7a526e6a 100644 (file)
@@ -2853,7 +2853,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
        }
        if (inode) {
                /* userspace relies on this representation of dev_t */
-               seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
+               seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
                                MAJOR(inode->i_sb->s_dev),
                                MINOR(inode->i_sb->s_dev), inode->i_ino);
        } else {
index a63620cdb73a732d1a9f987c7889923c05830535..ccba3c4c44797b54b859acc2378d5f317e6c29be 100644 (file)
@@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
 {
        bio->bi_end_io = mpage_end_io;
        bio_set_op_attrs(bio, op, op_flags);
-       guard_bio_eod(op, bio);
+       guard_bio_eod(bio);
        submit_bio(bio);
        return NULL;
 }
index d6c91d1e88cb361f751f825bf84ace990629223b..d2720dc71d0efc111c1c2da0f2d71acf5c39c39d 100644 (file)
@@ -1232,6 +1232,7 @@ static int follow_managed(struct path *path, struct nameidata *nd)
                        BUG_ON(!path->dentry->d_op);
                        BUG_ON(!path->dentry->d_op->d_manage);
                        ret = path->dentry->d_op->d_manage(path, false);
+                       flags = smp_load_acquire(&path->dentry->d_flags);
                        if (ret < 0)
                                break;
                }
@@ -1649,17 +1650,15 @@ static struct dentry *__lookup_slow(const struct qstr *name,
        if (IS_ERR(dentry))
                return dentry;
        if (unlikely(!d_in_lookup(dentry))) {
-               if (!(flags & LOOKUP_NO_REVAL)) {
-                       int error = d_revalidate(dentry, flags);
-                       if (unlikely(error <= 0)) {
-                               if (!error) {
-                                       d_invalidate(dentry);
-                                       dput(dentry);
-                                       goto again;
-                               }
+               int error = d_revalidate(dentry, flags);
+               if (unlikely(error <= 0)) {
+                       if (!error) {
+                               d_invalidate(dentry);
                                dput(dentry);
-                               dentry = ERR_PTR(error);
+                               goto again;
                        }
+                       dput(dentry);
+                       dentry = ERR_PTR(error);
                }
        } else {
                old = inode->i_op->lookup(inode, dentry, flags);
@@ -2617,72 +2616,6 @@ int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
 }
 EXPORT_SYMBOL(user_path_at_empty);
 
-/**
- * mountpoint_last - look up last component for umount
- * @nd:   pathwalk nameidata - currently pointing at parent directory of "last"
- *
- * This is a special lookup_last function just for umount. In this case, we
- * need to resolve the path without doing any revalidation.
- *
- * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
- * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
- * in almost all cases, this lookup will be served out of the dcache. The only
- * cases where it won't are if nd->last refers to a symlink or the path is
- * bogus and it doesn't exist.
- *
- * Returns:
- * -error: if there was an error during lookup. This includes -ENOENT if the
- *         lookup found a negative dentry.
- *
- * 0:      if we successfully resolved nd->last and found it to not to be a
- *         symlink that needs to be followed.
- *
- * 1:      if we successfully resolved nd->last and found it to be a symlink
- *         that needs to be followed.
- */
-static int
-mountpoint_last(struct nameidata *nd)
-{
-       int error = 0;
-       struct dentry *dir = nd->path.dentry;
-       struct path path;
-
-       /* If we're in rcuwalk, drop out of it to handle last component */
-       if (nd->flags & LOOKUP_RCU) {
-               if (unlazy_walk(nd))
-                       return -ECHILD;
-       }
-
-       nd->flags &= ~LOOKUP_PARENT;
-
-       if (unlikely(nd->last_type != LAST_NORM)) {
-               error = handle_dots(nd, nd->last_type);
-               if (error)
-                       return error;
-               path.dentry = dget(nd->path.dentry);
-       } else {
-               path.dentry = d_lookup(dir, &nd->last);
-               if (!path.dentry) {
-                       /*
-                        * No cached dentry. Mounted dentries are pinned in the
-                        * cache, so that means that this dentry is probably
-                        * a symlink or the path doesn't actually point
-                        * to a mounted dentry.
-                        */
-                       path.dentry = lookup_slow(&nd->last, dir,
-                                            nd->flags | LOOKUP_NO_REVAL);
-                       if (IS_ERR(path.dentry))
-                               return PTR_ERR(path.dentry);
-               }
-       }
-       if (d_flags_negative(smp_load_acquire(&path.dentry->d_flags))) {
-               dput(path.dentry);
-               return -ENOENT;
-       }
-       path.mnt = nd->path.mnt;
-       return step_into(nd, &path, 0, d_backing_inode(path.dentry), 0);
-}
-
 /**
  * path_mountpoint - look up a path to be umounted
  * @nd:                lookup context
@@ -2699,14 +2632,17 @@ path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
        int err;
 
        while (!(err = link_path_walk(s, nd)) &&
-               (err = mountpoint_last(nd)) > 0) {
+               (err = lookup_last(nd)) > 0) {
                s = trailing_symlink(nd);
        }
+       if (!err && (nd->flags & LOOKUP_RCU))
+               err = unlazy_walk(nd);
+       if (!err)
+               err = handle_lookup_down(nd);
        if (!err) {
                *path = nd->path;
                nd->path.mnt = NULL;
                nd->path.dentry = NULL;
-               follow_mount(path);
        }
        terminate_walk(nd);
        return err;
index be601d3a800807360a0272c713d4abfc25795687..5e1bf611a9eb6922554c405d4aa753c866e81585 100644 (file)
@@ -1728,7 +1728,7 @@ static bool is_mnt_ns_file(struct dentry *dentry)
               dentry->d_fsdata == &mntns_operations;
 }
 
-struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
 {
        return container_of(ns, struct mnt_namespace, ns);
 }
index f64a33d2a1d15f36d258e8db1579f5ed3080a984..2a82dcce5fc169d189f708de67d97b3bce450e63 100644 (file)
@@ -206,7 +206,6 @@ TRACE_DEFINE_ENUM(LOOKUP_AUTOMOUNT);
 TRACE_DEFINE_ENUM(LOOKUP_PARENT);
 TRACE_DEFINE_ENUM(LOOKUP_REVAL);
 TRACE_DEFINE_ENUM(LOOKUP_RCU);
-TRACE_DEFINE_ENUM(LOOKUP_NO_REVAL);
 TRACE_DEFINE_ENUM(LOOKUP_OPEN);
 TRACE_DEFINE_ENUM(LOOKUP_CREATE);
 TRACE_DEFINE_ENUM(LOOKUP_EXCL);
@@ -224,7 +223,6 @@ TRACE_DEFINE_ENUM(LOOKUP_DOWN);
                        { LOOKUP_PARENT, "PARENT" }, \
                        { LOOKUP_REVAL, "REVAL" }, \
                        { LOOKUP_RCU, "RCU" }, \
-                       { LOOKUP_NO_REVAL, "NO_REVAL" }, \
                        { LOOKUP_OPEN, "OPEN" }, \
                        { LOOKUP_CREATE, "CREATE" }, \
                        { LOOKUP_EXCL, "EXCL" }, \
index a0431642c6b55f4d80f66ddb29d7f0f8f5314bf8..f75767bd623afcb5f5e67e2a61dbf95c51e442f7 100644 (file)
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -3,6 +3,7 @@
 #include <linux/pseudo_fs.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/proc_fs.h>
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
 #include <linux/ktime.h>
@@ -11,6 +12,8 @@
 #include <linux/nsfs.h>
 #include <linux/uaccess.h>
 
+#include "internal.h"
+
 static struct vfsmount *nsfs_mnt;
 
 static long ns_ioctl(struct file *filp, unsigned int ioctl,
index 1c4c51f3df60fce4fb0ece07b7a46d821124430c..cda1027d08194242a77709d2340da0f4357864d7 100644 (file)
@@ -3282,6 +3282,7 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
 
        debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
                           &dlm_debug->d_filter_secs);
+       ocfs2_get_dlm_debug(dlm_debug);
 }
 
 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
index 1afe57f425a01e85639dd6192948bb4b6c3a9bbe..68ba354cf3610aa0808fccb4b9575e0c7a915581 100644 (file)
@@ -1066,6 +1066,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
 
        ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
 
+       if (replayed) {
+               jbd2_journal_lock_updates(journal->j_journal);
+               status = jbd2_journal_flush(journal->j_journal);
+               jbd2_journal_unlock_updates(journal->j_journal);
+               if (status < 0)
+                       mlog_errno(status);
+       }
+
        status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
        if (status < 0) {
                mlog_errno(status);
index 84ad1c90d535db8fa333ca77b1c1073f5914e4a1..249672bf54fe7b3d973c485019e1d112989c741f 100644 (file)
@@ -631,12 +631,15 @@ EXPORT_SYMBOL_GPL(posix_acl_create);
 
 /**
  * posix_acl_update_mode  -  update mode in set_acl
+ * @inode: target inode
+ * @mode_p: mode (pointer) for update
+ * @acl: acl pointer
  *
  * Update the file mode when setting an ACL: compute the new file permission
  * bits based on the ACL.  In addition, if the ACL is equivalent to the new
- * file mode, set *acl to NULL to indicate that no ACL should be set.
+ * file mode, set *@acl to NULL to indicate that no ACL should be set.
  *
- * As with chmod, clear the setgit bit if the caller is not in the owning group
+ * As with chmod, clear the setgid bit if the caller is not in the owning group
  * or capable of CAP_FSETID (see inode_change_ok).
  *
  * Called from set_acl inode operations.
index 8caff834f002668eb79d84d2adf8c108dbc2c640..013486b5125ed78f80f10024c7c1f3d67fe8d647 100644 (file)
@@ -407,6 +407,17 @@ static int notrace ramoops_pstore_write(struct pstore_record *record)
 
        prz = cxt->dprzs[cxt->dump_write_cnt];
 
+       /*
+        * Since this is a new crash dump, we need to reset the buffer in
+        * case it still has an old dump present. Without this, the new dump
+        * will get appended, which would seriously confuse anything trying
+        * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
+        * expects to find a dump header in the beginning of buffer data, so
+        * we must to reset the buffer values, in order to ensure that the
+        * header will be written to the beginning of the buffer.
+        */
+       persistent_ram_zap(prz);
+
        /* Build header and append record contents. */
        hlen = ramoops_write_kmsg_hdr(prz, record);
        if (!hlen)
@@ -572,6 +583,7 @@ static int ramoops_init_przs(const char *name,
                prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
                                               &cxt->ecc_info,
                                               cxt->memtype, flags, label);
+               kfree(label);
                if (IS_ERR(prz_ar[i])) {
                        err = PTR_ERR(prz_ar[i]);
                        dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
@@ -617,6 +629,7 @@ static int ramoops_init_prz(const char *name,
        label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
        *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
                                  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
+       kfree(label);
        if (IS_ERR(*prz)) {
                int err = PTR_ERR(*prz);
 
index 8823f65888f034cd5997a68898f2c7792d8ae69c..1f4d8c06f9be61d8f95747aa1d1b2dd730d0a36d 100644 (file)
@@ -574,7 +574,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
        /* Initialize general buffer state. */
        raw_spin_lock_init(&prz->buffer_lock);
        prz->flags = flags;
-       prz->label = label;
+       prz->label = kstrdup(label, GFP_KERNEL);
 
        ret = persistent_ram_buffer_map(start, size, prz, memtype);
        if (ret)
index a950a22c489041e78c05094553bb254313c9d636..cac7404b2bdd2cdcd1c4041f496aecbf4e9245fe 100644 (file)
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
+#ifndef flush_cache_all
 static inline void flush_cache_all(void)
 {
 }
+#endif
 
+#ifndef flush_cache_mm
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_dup_mm
 static inline void flush_cache_dup_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_range
 static inline void flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start,
                                     unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_page
 static inline void flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long vmaddr,
                                    unsigned long pfn)
 {
 }
+#endif
 
+#ifndef flush_dcache_page
 static inline void flush_dcache_page(struct page *page)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_lock
 static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_unlock
 static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_icache_range
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_icache_page
 static inline void flush_icache_page(struct vm_area_struct *vma,
                                     struct page *page)
 {
 }
+#endif
 
+#ifndef flush_icache_user_range
 static inline void flush_icache_user_range(struct vm_area_struct *vma,
                                           struct page *page,
                                           unsigned long addr, int len)
 {
 }
+#endif
 
+#ifndef flush_cache_vmap
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)     \
        do { \
                memcpy(dst, src, len); \
                flush_icache_user_range(vma, page, vaddr, len); \
        } while (0)
+#endif
+
+#ifndef copy_from_user_page
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy(dst, src, len)
+#endif
 
 #endif /* __ASM_CACHEFLUSH_H */
index d5fc90b304876697740c7d994a78ea8692e9f85b..c1bda7030e2de8ad3e904471b0264c9effccfcde 100644 (file)
@@ -605,6 +605,12 @@ struct drm_dp_mst_topology_mgr {
         * &drm_dp_sideband_msg_tx.state once they are queued
         */
        struct mutex qlock;
+
+       /**
+        * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
+        */
+       bool is_waiting_for_dwn_reply;
+
        /**
         * @tx_msg_downq: List of pending down replies.
         */
index c614438bcbdb89508432da77c6d55b356c950625..fbc524a900da118619aa3683c7992a1df56627a1 100644 (file)
@@ -46,9 +46,9 @@
 #define RESET_VD_RMEM                  64
 #define RESET_AUDIN                    65
 #define RESET_DBLK                     66
-#define RESET_PIC_DC                   66
-#define RESET_PSC                      66
-#define RESET_NAND                     66
+#define RESET_PIC_DC                   67
+#define RESET_PSC                      68
+#define RESET_NAND                     69
 #define RESET_GE2D                     70
 #define RESET_PARSER_REG               71
 #define RESET_PARSER_FETCH             72
index 6782f0d45ebe5dce4687f1f5db4ab39f3673c3a0..49e5383d4222257bed2e103fa6c2ec29b836bf00 100644 (file)
@@ -19,6 +19,8 @@ struct ahci_host_priv;
 struct platform_device;
 struct scsi_host_template;
 
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
 int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
 void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
 int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
index 3cdb84cdc48843bd970f41f6221e1467fd8b86ff..853d92ceee64ed5d89f8900f2235af22b1b6fbe1 100644 (file)
@@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
                                     gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+void bio_truncate(struct bio *bio, unsigned new_size);
 
 static inline void zero_fill_bio(struct bio *bio)
 {
index 47eb22a3b7f99ed14d2caafeada6bce779b5ee29..4c636c42ad6864523fcf637ddf28ce66ee0934b2 100644 (file)
@@ -328,6 +328,7 @@ struct queue_limits {
        unsigned int            max_sectors;
        unsigned int            max_segment_size;
        unsigned int            physical_block_size;
+       unsigned int            logical_block_size;
        unsigned int            alignment_offset;
        unsigned int            io_min;
        unsigned int            io_opt;
@@ -338,7 +339,6 @@ struct queue_limits {
        unsigned int            discard_granularity;
        unsigned int            discard_alignment;
 
-       unsigned short          logical_block_size;
        unsigned short          max_segments;
        unsigned short          max_integrity_segments;
        unsigned short          max_discard_segments;
@@ -1077,7 +1077,7 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q,
                unsigned int max_write_same_sectors);
 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
                unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
@@ -1291,7 +1291,7 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
        return q->limits.max_segment_size;
 }
 
-static inline unsigned short queue_logical_block_size(const struct request_queue *q)
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
 {
        int retval = 512;
 
@@ -1301,7 +1301,7 @@ static inline unsigned short queue_logical_block_size(const struct request_queue
        return retval;
 }
 
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
 {
        return queue_logical_block_size(bdev_get_queue(bdev));
 }
index 679a42253170bf6325cbdfebadfb283986878d2e..a81c13ac19728c233de45d6ef9fbfae4e4a94e06 100644 (file)
@@ -153,26 +153,4 @@ static inline void bvec_advance(const struct bio_vec *bvec,
        }
 }
 
-/*
- * Get the last single-page segment from the multi-page bvec and store it
- * in @seg
- */
-static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
-                                       struct bio_vec *seg)
-{
-       unsigned total = bvec->bv_offset + bvec->bv_len;
-       unsigned last_page = (total - 1) / PAGE_SIZE;
-
-       seg->bv_page = bvec->bv_page + last_page;
-
-       /* the whole segment is inside the last page */
-       if (bvec->bv_offset >= last_page * PAGE_SIZE) {
-               seg->bv_offset = bvec->bv_offset % PAGE_SIZE;
-               seg->bv_len = bvec->bv_len;
-       } else {
-               seg->bv_offset = 0;
-               seg->bv_len = total - last_page * PAGE_SIZE;
-       }
-}
-
 #endif /* __LINUX_BVEC_ITER_H */
index 9b3c720a31b189b2ccf6637636ad02ecc3503f70..5e3d45525bd358ebefc999945c09ff4d5a124ffa 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/can/error.h>
 #include <linux/can/led.h>
 #include <linux/can/netlink.h>
+#include <linux/can/skb.h>
 #include <linux/netdevice.h>
 
 /*
@@ -91,6 +92,36 @@ struct can_priv {
 #define get_can_dlc(i)         (min_t(__u8, (i), CAN_MAX_DLC))
 #define get_canfd_dlc(i)       (min_t(__u8, (i), CANFD_MAX_DLC))
 
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static inline bool can_skb_headroom_valid(struct net_device *dev,
+                                         struct sk_buff *skb)
+{
+       /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+       if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+               return false;
+
+       /* af_packet does not apply CAN skb specific settings */
+       if (skb->ip_summed == CHECKSUM_NONE) {
+               /* init headroom */
+               can_skb_prv(skb)->ifindex = dev->ifindex;
+               can_skb_prv(skb)->skbcnt = 0;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               /* preform proper loopback on capable devices */
+               if (dev->flags & IFF_ECHO)
+                       skb->pkt_type = PACKET_LOOPBACK;
+               else
+                       skb->pkt_type = PACKET_HOST;
+
+               skb_reset_mac_header(skb);
+               skb_reset_network_header(skb);
+               skb_reset_transport_header(skb);
+       }
+
+       return true;
+}
+
 /* Drop a given socketbuffer if it does not contain a valid CAN frame. */
 static inline bool can_dropped_invalid_skb(struct net_device *dev,
                                          struct sk_buff *skb)
@@ -108,6 +139,9 @@ static inline bool can_dropped_invalid_skb(struct net_device *dev,
        } else
                goto inval_skb;
 
+       if (!can_skb_headroom_valid(dev, skb))
+               goto inval_skb;
+
        return false;
 
 inval_skb:
index 8fcdee1c0cf9559fc99671bdfb64782acd08619f..dad4a68fa00947f3c02cd13e597ea42ef7c713dc 100644 (file)
@@ -1364,8 +1364,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
        struct dma_slave_caps caps;
+       int ret;
 
-       dma_get_slave_caps(tx->chan, &caps);
+       ret = dma_get_slave_caps(tx->chan, &caps);
+       if (ret)
+               return ret;
 
        if (caps.descriptor_reuse) {
                tx->flags |= DMA_CTRL_REUSE;
index 76cf11e905e160b32625adefd1bcc7b641ff6527..8a9792a6427ad9cf58b50c79cbfe185615800dcb 100644 (file)
@@ -24,6 +24,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
        return (struct ethhdr *)skb_mac_header(skb);
 }
 
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+       return (struct ethhdr *)skb->data;
+}
+
 static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
 {
        return (struct ethhdr *)skb_inner_mac_header(skb);
index 6d8bf4bdf240dfc8a5959641b568678e74750698..4a16b39ae353ee8be6c908a597575c33aaa0cc25 100644 (file)
 #define VTD_STRIDE_SHIFT        (9)
 #define VTD_STRIDE_MASK         (((u64)-1) << VTD_STRIDE_SHIFT)
 
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
+#define DMA_PTE_READ           BIT_ULL(0)
+#define DMA_PTE_WRITE          BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE     BIT_ULL(7)
+#define DMA_PTE_SNP            BIT_ULL(11)
+
+#define DMA_FL_PTE_PRESENT     BIT_ULL(0)
+#define DMA_FL_PTE_XD          BIT_ULL(63)
 
 #define CONTEXT_TT_MULTI_LEVEL 0
 #define CONTEXT_TT_DEV_IOTLB   1
@@ -435,8 +438,10 @@ enum {
 
 #define VTD_FLAG_TRANS_PRE_ENABLED     (1 << 0)
 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+#define VTD_FLAG_SVM_CAPABLE           (1 << 2)
 
 extern int intel_iommu_sm;
+extern spinlock_t device_domain_lock;
 
 #define sm_supported(iommu)    (intel_iommu_sm && ecap_smts((iommu)->ecap))
 #define pasid_supported(iommu) (sm_supported(iommu) &&                 \
@@ -609,10 +614,11 @@ static inline void dma_clear_pte(struct dma_pte *pte)
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
 #ifdef CONFIG_64BIT
-       return pte->val & VTD_PAGE_MASK;
+       return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #else
        /* Must have a full atomic 64-bit read */
-       return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+       return  __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+                       VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #endif
 }
 
@@ -645,6 +651,8 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                          unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
                        u16 qdep, u64 addr, unsigned mask);
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+                    unsigned long npages, bool ih);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);
@@ -656,9 +664,10 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
                                     void *data), void *data);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct dmar_domain *find_domain(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_svm_init(struct intel_iommu *iommu);
+extern void intel_svm_check(struct intel_iommu *iommu);
 extern int intel_svm_enable_prq(struct intel_iommu *iommu);
 extern int intel_svm_finish_prq(struct intel_iommu *iommu);
 
@@ -686,6 +695,8 @@ struct intel_svm {
 };
 
 extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
 #endif
 
 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
index ee21eedafe986817ea63f079458b34c593cce933..53d53c6c2be9a486e6000df17a4bdb27906e3401 100644 (file)
@@ -83,12 +83,16 @@ struct io_pgtable_cfg {
         * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
         *      on unmap, for DMA domains using the flush queue mechanism for
         *      delayed invalidation.
+        *
+        * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
+        *      for use in the upper half of a split address space.
         */
        #define IO_PGTABLE_QUIRK_ARM_NS         BIT(0)
        #define IO_PGTABLE_QUIRK_NO_PERMS       BIT(1)
        #define IO_PGTABLE_QUIRK_TLBI_ON_MAP    BIT(2)
        #define IO_PGTABLE_QUIRK_ARM_MTK_EXT    BIT(3)
        #define IO_PGTABLE_QUIRK_NON_STRICT     BIT(4)
+       #define IO_PGTABLE_QUIRK_ARM_TTBR1      BIT(5)
        unsigned long                   quirks;
        unsigned long                   pgsize_bitmap;
        unsigned int                    ias;
@@ -100,18 +104,33 @@ struct io_pgtable_cfg {
        /* Low-level data specific to the table format */
        union {
                struct {
-                       u64     ttbr[2];
-                       u64     tcr;
+                       u64     ttbr;
+                       struct {
+                               u32     ips:3;
+                               u32     tg:2;
+                               u32     sh:2;
+                               u32     orgn:2;
+                               u32     irgn:2;
+                               u32     tsz:6;
+                       }       tcr;
                        u64     mair;
                } arm_lpae_s1_cfg;
 
                struct {
                        u64     vttbr;
-                       u64     vtcr;
+                       struct {
+                               u32     ps:3;
+                               u32     tg:2;
+                               u32     sh:2;
+                               u32     orgn:2;
+                               u32     irgn:2;
+                               u32     sl:2;
+                               u32     tsz:6;
+                       }       vtcr;
                } arm_lpae_s2_cfg;
 
                struct {
-                       u32     ttbr[2];
+                       u32     ttbr;
                        u32     tcr;
                        u32     nmrr;
                        u32     prrr;
index e80b83b8cab8e730dcb61d9c052bc9f6d2f64832..d1b5f4d98569de91c6982300cdadc9a92a8f5a4a 100644 (file)
@@ -246,9 +246,10 @@ struct iommu_iotlb_gather {
  * @sva_get_pasid: Get PASID associated to a SVA handle
  * @page_response: handle page request response
  * @cache_invalidate: invalidate translation caches
- * @pgsize_bitmap: bitmap of all possible supported page sizes
  * @sva_bind_gpasid: bind guest pasid and mm
  * @sva_unbind_gpasid: unbind guest pasid and mm
+ * @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @owner: Driver module providing these ops
  */
 struct iommu_ops {
        bool (*capable)(enum iommu_cap);
@@ -318,6 +319,7 @@ struct iommu_ops {
        int (*sva_unbind_gpasid)(struct device *dev, int pasid);
 
        unsigned long pgsize_bitmap;
+       struct module *owner;
 };
 
 /**
@@ -386,12 +388,19 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu);
 int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
 
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
-                                       const struct iommu_ops *ops)
+static inline void __iommu_device_set_ops(struct iommu_device *iommu,
+                                         const struct iommu_ops *ops)
 {
        iommu->ops = ops;
 }
 
+#define iommu_device_set_ops(iommu, ops)                               \
+do {                                                                   \
+       struct iommu_ops *__ops = (struct iommu_ops *)(ops);            \
+       __ops->owner = THIS_MODULE;                                     \
+       __iommu_device_set_ops(iommu, __ops);                           \
+} while (0)
+
 static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
                                           struct fwnode_handle *fwnode)
 {
@@ -572,6 +581,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev);
  * @ops: ops for this device's IOMMU
  * @iommu_fwnode: firmware handle for this device's IOMMU
  * @iommu_priv: IOMMU driver private data for this device
+ * @num_pasid_bits: number of PASID bits supported by this device
  * @num_ids: number of associated device IDs
  * @ids: IDs which this device may present to the IOMMU
  */
@@ -580,6 +590,7 @@ struct iommu_fwspec {
        struct fwnode_handle    *iommu_fwnode;
        void                    *iommu_priv;
        u32                     flags;
+       u32                     num_pasid_bits;
        unsigned int            num_ids;
        u32                     ids[1];
 };
index 3adcb39fa6f5c7e3acb75c8bf3c9ecebc3b1aaa6..0d9db2a14f4444d428c096696dbff51ac7ce5d49 100644 (file)
  */
 #define round_down(x, y) ((x) & ~__round_mask(x, y))
 
-/**
- * FIELD_SIZEOF - get the size of a struct's field
- * @t: the target struct
- * @f: the target struct's field
- * Return: the size of @f in the struct definition without having a
- * declared instance of @t.
- */
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-
 #define typeof_member(T, m)    typeof(((T*)0)->m)
 
 #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
index d3bbfddf616aac6de2134e2adf1e8f416b9a9abe..2dbde119721df4b63982b3dab853ba9ab51ea822 100644 (file)
@@ -1175,6 +1175,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
                                        struct ata_taskfile *tf, u16 *id);
 extern void ata_qc_complete(struct ata_queued_cmd *qc);
 extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
+extern u64 ata_qc_get_active(struct ata_port *ap);
 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
 extern int ata_std_bios_param(struct scsi_device *sdev,
                              struct block_device *bdev,
index 3a08ecdfca116d1cdbbbb383e10e282af7065749..ba0dca6aac6ee307d90b93e1f0dea40e4a5f82c7 100644 (file)
@@ -122,8 +122,8 @@ static inline bool movable_node_is_enabled(void)
 
 extern void arch_remove_memory(int nid, u64 start, u64 size,
                               struct vmem_altmap *altmap);
-extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
-                          unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+                          struct vmem_altmap *altmap);
 
 /* reasonably generic interface to expand the physical pages */
 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
@@ -342,6 +342,9 @@ extern int add_memory(int nid, u64 start, u64 size);
 extern int add_memory_resource(int nid, struct resource *resource);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+                                      unsigned long start_pfn,
+                                      unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern int sparse_add_section(int nid, unsigned long pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap);
index f84b9163c0ee60f47a4afa1a3f03930881e73859..7dfb63b8137355ef6cf83ebce12cc54ff7d91aa8 100644 (file)
 
 #define RTC_AL_SEC             0x0018
 
+#define RTC_AL_SEC_MASK        0x003f
+#define RTC_AL_MIN_MASK        0x003f
+#define RTC_AL_HOU_MASK        0x001f
+#define RTC_AL_DOM_MASK        0x001f
+#define RTC_AL_DOW_MASK        0x0007
+#define RTC_AL_MTH_MASK        0x000f
+#define RTC_AL_YEA_MASK        0x007f
+
 #define RTC_PDN2               0x002e
 #define RTC_PDN2_PWRON_ALARM   BIT(4)
 
index 80a9162b406c26c702f15e264e0bd3083cd4636d..cfaa8feecfe86b9d340ac0bdfbd904656f9fb7c1 100644 (file)
@@ -2658,13 +2658,25 @@ static inline bool want_init_on_free(void)
               !page_poisoning_enabled();
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern void init_debug_pagealloc(void);
 #else
-DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+static inline void init_debug_pagealloc(void) {}
 #endif
+extern bool _debug_pagealloc_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
 
 static inline bool debug_pagealloc_enabled(void)
+{
+       return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+               _debug_pagealloc_enabled_early;
+}
+
+/*
+ * For use in fast paths after init_debug_pagealloc() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool debug_pagealloc_enabled_static(void)
 {
        if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
                return false;
index 89d8ff06c9ce0f98315697a70554726cf20ae391..5334ad8fc7bd9613e2ace8a7b3f2de14fa829a4a 100644 (file)
@@ -215,9 +215,8 @@ enum node_stat_item {
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
-       NR_SLAB_RECLAIMABLE,    /* Please do not reorder this item */
-       NR_SLAB_UNRECLAIMABLE,  /* and this one without looking at
-                                * memcg_flush_percpu_vmstats() first. */
+       NR_SLAB_RECLAIMABLE,
+       NR_SLAB_UNRECLAIMABLE,
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        WORKINGSET_NODES,
index ecc88a41792a3a959bc539f1eb341426accdd856..c04f690871ca18f5f15cd9c4e8cfee3b4ef77d0f 100644 (file)
@@ -40,7 +40,7 @@ typedef enum {
        FL_READING,
        FL_CACHEDPRG,
        /* These 4 come from onenand_state_t, which has been unified here */
-       FL_RESETING,
+       FL_RESETTING,
        FL_OTPING,
        FL_PREPARING_ERASE,
        FL_VERIFYING_ERASE,
index 7fe7b87a3ded914ab89fd021aed3bed9218ef5b2..07bfb08740330f90b121bcb81cc30436ef7c73aa 100644 (file)
@@ -34,7 +34,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 
 /* internal use only */
 #define LOOKUP_PARENT          0x0010
-#define LOOKUP_NO_REVAL                0x0080
 #define LOOKUP_JUMPED          0x1000
 #define LOOKUP_ROOT            0x2000
 #define LOOKUP_ROOT_GRABBED    0x0008
index 79bc82e30c02333d1014987f941172969ae355a8..491a2b7e77c1e90645180c13cdb34694f94d1196 100644 (file)
@@ -55,7 +55,7 @@ static inline int of_mdio_parse_addr(struct device *dev,
 }
 
 #else /* CONFIG_OF_MDIO */
-static bool of_mdiobus_child_is_phy(struct device_node *child)
+static inline bool of_mdiobus_child_is_phy(struct device_node *child)
 {
        return false;
 }
index 5d62e78946a3e5dfaad42a24cfbda20eafe1ef3c..d08f0869f1213e3d5df4c65ad9259d280aac4342 100644 (file)
@@ -33,6 +33,9 @@ void pci_disable_pasid(struct pci_dev *pdev);
 int pci_pasid_features(struct pci_dev *pdev);
 int pci_max_pasids(struct pci_dev *pdev);
 #else /* CONFIG_PCI_PASID */
+static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
+{ return -EINVAL; }
+static inline void pci_disable_pasid(struct pci_dev *pdev) { }
 static inline int pci_pasid_features(struct pci_dev *pdev)
 { return -EINVAL; }
 static inline int pci_max_pasids(struct pci_dev *pdev)
index fe6cfdcfbc260280bf941ea666109e275e1d7464..468328b1e1dd583fedfdd67e4f85bfd94d43c31d 100644 (file)
@@ -69,29 +69,32 @@ struct posix_clock_operations {
  *
  * @ops:     Functional interface to the clock
  * @cdev:    Character device instance for this clock
- * @kref:    Reference count.
+ * @dev:     Pointer to the clock's device.
  * @rwsem:   Protects the 'zombie' field from concurrent access.
  * @zombie:  If 'zombie' is true, then the hardware has disappeared.
- * @release: A function to free the structure when the reference count reaches
- *           zero. May be NULL if structure is statically allocated.
  *
  * Drivers should embed their struct posix_clock within a private
  * structure, obtaining a reference to it during callbacks using
  * container_of().
+ *
+ * Drivers should supply an initialized but not exposed struct device
+ * to posix_clock_register(). It is used to manage lifetime of the
+ * driver's private structure. It's 'release' field should be set to
+ * a release function for this private structure.
  */
 struct posix_clock {
        struct posix_clock_operations ops;
        struct cdev cdev;
-       struct kref kref;
+       struct device *dev;
        struct rw_semaphore rwsem;
        bool zombie;
-       void (*release)(struct posix_clock *clk);
 };
 
 /**
  * posix_clock_register() - register a new clock
- * @clk:   Pointer to the clock. Caller must provide 'ops' and 'release'
- * @devid: Allocated device id
+ * @clk:   Pointer to the clock. Caller must provide 'ops' field
+ * @dev:   Pointer to the initialized device. Caller must provide
+ *         'release' field
  *
  * A clock driver calls this function to register itself with the
  * clock device subsystem. If 'clk' points to dynamically allocated
@@ -100,7 +103,7 @@ struct posix_clock {
  *
  * Returns zero on success, non-zero otherwise.
  */
-int posix_clock_register(struct posix_clock *clk, dev_t devid);
+int posix_clock_register(struct posix_clock *clk, struct device *dev);
 
 /**
  * posix_clock_unregister() - unregister a clock
index 467d2604641692c8a9f2c950f7b2083e374757bb..716ad1d8d95e1457dd1b42f51bebbf692e836830 100644 (file)
@@ -1929,11 +1929,11 @@ static inline void rseq_migrate(struct task_struct *t)
 
 /*
  * If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread.
+ * child inherits. Unregister rseq for a clone with CLONE_VM set.
  */
 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 {
-       if (clone_flags & CLONE_THREAD) {
+       if (clone_flags & CLONE_VM) {
                t->rseq = NULL;
                t->rseq_sig = 0;
                t->rseq_event_mask = 0;
index ef7031f8a304f524c3651d4c83e8053a2f0f9e8e..14d61bba0b79bd0e8cc48c24fc84970546537a4f 100644 (file)
@@ -358,17 +358,22 @@ static inline void sk_psock_update_proto(struct sock *sk,
 static inline void sk_psock_restore_proto(struct sock *sk,
                                          struct sk_psock *psock)
 {
-       sk->sk_write_space = psock->saved_write_space;
+       sk->sk_prot->unhash = psock->saved_unhash;
 
        if (psock->sk_proto) {
                struct inet_connection_sock *icsk = inet_csk(sk);
                bool has_ulp = !!icsk->icsk_ulp_data;
 
-               if (has_ulp)
-                       tcp_update_ulp(sk, psock->sk_proto);
-               else
+               if (has_ulp) {
+                       tcp_update_ulp(sk, psock->sk_proto,
+                                      psock->saved_write_space);
+               } else {
                        sk->sk_prot = psock->sk_proto;
+                       sk->sk_write_space = psock->saved_write_space;
+               }
                psock->sk_proto = NULL;
+       } else {
+               sk->sk_write_space = psock->saved_write_space;
        }
 }
 
index 98fe8663033af4534f9405655f177baab30d6251..3a67a7e45633cf9cfef557cfb5b3b31344866b01 100644 (file)
@@ -689,10 +689,10 @@ extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
 /* Helper calls for driver to timestamp transfer */
 void spi_take_timestamp_pre(struct spi_controller *ctlr,
                            struct spi_transfer *xfer,
-                           const void *tx, bool irqs_off);
+                           size_t progress, bool irqs_off);
 void spi_take_timestamp_post(struct spi_controller *ctlr,
                             struct spi_transfer *xfer,
-                            const void *tx, bool irqs_off);
+                            size_t progress, bool irqs_off);
 
 /* the spi driver core manages memory for the spi_controller classdev */
 extern struct spi_controller *__spi_alloc_controller(struct device *host,
index 85ec745767bd91b0728ef0acd6b813cbcf37a871..966146f7267a518b301da61396fdbd03fa473180 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * 10G controller driver for Samsung EXYNOS SoCs
+ * 10G controller driver for Samsung Exynos SoCs
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
index 2960dedcfde8a25ad53ed7f15d133d7c9fdb4757..5262b7a76d392b60769a93dde860a30e4b23481f 100644 (file)
@@ -1232,6 +1232,7 @@ asmlinkage long sys_ni_syscall(void);
  */
 
 int ksys_umount(char __user *name, int flags);
+int ksys_dup(unsigned int fildes);
 int ksys_chroot(const char __user *filename);
 ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count);
 int ksys_chdir(const char __user *filename);
index c17af77f3fae7f98814b521b127b2e10945d1e5e..ea627d1ab7e39bebb4db5bb4d436530d57f63567 100644 (file)
@@ -30,7 +30,7 @@ struct tnum tnum_lshift(struct tnum a, u8 shift);
 /* Shift (rsh) a tnum right (by a fixed shift) */
 struct tnum tnum_rshift(struct tnum a, u8 shift);
 /* Shift (arsh) a tnum right (by a fixed min_shift) */
-struct tnum tnum_arshift(struct tnum a, u8 min_shift);
+struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness);
 /* Add two tnums, return @a + @b */
 struct tnum tnum_add(struct tnum a, struct tnum b);
 /* Subtract two tnums, return @a - @b */
index 059524b87c4c90f310cbf1f08f72721bcf5481c0..f22bd6c838a379108f74c493a127805857552431 100644 (file)
@@ -3548,6 +3548,9 @@ struct cfg80211_update_owe_info {
  *
  * @start_radar_detection: Start radar detection in the driver.
  *
+ * @end_cac: End running CAC, probably because a related CAC
+ *     was finished on another phy.
+ *
  * @update_ft_ies: Provide updated Fast BSS Transition information to the
  *     driver. If the SME is in the driver/firmware, this information can be
  *     used in building Authentication and Reassociation Request frames.
@@ -3874,6 +3877,8 @@ struct cfg80211_ops {
                                         struct net_device *dev,
                                         struct cfg80211_chan_def *chandef,
                                         u32 cac_time_ms);
+       void    (*end_cac)(struct wiphy *wiphy,
+                               struct net_device *dev);
        int     (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
                                 struct cfg80211_update_ft_ies_params *ftie);
        int     (*crit_proto_start)(struct wiphy *wiphy,
index 47f87b2fcf633b1181263772936882910782e4b2..38b4acb93f74725e561252a81679cf08293748c3 100644 (file)
@@ -938,7 +938,7 @@ struct devlink_region *devlink_region_create(struct devlink *devlink,
                                             u32 region_max_snapshots,
                                             u64 region_size);
 void devlink_region_destroy(struct devlink_region *region);
-u32 devlink_region_shapshot_id_get(struct devlink *devlink);
+u32 devlink_region_snapshot_id_get(struct devlink *devlink);
 int devlink_region_snapshot_create(struct devlink_region *region,
                                   u8 *data, u32 snapshot_id,
                                   devlink_snapshot_data_dest_t *data_destructor);
index 8224dad2ae9460fb3ef3ace54f4442e76463ac04..3448cf865edee88fe97c7651895e47b940d8ae8a 100644 (file)
@@ -516,7 +516,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
        struct dst_entry *dst = skb_dst(skb);
 
        if (dst && dst->ops->update_pmtu)
-               dst->ops->update_pmtu(dst, NULL, skb, mtu);
+               dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
+}
+
+/* update dst pmtu but not do neighbor confirm */
+static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       if (dst && dst->ops->update_pmtu)
+               dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
 }
 
 static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
@@ -526,7 +535,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
        u32 encap_mtu = dst_mtu(encap_dst);
 
        if (skb->len > encap_mtu - headroom)
-               skb_dst_update_pmtu(skb, encap_mtu - headroom);
+               skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
 }
 
 #endif /* _NET_DST_H */
index 5ec645f27ee3841f9a4074c94e5dca37bb86fc2e..443863c7b8da362476c15fd290ac2a32a8aa86e3 100644 (file)
@@ -27,7 +27,8 @@ struct dst_ops {
        struct dst_entry *      (*negative_advice)(struct dst_entry *);
        void                    (*link_failure)(struct sk_buff *);
        void                    (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
-                                              struct sk_buff *skb, u32 mtu);
+                                              struct sk_buff *skb, u32 mtu,
+                                              bool confirm_neigh);
        void                    (*redirect)(struct dst_entry *dst, struct sock *sk,
                                            struct sk_buff *skb);
        int                     (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
index f0897b3c97fb8c3c9afdb4d3ba4835aab45c856a..415b8f49d1509f1fa48ee45d4e9917d70a20054e 100644 (file)
@@ -106,6 +106,12 @@ struct flow_offload {
 };
 
 #define NF_FLOW_TIMEOUT (30 * HZ)
+#define nf_flowtable_time_stamp        (u32)jiffies
+
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+       return (__s32)(timeout - nf_flowtable_time_stamp);
+}
 
 struct nf_flow_route {
        struct {
index 144f264ea394d18d92d1824926abf2caa3245e36..fceddf89592af4483431d012873bc611a80d7d50 100644 (file)
@@ -308,6 +308,7 @@ struct tcf_proto_ops {
        int                     (*delete)(struct tcf_proto *tp, void *arg,
                                          bool *last, bool rtnl_held,
                                          struct netlink_ext_ack *);
+       bool                    (*delete_empty)(struct tcf_proto *tp);
        void                    (*walk)(struct tcf_proto *tp,
                                        struct tcf_walker *arg, bool rtnl_held);
        int                     (*reoffload)(struct tcf_proto *tp, bool add,
@@ -336,6 +337,10 @@ struct tcf_proto_ops {
        int                     flags;
 };
 
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
 enum tcf_proto_ops_flags {
        TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
 };
index e460ea7f767ba627972a63a974cae80357808366..e6f48384dc7119cae21ef46c74532e0f2577d8a2 100644 (file)
@@ -2147,7 +2147,8 @@ struct tcp_ulp_ops {
        /* initialize ulp */
        int (*init)(struct sock *sk);
        /* update ulp */
-       void (*update)(struct sock *sk, struct proto *p);
+       void (*update)(struct sock *sk, struct proto *p,
+                      void (*write_space)(struct sock *sk));
        /* cleanup ulp */
        void (*release)(struct sock *sk);
        /* diagnostic */
@@ -2162,7 +2163,8 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type);
 int tcp_set_ulp(struct sock *sk, const char *name);
 void tcp_get_available_ulp(char *buf, size_t len);
 void tcp_cleanup_ulp(struct sock *sk);
-void tcp_update_ulp(struct sock *sk, struct proto *p);
+void tcp_update_ulp(struct sock *sk, struct proto *p,
+                   void (*write_space)(struct sock *sk));
 
 #define MODULE_ALIAS_TCP_ULP(name)                             \
        __MODULE_INFO(alias, alias_userspace, name);            \
similarity index 72%
rename from arch/riscv/include/asm/sifive_l2_cache.h
rename to include/soc/sifive/sifive_l2_cache.h
index 04f6748fc50b18874ccd0a25ae6eaa0d7ab92796..92ade10ed67e94bac8439512cbe3a628958e7b6b 100644 (file)
@@ -4,8 +4,8 @@
  *
  */
 
-#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H
-#define _ASM_RISCV_SIFIVE_L2_CACHE_H
+#ifndef __SOC_SIFIVE_L2_CACHE_H
+#define __SOC_SIFIVE_L2_CACHE_H
 
 extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
 extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
@@ -13,4 +13,4 @@ extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
 #define SIFIVE_L2_ERR_TYPE_CE 0
 #define SIFIVE_L2_ERR_TYPE_UE 1
 
-#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
+#endif /* __SOC_SIFIVE_L2_CACHE_H */
index d5ec4fac82aea89dfe33d0ecc92c4f395d2b1dd9..564ba1b5cf57a1eeef66c8c5531f5d1809f13bbf 100644 (file)
@@ -915,9 +915,9 @@ TRACE_EVENT(afs_call_state,
 
 TRACE_EVENT(afs_lookup,
            TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name,
-                    struct afs_vnode *vnode),
+                    struct afs_fid *fid),
 
-           TP_ARGS(dvnode, name, vnode),
+           TP_ARGS(dvnode, name, fid),
 
            TP_STRUCT__entry(
                    __field_struct(struct afs_fid,      dfid            )
@@ -928,13 +928,7 @@ TRACE_EVENT(afs_lookup,
            TP_fast_assign(
                    int __len = min_t(int, name->len, 23);
                    __entry->dfid = dvnode->fid;
-                   if (vnode) {
-                           __entry->fid = vnode->fid;
-                   } else {
-                           __entry->fid.vid = 0;
-                           __entry->fid.vnode = 0;
-                           __entry->fid.unique = 0;
-                   }
+                   __entry->fid = *fid;
                    memcpy(__entry->name, name->name, __len);
                    __entry->name[__len] = 0;
                           ),
index dd4db334bd63717d571851441ecd47786fd260da..d82a0f4e824dd55e74a6791e81ee76a04c4c2d1a 100644 (file)
@@ -31,7 +31,8 @@
        EM( SCAN_ALLOC_HUGE_PAGE_FAIL,  "alloc_huge_page_failed")       \
        EM( SCAN_CGROUP_CHARGE_FAIL,    "ccgroup_charge_failed")        \
        EM( SCAN_EXCEED_SWAP_PTE,       "exceed_swap_pte")              \
-       EMe(SCAN_TRUNCATED,             "truncated")                    \
+       EM( SCAN_TRUNCATED,             "truncated")                    \
+       EMe(SCAN_PAGE_HAS_PRIVATE,      "page_has_private")             \
 
 #undef EM
 #undef EMe
index 54e61d456cdfaa06b332b03cd0639a27a8dd7384..112bd06487bfc2dc578eb0a4e609162ccfcbfeaf 100644 (file)
@@ -49,12 +49,6 @@ DEFINE_EVENT(dma_map, map_single,
        TP_ARGS(dev, dev_addr, phys_addr, size)
 );
 
-DEFINE_EVENT(dma_map, map_sg,
-       TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
-                size_t size),
-       TP_ARGS(dev, dev_addr, phys_addr, size)
-);
-
 DEFINE_EVENT(dma_map, bounce_map_single,
        TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
                 size_t size),
@@ -99,6 +93,48 @@ DEFINE_EVENT(dma_unmap, bounce_unmap_single,
        TP_ARGS(dev, dev_addr, size)
 );
 
+DECLARE_EVENT_CLASS(dma_map_sg,
+       TP_PROTO(struct device *dev, int index, int total,
+                struct scatterlist *sg),
+
+       TP_ARGS(dev, index, total, sg),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name(dev))
+               __field(dma_addr_t, dev_addr)
+               __field(phys_addr_t, phys_addr)
+               __field(size_t, size)
+               __field(int, index)
+               __field(int, total)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name(dev));
+               __entry->dev_addr = sg->dma_address;
+               __entry->phys_addr = sg_phys(sg);
+               __entry->size = sg->dma_length;
+               __entry->index = index;
+               __entry->total = total;
+       ),
+
+       TP_printk("dev=%s [%d/%d] dev_addr=0x%llx phys_addr=0x%llx size=%zu",
+                 __get_str(dev_name), __entry->index, __entry->total,
+                 (unsigned long long)__entry->dev_addr,
+                 (unsigned long long)__entry->phys_addr,
+                 __entry->size)
+);
+
+DEFINE_EVENT(dma_map_sg, map_sg,
+       TP_PROTO(struct device *dev, int index, int total,
+                struct scatterlist *sg),
+       TP_ARGS(dev, index, total, sg)
+);
+
+DEFINE_EVENT(dma_map_sg, bounce_map_sg,
+       TP_PROTO(struct device *dev, int index, int total,
+                struct scatterlist *sg),
+       TP_ARGS(dev, index, total, sg)
+);
 #endif /* _TRACE_INTEL_IOMMU_H */
 
 /* This part must be outside protection */
index 95fba0471e5bdc2cc43916b4c02a1b128f8ae1ef..3f249e150c0c3ec7133b93a07011dba2b736f43a 100644 (file)
@@ -18,13 +18,13 @@ DECLARE_EVENT_CLASS(preemptirq_template,
        TP_ARGS(ip, parent_ip),
 
        TP_STRUCT__entry(
-               __field(u32, caller_offs)
-               __field(u32, parent_offs)
+               __field(s32, caller_offs)
+               __field(s32, parent_offs)
        ),
 
        TP_fast_assign(
-               __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
-               __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+               __entry->caller_offs = (s32)(ip - (unsigned long)_stext);
+               __entry->parent_offs = (s32)(parent_ip - (unsigned long)_stext);
        ),
 
        TP_printk("caller=%pS parent=%pS",
index f056b2a00d5c7695a69826786b5e2336c79c3c31..9a61c28ed3ae48cf2107ffc8b9ea0647e9ccb4f8 100644 (file)
@@ -34,6 +34,7 @@ struct input_event {
        __kernel_ulong_t __sec;
 #if defined(__sparc__) && defined(__arch64__)
        unsigned int __usec;
+       unsigned int __pad;
 #else
        __kernel_ulong_t __usec;
 #endif
index 409d3ad1e6e288e8192a6632655ee51d2906132c..1d0350e44ae34c57ae029de544a51c47d9fb0b95 100644 (file)
@@ -9,11 +9,11 @@
  * and the comment before kcov_remote_start() for usage details.
  */
 struct kcov_remote_arg {
-       unsigned int    trace_mode;     /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
-       unsigned int    area_size;      /* Length of coverage buffer in words */
-       unsigned int    num_handles;    /* Size of handles array */
-       __u64           common_handle;
-       __u64           handles[0];
+       __u32           trace_mode;     /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
+       __u32           area_size;      /* Length of coverage buffer in words */
+       __u32           num_handles;    /* Size of handles array */
+       __aligned_u64   common_handle;
+       __aligned_u64   handles[0];
 };
 
 #define KCOV_REMOTE_MAX_HANDLES                0x100
index 1ecfd43ed4643461af60bb47f5d4f61aad9b12af..da1bc0b60a7de3119113a8dace48c79928f5a927 100644 (file)
@@ -93,7 +93,6 @@
 #include <linux/rodata_test.h>
 #include <linux/jump_label.h>
 #include <linux/mem_encrypt.h>
-#include <linux/file.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -554,6 +553,7 @@ static void __init mm_init(void)
         * bigger than MAX_ORDER unless SPARSEMEM.
         */
        page_ext_init_flatmem();
+       init_debug_pagealloc();
        report_meminit();
        mem_init();
        kmem_cache_init();
@@ -1158,26 +1158,13 @@ static int __ref kernel_init(void *unused)
 
 void console_on_rootfs(void)
 {
-       struct file *file;
-       unsigned int i;
-
-       /* Open /dev/console in kernelspace, this should never fail */
-       file = filp_open("/dev/console", O_RDWR, 0);
-       if (IS_ERR(file))
-               goto err_out;
-
-       /* create stdin/stdout/stderr, this should never fail */
-       for (i = 0; i < 3; i++) {
-               if (f_dupfd(i, file, 0) != i)
-                       goto err_out;
-       }
-
-       return;
+       /* Open the /dev/console as stdin, this should never fail */
+       if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+               pr_err("Warning: unable to open an initial console.\n");
 
-err_out:
-       /* no panic -- this might not be fatal */
-       pr_err("Warning: unable to open an initial console.\n");
-       return;
+       /* create stdout/stderr */
+       (void) ksys_dup(0);
+       (void) ksys_dup(0);
 }
 
 static noinline void __init kernel_init_freeable(void)
index 4fb20ab179feea2dbd155caa386389ed6144c129..9e43b72eb619c06fbf8481b9de71e69ed2ecc158 100644 (file)
@@ -35,8 +35,8 @@ void cgroup_bpf_offline(struct cgroup *cgrp)
  */
 static void cgroup_bpf_release(struct work_struct *work)
 {
-       struct cgroup *cgrp = container_of(work, struct cgroup,
-                                          bpf.release_work);
+       struct cgroup *p, *cgrp = container_of(work, struct cgroup,
+                                              bpf.release_work);
        enum bpf_cgroup_storage_type stype;
        struct bpf_prog_array *old_array;
        unsigned int type;
@@ -65,6 +65,9 @@ static void cgroup_bpf_release(struct work_struct *work)
 
        mutex_unlock(&cgroup_mutex);
 
+       for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+               cgroup_bpf_put(p);
+
        percpu_ref_exit(&cgrp->bpf.refcnt);
        cgroup_put(cgrp);
 }
@@ -199,6 +202,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
  */
 #define        NR ARRAY_SIZE(cgrp->bpf.effective)
        struct bpf_prog_array *arrays[NR] = {};
+       struct cgroup *p;
        int ret, i;
 
        ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
@@ -206,6 +210,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
        if (ret)
                return ret;
 
+       for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+               cgroup_bpf_get(p);
+
        for (i = 0; i < NR; i++)
                INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
 
index ca52b9642943ffe6c4567539df95e810437f35e1..d4f335a9a89989f48e06588d1ff513d5bb30cf86 100644 (file)
@@ -44,14 +44,19 @@ struct tnum tnum_rshift(struct tnum a, u8 shift)
        return TNUM(a.value >> shift, a.mask >> shift);
 }
 
-struct tnum tnum_arshift(struct tnum a, u8 min_shift)
+struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness)
 {
        /* if a.value is negative, arithmetic shifting by minimum shift
         * will have larger negative offset compared to more shifting.
         * If a.value is nonnegative, arithmetic shifting by minimum shift
         * will have larger positive offset compare to more shifting.
         */
-       return TNUM((s64)a.value >> min_shift, (s64)a.mask >> min_shift);
+       if (insn_bitness == 32)
+               return TNUM((u32)(((s32)a.value) >> min_shift),
+                           (u32)(((s32)a.mask)  >> min_shift));
+       else
+               return TNUM((s64)a.value >> min_shift,
+                           (s64)a.mask  >> min_shift);
 }
 
 struct tnum tnum_add(struct tnum a, struct tnum b)
index 4983940cbdca7d89fdc6bafe757b87f07b634b0d..7d530ce8719d8d06efc9498f70e13f35c700ccd9 100644 (file)
@@ -907,7 +907,8 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
        BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
 
-static void __mark_reg_not_init(struct bpf_reg_state *reg);
+static void __mark_reg_not_init(const struct bpf_verifier_env *env,
+                               struct bpf_reg_state *reg);
 
 /* Mark the unknown part of a register (variable offset or scalar value) as
  * known to have the value @imm.
@@ -945,7 +946,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
                verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
                /* Something bad happened, let's kill all regs */
                for (regno = 0; regno < MAX_BPF_REG; regno++)
-                       __mark_reg_not_init(regs + regno);
+                       __mark_reg_not_init(env, regs + regno);
                return;
        }
        __mark_reg_known_zero(regs + regno);
@@ -1054,7 +1055,8 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
 }
 
 /* Mark a register as having a completely unknown (scalar) value. */
-static void __mark_reg_unknown(struct bpf_reg_state *reg)
+static void __mark_reg_unknown(const struct bpf_verifier_env *env,
+                              struct bpf_reg_state *reg)
 {
        /*
         * Clear type, id, off, and union(map_ptr, range) and
@@ -1064,6 +1066,8 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
        reg->type = SCALAR_VALUE;
        reg->var_off = tnum_unknown;
        reg->frameno = 0;
+       reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
+                      true : false;
        __mark_reg_unbounded(reg);
 }
 
@@ -1074,19 +1078,16 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
                verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
                /* Something bad happened, let's kill all regs except FP */
                for (regno = 0; regno < BPF_REG_FP; regno++)
-                       __mark_reg_not_init(regs + regno);
+                       __mark_reg_not_init(env, regs + regno);
                return;
        }
-       regs += regno;
-       __mark_reg_unknown(regs);
-       /* constant backtracking is enabled for root without bpf2bpf calls */
-       regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
-                       true : false;
+       __mark_reg_unknown(env, regs + regno);
 }
 
-static void __mark_reg_not_init(struct bpf_reg_state *reg)
+static void __mark_reg_not_init(const struct bpf_verifier_env *env,
+                               struct bpf_reg_state *reg)
 {
-       __mark_reg_unknown(reg);
+       __mark_reg_unknown(env, reg);
        reg->type = NOT_INIT;
 }
 
@@ -1097,10 +1098,10 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
                verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
                /* Something bad happened, let's kill all regs except FP */
                for (regno = 0; regno < BPF_REG_FP; regno++)
-                       __mark_reg_not_init(regs + regno);
+                       __mark_reg_not_init(env, regs + regno);
                return;
        }
-       __mark_reg_not_init(regs + regno);
+       __mark_reg_not_init(env, regs + regno);
 }
 
 #define DEF_NOT_SUBREG (0)
@@ -3234,7 +3235,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                }
                if (state->stack[spi].slot_type[0] == STACK_SPILL &&
                    state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
-                       __mark_reg_unknown(&state->stack[spi].spilled_ptr);
+                       __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
                        for (j = 0; j < BPF_REG_SIZE; j++)
                                state->stack[spi].slot_type[j] = STACK_MISC;
                        goto mark;
@@ -3892,7 +3893,7 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
                if (!reg)
                        continue;
                if (reg_is_pkt_pointer_any(reg))
-                       __mark_reg_unknown(reg);
+                       __mark_reg_unknown(env, reg);
        }
 }
 
@@ -3920,7 +3921,7 @@ static void release_reg_references(struct bpf_verifier_env *env,
                if (!reg)
                        continue;
                if (reg->ref_obj_id == ref_obj_id)
-                       __mark_reg_unknown(reg);
+                       __mark_reg_unknown(env, reg);
        }
 }
 
@@ -4582,7 +4583,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                /* Taint dst register if offset had invalid bounds derived from
                 * e.g. dead branches.
                 */
-               __mark_reg_unknown(dst_reg);
+               __mark_reg_unknown(env, dst_reg);
                return 0;
        }
 
@@ -4834,13 +4835,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                /* Taint dst register if offset had invalid bounds derived from
                 * e.g. dead branches.
                 */
-               __mark_reg_unknown(dst_reg);
+               __mark_reg_unknown(env, dst_reg);
                return 0;
        }
 
        if (!src_known &&
            opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
-               __mark_reg_unknown(dst_reg);
+               __mark_reg_unknown(env, dst_reg);
                return 0;
        }
 
@@ -5048,9 +5049,16 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                /* Upon reaching here, src_known is true and
                 * umax_val is equal to umin_val.
                 */
-               dst_reg->smin_value >>= umin_val;
-               dst_reg->smax_value >>= umin_val;
-               dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
+               if (insn_bitness == 32) {
+                       dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val);
+                       dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val);
+               } else {
+                       dst_reg->smin_value >>= umin_val;
+                       dst_reg->smax_value >>= umin_val;
+               }
+
+               dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val,
+                                               insn_bitness);
 
                /* blow away the dst_reg umin_value/umax_value and rely on
                 * dst_reg var_off to refine the result.
@@ -6263,6 +6271,7 @@ static bool may_access_skb(enum bpf_prog_type type)
 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 {
        struct bpf_reg_state *regs = cur_regs(env);
+       static const int ctx_reg = BPF_REG_6;
        u8 mode = BPF_MODE(insn->code);
        int i, err;
 
@@ -6296,7 +6305,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
        }
 
        /* check whether implicit source operand (register R6) is readable */
-       err = check_reg_arg(env, BPF_REG_6, SRC_OP);
+       err = check_reg_arg(env, ctx_reg, SRC_OP);
        if (err)
                return err;
 
@@ -6315,7 +6324,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
                return -EINVAL;
        }
 
-       if (regs[BPF_REG_6].type != PTR_TO_CTX) {
+       if (regs[ctx_reg].type != PTR_TO_CTX) {
                verbose(env,
                        "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
                return -EINVAL;
@@ -6328,6 +6337,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
                        return err;
        }
 
+       err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
+       if (err < 0)
+               return err;
+
        /* reset caller saved regs to unreadable */
        for (i = 0; i < CALLER_SAVED_REGS; i++) {
                mark_reg_not_init(env, regs, caller_saved[i]);
@@ -6982,7 +6995,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
                        /* since the register is unused, clear its state
                         * to make further comparison simpler
                         */
-                       __mark_reg_not_init(&st->regs[i]);
+                       __mark_reg_not_init(env, &st->regs[i]);
        }
 
        for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
@@ -6990,7 +7003,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
                /* liveness must not touch this stack slot anymore */
                st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
                if (!(live & REG_LIVE_READ)) {
-                       __mark_reg_not_init(&st->stack[i].spilled_ptr);
+                       __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
                        for (j = 0; j < BPF_REG_SIZE; j++)
                                st->stack[i].slot_type[j] = STACK_INVALID;
                }
index a59cc980adadb56cbe04ba842ed80c34289748c1..4dc279ed3b2d72294255acf9a5f307630ea9a531 100644 (file)
@@ -1909,6 +1909,78 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);
 
+#ifdef CONFIG_HOTPLUG_SMT
+static void cpuhp_offline_cpu_device(unsigned int cpu)
+{
+       struct device *dev = get_cpu_device(cpu);
+
+       dev->offline = true;
+       /* Tell user space about the state change */
+       kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+}
+
+static void cpuhp_online_cpu_device(unsigned int cpu)
+{
+       struct device *dev = get_cpu_device(cpu);
+
+       dev->offline = false;
+       /* Tell user space about the state change */
+       kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+{
+       int cpu, ret = 0;
+
+       cpu_maps_update_begin();
+       for_each_online_cpu(cpu) {
+               if (topology_is_primary_thread(cpu))
+                       continue;
+               ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+               if (ret)
+                       break;
+               /*
+                * As this needs to hold the cpu maps lock it's impossible
+                * to call device_offline() because that ends up calling
+                * cpu_down() which takes cpu maps lock. cpu maps lock
+                * needs to be held as this might race against in kernel
+                * abusers of the hotplug machinery (thermal management).
+                *
+                * So nothing would update device:offline state. That would
+                * leave the sysfs entry stale and prevent onlining after
+                * smt control has been changed to 'off' again. This is
+                * called under the sysfs hotplug lock, so it is properly
+                * serialized against the regular offline usage.
+                */
+               cpuhp_offline_cpu_device(cpu);
+       }
+       if (!ret)
+               cpu_smt_control = ctrlval;
+       cpu_maps_update_done();
+       return ret;
+}
+
+int cpuhp_smt_enable(void)
+{
+       int cpu, ret = 0;
+
+       cpu_maps_update_begin();
+       cpu_smt_control = CPU_SMT_ENABLED;
+       for_each_present_cpu(cpu) {
+               /* Skip online CPUs and CPUs on offline nodes */
+               if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+                       continue;
+               ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+               if (ret)
+                       break;
+               /* See comment in cpuhp_smt_disable() */
+               cpuhp_online_cpu_device(cpu);
+       }
+       cpu_maps_update_done();
+       return ret;
+}
+#endif
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
 static ssize_t show_cpuhp_state(struct device *dev,
                                struct device_attribute *attr, char *buf)
@@ -2063,77 +2135,6 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
 
 #ifdef CONFIG_HOTPLUG_SMT
 
-static void cpuhp_offline_cpu_device(unsigned int cpu)
-{
-       struct device *dev = get_cpu_device(cpu);
-
-       dev->offline = true;
-       /* Tell user space about the state change */
-       kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
-}
-
-static void cpuhp_online_cpu_device(unsigned int cpu)
-{
-       struct device *dev = get_cpu_device(cpu);
-
-       dev->offline = false;
-       /* Tell user space about the state change */
-       kobject_uevent(&dev->kobj, KOBJ_ONLINE);
-}
-
-int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
-{
-       int cpu, ret = 0;
-
-       cpu_maps_update_begin();
-       for_each_online_cpu(cpu) {
-               if (topology_is_primary_thread(cpu))
-                       continue;
-               ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
-               if (ret)
-                       break;
-               /*
-                * As this needs to hold the cpu maps lock it's impossible
-                * to call device_offline() because that ends up calling
-                * cpu_down() which takes cpu maps lock. cpu maps lock
-                * needs to be held as this might race against in kernel
-                * abusers of the hotplug machinery (thermal management).
-                *
-                * So nothing would update device:offline state. That would
-                * leave the sysfs entry stale and prevent onlining after
-                * smt control has been changed to 'off' again. This is
-                * called under the sysfs hotplug lock, so it is properly
-                * serialized against the regular offline usage.
-                */
-               cpuhp_offline_cpu_device(cpu);
-       }
-       if (!ret)
-               cpu_smt_control = ctrlval;
-       cpu_maps_update_done();
-       return ret;
-}
-
-int cpuhp_smt_enable(void)
-{
-       int cpu, ret = 0;
-
-       cpu_maps_update_begin();
-       cpu_smt_control = CPU_SMT_ENABLED;
-       for_each_present_cpu(cpu) {
-               /* Skip online CPUs and CPUs on offline nodes */
-               if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
-                       continue;
-               ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
-               if (ret)
-                       break;
-               /* See comment in cpuhp_smt_disable() */
-               cpuhp_online_cpu_device(cpu);
-       }
-       cpu_maps_update_done();
-       return ret;
-}
-
-
 static ssize_t
 __store_smt_control(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
index c0a4c12d38b20ed80254c76acb28c6d060b1df86..809a985b17934ae7e01a63621c5ca49ce03577ad 100644 (file)
@@ -175,8 +175,8 @@ void exit_creds(struct task_struct *tsk)
        put_cred(cred);
 
 #ifdef CONFIG_KEYS_REQUEST_CACHE
-       key_put(current->cached_requested_key);
-       current->cached_requested_key = NULL;
+       key_put(tsk->cached_requested_key);
+       tsk->cached_requested_key = NULL;
 #endif
 }
 
@@ -223,7 +223,7 @@ struct cred *cred_alloc_blank(void)
        new->magic = CRED_MAGIC;
 #endif
 
-       if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
+       if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
 
        return new;
@@ -282,7 +282,7 @@ struct cred *prepare_creds(void)
        new->security = NULL;
 #endif
 
-       if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
        validate_creds(new);
        return new;
@@ -715,7 +715,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 #ifdef CONFIG_SECURITY
        new->security = NULL;
 #endif
-       if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
 
        put_cred(old);
index a1f8bde19b561a03ff09cd817d4013512d7f5383..2173c23c25b4fcef30b27059a1c799a697d217b1 100644 (file)
@@ -11465,8 +11465,10 @@ SYSCALL_DEFINE5(perf_event_open,
                }
        }
 
-       if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader))
+       if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
+               err = -EINVAL;
                goto err_locked;
+       }
 
        /*
         * Must be under the same ctx::mutex as perf_install_in_context(),
index bcbd59888e67bbd516767cb485c1d63dd1bb7075..2833ffb0c2110c482010954e73ed836395eaa6a6 100644 (file)
@@ -517,10 +517,6 @@ static struct task_struct *find_child_reaper(struct task_struct *father,
        }
 
        write_unlock_irq(&tasklist_lock);
-       if (unlikely(pid_ns == &init_pid_ns)) {
-               panic("Attempted to kill init! exitcode=0x%08x\n",
-                       father->signal->group_exit_code ?: father->exit_code);
-       }
 
        list_for_each_entry_safe(p, n, dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
@@ -766,6 +762,14 @@ void __noreturn do_exit(long code)
        acct_update_integrals(tsk);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
+               /*
+                * If the last thread of global init has exited, panic
+                * immediately to get a useable coredump.
+                */
+               if (unlikely(is_global_init(tsk)))
+                       panic("Attempted to kill init! exitcode=0x%08x\n",
+                               tsk->signal->group_exit_code ?: (int)code);
+
 #ifdef CONFIG_POSIX_TIMERS
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
index 2508a4f238a3f3b2e41dd78502a5a653f57c221b..080809560072550279595ae05567b4a64cced9d7 100644 (file)
@@ -2578,6 +2578,16 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 #endif
 
 #ifdef __ARCH_WANT_SYS_CLONE3
+
+/*
+ * copy_thread implementations handle CLONE_SETTLS by reading the TLS value from
+ * the registers containing the syscall arguments for clone. This doesn't work
+ * with clone3 since the TLS value is passed in clone_args instead.
+ */
+#ifndef CONFIG_HAVE_COPY_THREAD_TLS
+#error clone3 requires copy_thread_tls support in arch
+#endif
+
 noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
                                              struct clone_args __user *uargs,
                                              size_t usize)
index 03c518e9747e579869e13b7e2bc91ad83e3e8da7..0cf84c8664f207c574325b899ef2e57f01295a94 100644 (file)
@@ -1178,6 +1178,7 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
 
 /**
  * wait_for_owner_exiting - Block until the owner has exited
+ * @ret: owner's current futex lock status
  * @exiting:   Pointer to the exiting task
  *
  * Caller must hold a refcount on @exiting.
index 32282e7112d3990d8bf18ea172f66439f9c445d4..32406ef0d6a2d28e74122e2b16968f758275590f 100644 (file)
@@ -482,7 +482,7 @@ static struct lock_trace *save_trace(void)
        struct lock_trace *trace, *t2;
        struct hlist_head *hash_head;
        u32 hash;
-       unsigned int max_entries;
+       int max_entries;
 
        BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
        BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
@@ -490,10 +490,8 @@ static struct lock_trace *save_trace(void)
        trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
        max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
                LOCK_TRACE_SIZE_IN_LONGS;
-       trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
 
-       if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES -
-           LOCK_TRACE_SIZE_IN_LONGS - 1) {
+       if (max_entries <= 0) {
                if (!debug_locks_off_graph_unlock())
                        return NULL;
 
@@ -502,6 +500,7 @@ static struct lock_trace *save_trace(void)
 
                return NULL;
        }
+       trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
 
        hash = jhash(trace->entries, trace->nr_entries *
                     sizeof(trace->entries[0]), 0);
index 44e68761f43213144a61b0cd2b489cfd1b8358bd..0d9b6be9ecc886cdae8067f479a6cd915929fef6 100644 (file)
@@ -1226,8 +1226,8 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
                 * In this case, we attempt to acquire the lock again
                 * without sleeping.
                 */
-               if ((wstate == WRITER_HANDOFF) &&
-                   (rwsem_spin_on_owner(sem, 0) == OWNER_NULL))
+               if (wstate == WRITER_HANDOFF &&
+                   rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
                        goto trylock_again;
 
                /* Block until there are no active lockers. */
index cb9ddcc08119added2355311507c65db3849b0c4..43d6179508d645e5decfbf24dde54ed1815df470 100644 (file)
@@ -264,12 +264,17 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
        return ret;
 }
 
-static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns,
+                          unsigned int mode)
 {
+       int ret;
+
        if (mode & PTRACE_MODE_NOAUDIT)
-               return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
+               ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT);
        else
-               return has_ns_capability(current, ns, CAP_SYS_PTRACE);
+               ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE);
+
+       return ret == 0;
 }
 
 /* Returns 0 on success, -errno on denial. */
@@ -321,7 +326,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
            gid_eq(caller_gid, tcred->sgid) &&
            gid_eq(caller_gid, tcred->gid))
                goto ok;
-       if (ptrace_has_cap(tcred->user_ns, mode))
+       if (ptrace_has_cap(cred, tcred->user_ns, mode))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
@@ -340,7 +345,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
        mm = task->mm;
        if (mm &&
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
-            !ptrace_has_cap(mm->user_ns, mode)))
+            !ptrace_has_cap(cred, mm->user_ns, mode)))
            return -EPERM;
 
        return security_ptrace_access_check(task, mode);
index 27c48eb7de4025f94653c86878652ad57e660427..a4f86a9d6937cdfa2f13d1dcc9be863c1943d06f 100644 (file)
@@ -310,6 +310,8 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
        int ret;
 
        if (flags & RSEQ_FLAG_UNREGISTER) {
+               if (flags & ~RSEQ_FLAG_UNREGISTER)
+                       return -EINVAL;
                /* Unregister rseq for current thread. */
                if (current->rseq != rseq || !current->rseq)
                        return -EINVAL;
index 12d2227e5786794260a7018a9141f083b575f732..b6ea3dcb57bfefa47233ada64d74f822b6666ab6 100644 (file)
@@ -1026,6 +1026,13 @@ static long seccomp_notify_recv(struct seccomp_filter *filter,
        struct seccomp_notif unotif;
        ssize_t ret;
 
+       /* Verify that we're not given garbage to keep struct extensible. */
+       ret = check_zeroed_user(buf, sizeof(unotif));
+       if (ret < 0)
+               return ret;
+       if (!ret)
+               return -EINVAL;
+
        memset(&unotif, 0, sizeof(unotif));
 
        ret = down_interruptible(&filter->notif->request);
index 13a0f2e6ebc2c5de1d1a3c00a780ef9588d4cfb7..e2ac0e37c4ae7bac1b574e8b660a1203d01a75fd 100644 (file)
@@ -554,25 +554,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
 {
        struct signal_struct *sig = tsk->signal;
-       struct taskstats *stats;
+       struct taskstats *stats_new, *stats;
 
-       if (sig->stats || thread_group_empty(tsk))
-               goto ret;
+       /* Pairs with smp_store_release() below. */
+       stats = smp_load_acquire(&sig->stats);
+       if (stats || thread_group_empty(tsk))
+               return stats;
 
        /* No problem if kmem_cache_zalloc() fails */
-       stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
+       stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
 
        spin_lock_irq(&tsk->sighand->siglock);
-       if (!sig->stats) {
-               sig->stats = stats;
-               stats = NULL;
+       stats = sig->stats;
+       if (!stats) {
+               /*
+                * Pairs with smp_store_release() above and order the
+                * kmem_cache_zalloc().
+                */
+               smp_store_release(&sig->stats, stats_new);
+               stats = stats_new;
+               stats_new = NULL;
        }
        spin_unlock_irq(&tsk->sighand->siglock);
 
-       if (stats)
-               kmem_cache_free(taskstats_cache, stats);
-ret:
-       return sig->stats;
+       if (stats_new)
+               kmem_cache_free(taskstats_cache, stats_new);
+
+       return stats;
 }
 
 /* Send pid data out on exit */
index ec960bb939fdf9770b9f4b92ee87e0f1992a1329..200fb2d3be99f25358ea4fd13d3d285baf4865d8 100644 (file)
@@ -14,8 +14,6 @@
 
 #include "posix-timers.h"
 
-static void delete_clock(struct kref *kref);
-
 /*
  * Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
  */
@@ -125,7 +123,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
                err = 0;
 
        if (!err) {
-               kref_get(&clk->kref);
+               get_device(clk->dev);
                fp->private_data = clk;
        }
 out:
@@ -141,7 +139,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
        if (clk->ops.release)
                err = clk->ops.release(clk);
 
-       kref_put(&clk->kref, delete_clock);
+       put_device(clk->dev);
 
        fp->private_data = NULL;
 
@@ -161,38 +159,35 @@ static const struct file_operations posix_clock_file_operations = {
 #endif
 };
 
-int posix_clock_register(struct posix_clock *clk, dev_t devid)
+int posix_clock_register(struct posix_clock *clk, struct device *dev)
 {
        int err;
 
-       kref_init(&clk->kref);
        init_rwsem(&clk->rwsem);
 
        cdev_init(&clk->cdev, &posix_clock_file_operations);
+       err = cdev_device_add(&clk->cdev, dev);
+       if (err) {
+               pr_err("%s unable to add device %d:%d\n",
+                       dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+               return err;
+       }
        clk->cdev.owner = clk->ops.owner;
-       err = cdev_add(&clk->cdev, devid, 1);
+       clk->dev = dev;
 
-       return err;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(posix_clock_register);
 
-static void delete_clock(struct kref *kref)
-{
-       struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-
-       if (clk->release)
-               clk->release(clk);
-}
-
 void posix_clock_unregister(struct posix_clock *clk)
 {
-       cdev_del(&clk->cdev);
+       cdev_device_del(&clk->cdev, clk->dev);
 
        down_write(&clk->rwsem);
        clk->zombie = true;
        up_write(&clk->rwsem);
 
-       kref_put(&clk->kref, delete_clock);
+       put_device(clk->dev);
 }
 EXPORT_SYMBOL_GPL(posix_clock_unregister);
 
index 67df65f887ac872130b4038f1e4be8d799f097e0..20c65a7d4e3a5190482c5aeff6e408907f71a7b6 100644 (file)
@@ -151,6 +151,9 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
 
 #ifdef CONFIG_COMPAT
 COMPAT_SYS_NI(timer_create);
+#endif
+
+#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
 COMPAT_SYS_NI(getitimer);
 COMPAT_SYS_NI(setitimer);
 #endif
index 8b192e67aabc9d16d8a7b08c0356641b5462c783..a792d21cac645c63468f605207c0c5fd7b356242 100644 (file)
@@ -58,8 +58,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 
        /*
         * Do a quick check without holding jiffies_lock:
+        * The READ_ONCE() pairs with two updates done later in this function.
         */
-       delta = ktime_sub(now, last_jiffies_update);
+       delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
        if (delta < tick_period)
                return;
 
@@ -70,8 +71,9 @@ static void tick_do_update_jiffies64(ktime_t now)
        if (delta >= tick_period) {
 
                delta = ktime_sub(delta, tick_period);
-               last_jiffies_update = ktime_add(last_jiffies_update,
-                                               tick_period);
+               /* Pairs with the lockless read in this function. */
+               WRITE_ONCE(last_jiffies_update,
+                          ktime_add(last_jiffies_update, tick_period));
 
                /* Slow path for long timeouts */
                if (unlikely(delta >= tick_period)) {
@@ -79,8 +81,10 @@ static void tick_do_update_jiffies64(ktime_t now)
 
                        ticks = ktime_divns(delta, incr);
 
-                       last_jiffies_update = ktime_add_ns(last_jiffies_update,
-                                                          incr * ticks);
+                       /* Pairs with the lockless read in this function. */
+                       WRITE_ONCE(last_jiffies_update,
+                                  ktime_add_ns(last_jiffies_update,
+                                               incr * ticks));
                }
                do_timer(++ticks);
 
index a2659735db73b99623e1a53d0b2a1f11843c4577..1af321dec0f19624f5a35a44eac0fe186dd9d776 100644 (file)
@@ -96,6 +96,20 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
        return 0;
 }
 
+/*
+ * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
+ * functions. But those archs currently don't support direct functions
+ * anyway, and ftrace_find_rec_direct() is just a stub for them.
+ * Define MCOUNT_INSN_SIZE to keep those archs compiling.
+ */
+#ifndef MCOUNT_INSN_SIZE
+/* Make sure this only works without direct calls */
+# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
+# endif
+# define MCOUNT_INSN_SIZE 0
+#endif
+
 int function_graph_enter(unsigned long ret, unsigned long func,
                         unsigned long frame_pointer, unsigned long *retp)
 {
index ac99a3500076ab9f8de3609f3ab8aeff4b19c344..9bf1f2cd515ef39dd962196f473890da7b80d99e 100644 (file)
@@ -526,8 +526,7 @@ static int function_stat_show(struct seq_file *m, void *v)
        }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       avg = rec->time;
-       do_div(avg, rec->counter);
+       avg = div64_ul(rec->time, rec->counter);
        if (tracing_thresh && (avg < tracing_thresh))
                goto out;
 #endif
@@ -553,7 +552,8 @@ static int function_stat_show(struct seq_file *m, void *v)
                 * Divide only 1000 for ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
-               do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+               stddev = div64_ul(stddev,
+                                 rec->counter * (rec->counter - 1) * 1000);
        }
 
        trace_seq_init(&s);
index d45079ee62f8d9a75e53c616dd8b498d9ecbdd11..22bcf7c51d1ee5cf9407aed3a2f47531780335a0 100644 (file)
@@ -195,7 +195,7 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
        unsigned long irq_flags;
        void *entry = NULL;
        int entry_size;
-       u64 val;
+       u64 val = 0;
        int len;
 
        entry = trace_alloc_entry(call, &entry_size);
index 5e43b9664ecabb366dc8e81faa4d09d962084baa..617e297f46dcc78aa0c310c3e7d288159eaa0ab2 100644 (file)
@@ -630,7 +630,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_migrate_task\n");
-               return;
+               goto fail_deprobe_sched_switch;
        }
 
        wakeup_reset(tr);
@@ -648,6 +648,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
                printk(KERN_ERR "failed to start wakeup tracer\n");
 
        return;
+fail_deprobe_sched_switch:
+       unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 fail_deprobe:
index 344e4c1aa09ccf6bc69a17da761a45290681407c..87de6edafd147e1c68aa4e485719202926adc45f 100644 (file)
@@ -381,7 +381,7 @@ int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
                       int prefix_type, int rowsize, int groupsize,
                       const void *buf, size_t len, bool ascii)
 {
-               unsigned int save_len = s->seq.len;
+       unsigned int save_len = s->seq.len;
 
        if (s->full)
                return 0;
index 4df9a209f7caf2aaf213c7a45e317dd0c79338f2..c557f42a93971a96512fb90bb4e7be2f0e550b2a 100644 (file)
@@ -283,6 +283,11 @@ static void check_stack(unsigned long ip, unsigned long *stack)
        local_irq_restore(flags);
 }
 
+/* Some archs may not define MCOUNT_INSN_SIZE */
+#ifndef MCOUNT_INSN_SIZE
+# define MCOUNT_INSN_SIZE 0
+#endif
+
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
index 9ecfd3b547bae2024fa54d9d5635177b9c36ae02..42bd8ab955fa4f85beb3509073fa26c380a96b63 100644 (file)
@@ -221,6 +221,7 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
        return 0;
 }
 
+static __maybe_unused
 int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
 {
        int ret = __cvdso_clock_getres_common(clock, res);
index 7dd602d7f8db7be0f698021b01d8fc9810d25e5b..ad9d5b1c44739755146c17f8e5a5f6ff4659bc50 100644 (file)
@@ -26,6 +26,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
        unsigned long i, nr_pages, addr, next;
        int nr;
        struct page **pages;
+       int ret = 0;
 
        if (gup->size > ULONG_MAX)
                return -EINVAL;
@@ -63,7 +64,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
                                            NULL);
                        break;
                default:
-                       return -1;
+                       kvfree(pages);
+                       ret = -EINVAL;
+                       goto out;
                }
 
                if (nr <= 0)
@@ -85,7 +88,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
        gup->put_delta_usec = ktime_us_delta(end_time, start_time);
 
        kvfree(pages);
-       return 0;
+out:
+       return ret;
 }
 
 static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd,
index 41a0fbddc96bb27a0717e4033bbbaa511aac4899..a880932136740ddbc6bbf1b4b5c04bfd74f9c8ef 100644 (file)
@@ -527,13 +527,13 @@ void prep_transhuge_page(struct page *page)
        set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
 }
 
-static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
+static unsigned long __thp_get_unmapped_area(struct file *filp,
+               unsigned long addr, unsigned long len,
                loff_t off, unsigned long flags, unsigned long size)
 {
-       unsigned long addr;
        loff_t off_end = off + len;
        loff_t off_align = round_up(off, size);
-       unsigned long len_pad;
+       unsigned long len_pad, ret;
 
        if (off_end <= off_align || (off_end - off_align) < size)
                return 0;
@@ -542,30 +542,40 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long le
        if (len_pad < len || (off + len_pad) < off)
                return 0;
 
-       addr = current->mm->get_unmapped_area(filp, 0, len_pad,
+       ret = current->mm->get_unmapped_area(filp, addr, len_pad,
                                              off >> PAGE_SHIFT, flags);
-       if (IS_ERR_VALUE(addr))
+
+       /*
+        * The failure might be due to length padding. The caller will retry
+        * without the padding.
+        */
+       if (IS_ERR_VALUE(ret))
                return 0;
 
-       addr += (off - addr) & (size - 1);
-       return addr;
+       /*
+        * Do not try to align to THP boundary if allocation at the address
+        * hint succeeds.
+        */
+       if (ret == addr)
+               return addr;
+
+       ret += (off - ret) & (size - 1);
+       return ret;
 }
 
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
+       unsigned long ret;
        loff_t off = (loff_t)pgoff << PAGE_SHIFT;
 
-       if (addr)
-               goto out;
        if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
                goto out;
 
-       addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
-       if (addr)
-               return addr;
-
- out:
+       ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
+       if (ret)
+               return ret;
+out:
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
index ac65bb5e38ac267dec41c3e09bdaa53501da4c9a..dd8737a94bec42c8458e8d7c6201cf24d163239c 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/swapops.h>
 #include <linux/jhash.h>
 #include <linux/numa.h>
+#include <linux/llist.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -1136,7 +1137,7 @@ static inline void ClearPageHugeTemporary(struct page *page)
        page[2].mapping = NULL;
 }
 
-void free_huge_page(struct page *page)
+static void __free_huge_page(struct page *page)
 {
        /*
         * Can't pass hstate in here because it is called from the
@@ -1199,6 +1200,54 @@ void free_huge_page(struct page *page)
        spin_unlock(&hugetlb_lock);
 }
 
+/*
+ * As free_huge_page() can be called from a non-task context, we have
+ * to defer the actual freeing in a workqueue to prevent potential
+ * hugetlb_lock deadlock.
+ *
+ * free_hpage_workfn() locklessly retrieves the linked list of pages to
+ * be freed and frees them one-by-one. As the page->mapping pointer is
+ * going to be cleared in __free_huge_page() anyway, it is reused as the
+ * llist_node structure of a lockless linked list of huge pages to be freed.
+ */
+static LLIST_HEAD(hpage_freelist);
+
+static void free_hpage_workfn(struct work_struct *work)
+{
+       struct llist_node *node;
+       struct page *page;
+
+       node = llist_del_all(&hpage_freelist);
+
+       while (node) {
+               page = container_of((struct address_space **)node,
+                                    struct page, mapping);
+               node = node->next;
+               __free_huge_page(page);
+       }
+}
+static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+void free_huge_page(struct page *page)
+{
+       /*
+        * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
+        */
+       if (!in_task()) {
+               /*
+                * Only call schedule_work() if hpage_freelist is previously
+                * empty. Otherwise, schedule_work() had been called but the
+                * workfn hasn't retrieved the list yet.
+                */
+               if (llist_add((struct llist_node *)&page->mapping,
+                             &hpage_freelist))
+                       schedule_work(&free_hpage_work);
+               return;
+       }
+
+       __free_huge_page(page);
+}
+
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
        INIT_LIST_HEAD(&page->lru);
index c5b5f74cfd4debb16651552d29a5dfd688c87093..6c83cf4ed970b90e46b29d56a75ba20de4d580b9 100644 (file)
@@ -3287,49 +3287,34 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
        }
 }
 
-static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
+static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
 {
-       unsigned long stat[MEMCG_NR_STAT];
+       unsigned long stat[MEMCG_NR_STAT] = {0};
        struct mem_cgroup *mi;
        int node, cpu, i;
-       int min_idx, max_idx;
-
-       if (slab_only) {
-               min_idx = NR_SLAB_RECLAIMABLE;
-               max_idx = NR_SLAB_UNRECLAIMABLE;
-       } else {
-               min_idx = 0;
-               max_idx = MEMCG_NR_STAT;
-       }
-
-       for (i = min_idx; i < max_idx; i++)
-               stat[i] = 0;
 
        for_each_online_cpu(cpu)
-               for (i = min_idx; i < max_idx; i++)
+               for (i = 0; i < MEMCG_NR_STAT; i++)
                        stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
 
        for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
-               for (i = min_idx; i < max_idx; i++)
+               for (i = 0; i < MEMCG_NR_STAT; i++)
                        atomic_long_add(stat[i], &mi->vmstats[i]);
 
-       if (!slab_only)
-               max_idx = NR_VM_NODE_STAT_ITEMS;
-
        for_each_node(node) {
                struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
                struct mem_cgroup_per_node *pi;
 
-               for (i = min_idx; i < max_idx; i++)
+               for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
                        stat[i] = 0;
 
                for_each_online_cpu(cpu)
-                       for (i = min_idx; i < max_idx; i++)
+                       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
                                stat[i] += per_cpu(
                                        pn->lruvec_stat_cpu->count[i], cpu);
 
                for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
-                       for (i = min_idx; i < max_idx; i++)
+                       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
                                atomic_long_add(stat[i], &pi->lruvec_stat[i]);
        }
 }
@@ -3403,13 +3388,9 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
                parent = root_mem_cgroup;
 
        /*
-        * Deactivate and reparent kmem_caches. Then flush percpu
-        * slab statistics to have precise values at the parent and
-        * all ancestor levels. It's required to keep slab stats
-        * accurate after the reparenting of kmem_caches.
+        * Deactivate and reparent kmem_caches.
         */
        memcg_deactivate_kmem_caches(memcg, parent);
-       memcg_flush_percpu_vmstats(memcg, true);
 
        kmemcg_id = memcg->kmemcg_id;
        BUG_ON(kmemcg_id < 0);
@@ -4913,7 +4894,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
         * Flush percpu vmstats and vmevents to guarantee the value correctness
         * on parent's and all ancestor levels.
         */
-       memcg_flush_percpu_vmstats(memcg, false);
+       memcg_flush_percpu_vmstats(memcg);
        memcg_flush_percpu_vmevents(memcg);
        __mem_cgroup_free(memcg);
 }
index 55ac23ef11c1cbf1e13f187b5c850d4075128ad5..a91a072f2b2ce6baf432f09c3cfe74dbaec0033b 100644 (file)
@@ -483,8 +483,9 @@ static void update_pgdat_span(struct pglist_data *pgdat)
        pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
 }
 
-static void __remove_zone(struct zone *zone, unsigned long start_pfn,
-               unsigned long nr_pages)
+void __ref remove_pfn_range_from_zone(struct zone *zone,
+                                     unsigned long start_pfn,
+                                     unsigned long nr_pages)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long flags;
@@ -499,28 +500,30 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
                return;
 #endif
 
+       clear_zone_contiguous(zone);
+
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
        update_pgdat_span(pgdat);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
+
+       set_zone_contiguous(zone);
 }
 
-static void __remove_section(struct zone *zone, unsigned long pfn,
-               unsigned long nr_pages, unsigned long map_offset,
-               struct vmem_altmap *altmap)
+static void __remove_section(unsigned long pfn, unsigned long nr_pages,
+                            unsigned long map_offset,
+                            struct vmem_altmap *altmap)
 {
        struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
 
        if (WARN_ON_ONCE(!valid_section(ms)))
                return;
 
-       __remove_zone(zone, pfn, nr_pages);
        sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
 }
 
 /**
- * __remove_pages() - remove sections of pages from a zone
- * @zone: zone from which pages need to be removed
+ * __remove_pages() - remove sections of pages
  * @pfn: starting pageframe (must be aligned to start of a section)
  * @nr_pages: number of pages to remove (must be multiple of section size)
  * @altmap: alternative device page map or %NULL if default memmap is used
@@ -530,16 +533,14 @@ static void __remove_section(struct zone *zone, unsigned long pfn,
  * sure that pages are marked reserved and zones are adjust properly by
  * calling offline_pages().
  */
-void __remove_pages(struct zone *zone, unsigned long pfn,
-                   unsigned long nr_pages, struct vmem_altmap *altmap)
+void __remove_pages(unsigned long pfn, unsigned long nr_pages,
+                   struct vmem_altmap *altmap)
 {
        unsigned long map_offset = 0;
        unsigned long nr, start_sec, end_sec;
 
        map_offset = vmem_altmap_offset(altmap);
 
-       clear_zone_contiguous(zone);
-
        if (check_pfn_span(pfn, nr_pages, "remove"))
                return;
 
@@ -551,13 +552,11 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
                cond_resched();
                pfns = min(nr_pages, PAGES_PER_SECTION
                                - (pfn & ~PAGE_SECTION_MASK));
-               __remove_section(zone, pfn, pfns, map_offset, altmap);
+               __remove_section(pfn, pfns, map_offset, altmap);
                pfn += pfns;
                nr_pages -= pfns;
                map_offset = 0;
        }
-
-       set_zone_contiguous(zone);
 }
 
 int set_online_page_callback(online_page_callback_t callback)
@@ -869,6 +868,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
                 (unsigned long long) pfn << PAGE_SHIFT,
                 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_ONLINE, &arg);
+       remove_pfn_range_from_zone(zone, pfn, nr_pages);
        mem_hotplug_done();
        return ret;
 }
@@ -1628,6 +1628,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        writeback_set_ratelimit();
 
        memory_notify(MEM_OFFLINE, &arg);
+       remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
        mem_hotplug_done();
        return 0;
 
index 067cf7d3daf5af6126f90e481a154392e724559c..b2920ae87a612fa550e224a91758b14f1c836b9d 100644 (file)
@@ -2148,18 +2148,22 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                nmask = policy_nodemask(gfp, pol);
                if (!nmask || node_isset(hpage_node, *nmask)) {
                        mpol_cond_put(pol);
+                       /*
+                        * First, try to allocate THP only on local node, but
+                        * don't reclaim unnecessarily, just compact.
+                        */
                        page = __alloc_pages_node(hpage_node,
-                                               gfp | __GFP_THISNODE, order);
+                               gfp | __GFP_THISNODE | __GFP_NORETRY, order);
 
                        /*
                         * If hugepage allocations are configured to always
                         * synchronous compact or the vma has been madvised
                         * to prefer hugepage backing, retry allowing remote
-                        * memory as well.
+                        * memory with both reclaim and compact as well.
                         */
                        if (!page && (gfp & __GFP_DIRECT_RECLAIM))
                                page = __alloc_pages_node(hpage_node,
-                                               gfp | __GFP_NORETRY, order);
+                                                               gfp, order);
 
                        goto out;
                }
index 03ccbdfeb6972018eea805d5848b4a9b3a3ba4cd..c51c6bd2fe3425f0fc00dae12f29dcec5d395a42 100644 (file)
@@ -120,7 +120,7 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 
        mem_hotplug_begin();
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-               __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
+               __remove_pages(PHYS_PFN(res->start),
                               PHYS_PFN(resource_size(res)), NULL);
        } else {
                arch_remove_memory(nid, res->start, resource_size(res),
index eae1565285e3a0e1ecf2bc12a731d8081d17d31b..86873b6f38a7feba4593f0d9a209dde07e2ed385 100644 (file)
@@ -1512,9 +1512,11 @@ static int do_move_pages_to_node(struct mm_struct *mm,
 /*
  * Resolves the given address to a struct page, isolates it from the LRU and
  * puts it to the given pagelist.
- * Returns -errno if the page cannot be found/isolated or 0 when it has been
- * queued or the page doesn't need to be migrated because it is already on
- * the target node
+ * Returns:
+ *     errno - if the page cannot be found/isolated
+ *     0 - when it doesn't have to be migrated because it is already on the
+ *         target node
+ *     1 - when it has been queued
  */
 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                int node, struct list_head *pagelist, bool migrate_all)
@@ -1553,7 +1555,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
        if (PageHuge(page)) {
                if (PageHead(page)) {
                        isolate_huge_page(page, pagelist);
-                       err = 0;
+                       err = 1;
                }
        } else {
                struct page *head;
@@ -1563,7 +1565,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                if (err)
                        goto out_putpage;
 
-               err = 0;
+               err = 1;
                list_add_tail(&head->lru, pagelist);
                mod_node_page_state(page_pgdat(head),
                        NR_ISOLATED_ANON + page_is_file_cache(head),
@@ -1640,8 +1642,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                 */
                err = add_page_for_migration(mm, addr, current_node,
                                &pagelist, flags & MPOL_MF_MOVE_ALL);
-               if (!err)
+
+               if (!err) {
+                       /* The page is already on the target node */
+                       err = store_status(status, i, current_node, 1);
+                       if (err)
+                               goto out_flush;
                        continue;
+               } else if (err > 0) {
+                       /* The page is successfully queued for migration */
+                       continue;
+               }
 
                err = store_status(status, i, err, 1);
                if (err)
index 9c648524e4dc7eda8fadb087566da216fc61417c..71e4ffc83bcde1eb4d6f060b4e7e409e51ac8485 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm,
  * MAP_PRIVATE r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
  *             w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
  *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE:
- *                                                             r: (no) no
- *                                                             w: (no) no
- *                                                             x: (yes) yes
  */
 pgprot_t protection_map[16] __ro_after_init = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
index 71e3acea781764248161da9d7e80046fe823eb72..d58c481b3df833e7ab12ecceaee8ed0b09a7e884 100644 (file)
@@ -890,7 +890,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
                K(get_mm_counter(mm, MM_FILEPAGES)),
                K(get_mm_counter(mm, MM_SHMEMPAGES)),
                from_kuid(&init_user_ns, task_uid(victim)),
-               mm_pgtables_bytes(mm), victim->signal->oom_score_adj);
+               mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
        task_unlock(victim);
 
        /*
index 50055d2e4ea85cf305e209f8a7dd3c84c8251721..2caf780a42e7ae6129aa10f9d2d5dd890a7716f9 100644 (file)
@@ -201,11 +201,11 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
        if (this_bw < tot_bw) {
                if (min) {
                        min *= this_bw;
-                       do_div(min, tot_bw);
+                       min = div64_ul(min, tot_bw);
                }
                if (max < 100) {
                        max *= this_bw;
-                       do_div(max, tot_bw);
+                       max = div64_ul(max, tot_bw);
                }
        }
 
@@ -766,7 +766,7 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
        struct wb_domain *dom = dtc_dom(dtc);
        unsigned long thresh = dtc->thresh;
        u64 wb_thresh;
-       long numerator, denominator;
+       unsigned long numerator, denominator;
        unsigned long wb_min_ratio, wb_max_ratio;
 
        /*
@@ -777,7 +777,7 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
 
        wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
        wb_thresh *= numerator;
-       do_div(wb_thresh, denominator);
+       wb_thresh = div64_ul(wb_thresh, denominator);
 
        wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
 
@@ -1102,7 +1102,7 @@ static void wb_update_write_bandwidth(struct bdi_writeback *wb,
        bw = written - min(written, wb->written_stamp);
        bw *= HZ;
        if (unlikely(elapsed > period)) {
-               do_div(bw, elapsed);
+               bw = div64_ul(bw, elapsed);
                avg = bw;
                goto out;
        }
index 4785a8a2040eeccaa996e93597b30ddf4626e838..d047bf7d8fd406b9959969ab65b433d8954b2803 100644 (file)
@@ -694,34 +694,27 @@ void prep_compound_page(struct page *page, unsigned int order)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
-#else
+bool _debug_pagealloc_enabled_early __read_mostly
+                       = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
-#endif
 EXPORT_SYMBOL(_debug_pagealloc_enabled);
 
 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
 
 static int __init early_debug_pagealloc(char *buf)
 {
-       bool enable = false;
-
-       if (kstrtobool(buf, &enable))
-               return -EINVAL;
-
-       if (enable)
-               static_branch_enable(&_debug_pagealloc_enabled);
-
-       return 0;
+       return kstrtobool(buf, &_debug_pagealloc_enabled_early);
 }
 early_param("debug_pagealloc", early_debug_pagealloc);
 
-static void init_debug_guardpage(void)
+void init_debug_pagealloc(void)
 {
        if (!debug_pagealloc_enabled())
                return;
 
+       static_branch_enable(&_debug_pagealloc_enabled);
+
        if (!debug_guardpage_minorder())
                return;
 
@@ -1186,7 +1179,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         */
        arch_free_page(page, order);
 
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 0);
 
        kasan_free_nondeferred_pages(page, order);
@@ -1207,7 +1200,7 @@ static bool free_pcp_prepare(struct page *page)
 
 static bool bulkfree_pcp_prepare(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return free_pages_check(page);
        else
                return false;
@@ -1221,7 +1214,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
  */
 static bool free_pcp_prepare(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return free_pages_prepare(page, 0, true);
        else
                return free_pages_prepare(page, 0, false);
@@ -1973,10 +1966,6 @@ void __init page_alloc_init_late(void)
 
        for_each_populated_zone(zone)
                set_zone_contiguous(zone);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       init_debug_guardpage();
-#endif
 }
 
 #ifdef CONFIG_CMA
@@ -2106,7 +2095,7 @@ static inline bool free_pages_prezeroed(void)
  */
 static inline bool check_pcp_refill(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return check_new_page(page);
        else
                return false;
@@ -2128,7 +2117,7 @@ static inline bool check_pcp_refill(struct page *page)
 }
 static inline bool check_new_pcp(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return check_new_page(page);
        else
                return false;
@@ -2155,7 +2144,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        set_page_refcounted(page);
 
        arch_alloc_page(page, order);
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 1);
        kasan_alloc_pages(page, order);
        kernel_poison_pages(page, 1 << order, 1);
@@ -4476,8 +4465,11 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                if (page)
                        goto got_pg;
 
-                if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
-                    !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
+               /*
+                * Checks for costly allocations with __GFP_NORETRY, which
+                * includes some THP page fault allocations
+                */
+               if (costly_order && (gfp_mask & __GFP_NORETRY)) {
                        /*
                         * If allocating entire pageblock(s) and compaction
                         * failed because all zones are below low watermarks
@@ -4498,23 +4490,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                        if (compact_result == COMPACT_SKIPPED ||
                            compact_result == COMPACT_DEFERRED)
                                goto nopage;
-               }
-
-               /*
-                * Checks for costly allocations with __GFP_NORETRY, which
-                * includes THP page fault allocations
-                */
-               if (costly_order && (gfp_mask & __GFP_NORETRY)) {
-                       /*
-                        * If compaction is deferred for high-order allocations,
-                        * it is because sync compaction recently failed. If
-                        * this is the case and the caller requested a THP
-                        * allocation, we do not want to heavily disrupt the
-                        * system, so we fail the allocation instead of entering
-                        * direct reclaim.
-                        */
-                       if (compact_result == COMPACT_DEFERRED)
-                               goto nopage;
 
                        /*
                         * Looks like reclaim/compaction is worth trying, but
index 165fa633299348412084a81fb546bd74f1425436..8793e8cc1a48c94939b4f235a77ced1c2f3b2a4b 100644 (file)
@@ -2107,9 +2107,10 @@ unsigned long shmem_get_unmapped_area(struct file *file,
        /*
         * Our priority is to support MAP_SHARED mapped hugely;
         * and support MAP_PRIVATE mapped hugely too, until it is COWed.
-        * But if caller specified an address hint, respect that as before.
+        * But if caller specified an address hint and we allocated area there
+        * successfully, respect that as before.
         */
-       if (uaddr)
+       if (uaddr == addr)
                return addr;
 
        if (shmem_huge != SHMEM_HUGE_FORCE) {
@@ -2143,7 +2144,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
        if (inflated_len < len)
                return addr;
 
-       inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
+       inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
        if (IS_ERR_VALUE(inflated_addr))
                return addr;
        if (inflated_addr & ~PAGE_MASK)
index f1e1840af53309298b289c521e90ff6ac0a952b9..a89633603b2d7f775655680d8865be8c9eab6f6a 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1416,7 +1416,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 #if DEBUG
 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 {
-       if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+       if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
                (cachep->size % PAGE_SIZE) == 0)
                return true;
 
@@ -2008,7 +2008,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
         * to check size >= 256. It guarantees that all necessary small
         * sized slab is initialized in current slab initialization sequence.
         */
-       if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
+       if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
                size >= 256 && cachep->object_size > cache_line_size()) {
                if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
                        size_t tmp_size = ALIGN(size, PAGE_SIZE);
index f0ab6d4ceb4c947207c0f567a6f31e564fa59b29..0d95ddea13b0d835007025e5fc2f487f3c85a76a 100644 (file)
@@ -903,7 +903,8 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
         * deactivates the memcg kmem_caches through workqueue. Make sure all
         * previous workitems on workqueue are processed.
         */
-       flush_workqueue(memcg_kmem_cache_wq);
+       if (likely(memcg_kmem_cache_wq))
+               flush_workqueue(memcg_kmem_cache_wq);
 
        /*
         * If we're racing with children kmem_cache deactivation, it might
index d11389710b12d6f35479aa5d029ac6b5b9205228..8eafccf759409b78657fae18e80ae12c9fd52789 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -288,7 +288,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
        unsigned long freepointer_addr;
        void *p;
 
-       if (!debug_pagealloc_enabled())
+       if (!debug_pagealloc_enabled_static())
                return get_freepointer(s, object);
 
        freepointer_addr = (unsigned long)object + s->offset;
index b20ab7cdac867071b27c81e39cffad475851fad5..3822ecbd8a1f644e6e9e1d7b27655e5e5ae00ebe 100644 (file)
@@ -777,7 +777,14 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
        if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
 
-               if (!section_is_early) {
+               /*
+                * When removing an early section, the usage map is kept (as the
+                * usage maps of other sections fall into the same page). It
+                * will be re-used when re-adding the section - which is then no
+                * longer an early section. If the usage map is PageReserved, it
+                * was allocated during boot.
+                */
+               if (!PageReserved(virt_to_page(ms->usage))) {
                        kfree(ms->usage);
                        ms->usage = NULL;
                }
index e9681dc4aa754ee5745e1f0f7c90913557ada172..b29ad17edcf5fadd73103e46995c737bfebd23cf 100644 (file)
@@ -1383,7 +1383,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 {
        flush_cache_vunmap(va->va_start, va->va_end);
        unmap_vmap_area(va);
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                flush_tlb_kernel_range(va->va_start, va->va_end);
 
        free_vmap_area_noflush(va);
@@ -1681,7 +1681,7 @@ static void vb_free(const void *addr, unsigned long size)
 
        vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
 
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                flush_tlb_kernel_range((unsigned long)addr,
                                        (unsigned long)addr + size);
 
index 2b2b9aae8a3c63dfc3c8a1b0551ec8c415d69567..22d17ecfe7df464412426e2dc4079c541f28ae4d 100644 (file)
@@ -2069,6 +2069,11 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
                zs_pool_dec_isolated(pool);
        }
 
+       if (page_zone(newpage) != page_zone(page)) {
+               dec_zone_page_state(page, NR_ZSPAGES);
+               inc_zone_page_state(newpage, NR_ZSPAGES);
+       }
+
        reset_page(page);
        put_page(page);
        page = newpage;
index c46daf09a5011fe317d0a950ec2ea6735619a148..bb7ec1a3915ddbda397e920d9d281eddbd62527f 100644 (file)
@@ -126,6 +126,7 @@ int vlan_check_real_dev(struct net_device *real_dev,
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+void vlan_dev_uninit(struct net_device *dev);
 bool vlan_dev_inherit_address(struct net_device *dev,
                              struct net_device *real_dev);
 
index e5bff5cc6f97562a9887195ced5c572c1951915e..2a78da4072de9824ef1074b7596697366b68c21c 100644 (file)
@@ -586,7 +586,8 @@ static int vlan_dev_init(struct net_device *dev)
        return 0;
 }
 
-static void vlan_dev_uninit(struct net_device *dev)
+/* Note: this function might be called multiple times for the same device. */
+void vlan_dev_uninit(struct net_device *dev)
 {
        struct vlan_priority_tci_mapping *pm;
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
index c482a6fe939398a82a6d112aa5f306e0765ab387..0db85aeb119b88fae1abaf7d0a10019ba4044915 100644 (file)
@@ -108,11 +108,13 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
        struct ifla_vlan_flags *flags;
        struct ifla_vlan_qos_mapping *m;
        struct nlattr *attr;
-       int rem;
+       int rem, err;
 
        if (data[IFLA_VLAN_FLAGS]) {
                flags = nla_data(data[IFLA_VLAN_FLAGS]);
-               vlan_dev_change_flags(dev, flags->flags, flags->mask);
+               err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
+               if (err)
+                       return err;
        }
        if (data[IFLA_VLAN_INGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
@@ -123,7 +125,9 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (data[IFLA_VLAN_EGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
                        m = nla_data(attr);
-                       vlan_dev_set_egress_priority(dev, m->from, m->to);
+                       err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+                       if (err)
+                               return err;
                }
        }
        return 0;
@@ -179,10 +183,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
                return -EINVAL;
 
        err = vlan_changelink(dev, tb, data, extack);
-       if (err < 0)
-               return err;
-
-       return register_vlan_dev(dev, extack);
+       if (!err)
+               err = register_vlan_dev(dev, extack);
+       if (err)
+               vlan_dev_uninit(dev);
+       return err;
 }
 
 static inline size_t vlan_qos_map_size(unsigned int n)
index b0af3a11d4069cb7e44419d4494f20dd653dbca8..ec7bf5a4a9fc724b63da7bfd7ece79f32b104d99 100644 (file)
@@ -285,6 +285,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
        u32 hash = 0;
        const struct batadv_dat_entry *dat = data;
        const unsigned char *key;
+       __be16 vid;
        u32 i;
 
        key = (const unsigned char *)&dat->ip;
@@ -294,7 +295,8 @@ static u32 batadv_hash_dat(const void *data, u32 size)
                hash ^= (hash >> 6);
        }
 
-       key = (const unsigned char *)&dat->vid;
+       vid = htons(dat->vid);
+       key = (__force const unsigned char *)&vid;
        for (i = 0; i < sizeof(dat->vid); i++) {
                hash += key[i];
                hash += (hash << 10);
index 2cdfc5d6c25db6d9b565d33833360a9ab0c578c2..8c69f0c95a8ed4d92b6991d05e2c0d8133c9425c 100644 (file)
@@ -22,7 +22,8 @@
 #endif
 
 static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                            struct sk_buff *skb, u32 mtu)
+                            struct sk_buff *skb, u32 mtu,
+                            bool confirm_neigh)
 {
 }
 
index 4096d8a74a2bd7474da1e5ee980d840126f90c83..e1256e03a9a86b5b1249895a0627f7fbfa8e3386 100644 (file)
@@ -1867,7 +1867,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
 }
 
 static int ebt_buf_add(struct ebt_entries_buf_state *state,
-                      void *data, unsigned int sz)
+                      const void *data, unsigned int sz)
 {
        if (state->buf_kern_start == NULL)
                goto count_only;
@@ -1901,7 +1901,7 @@ enum compat_mwt {
        EBT_COMPAT_TARGET,
 };
 
-static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
                                enum compat_mwt compat_mwt,
                                struct ebt_entries_buf_state *state,
                                const unsigned char *base)
@@ -1979,22 +1979,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 /* return size of all matches, watchers or target, including necessary
  * alignment and padding.
  */
-static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
                        unsigned int size_left, enum compat_mwt type,
                        struct ebt_entries_buf_state *state, const void *base)
 {
+       const char *buf = (const char *)match32;
        int growth = 0;
-       char *buf;
 
        if (size_left == 0)
                return 0;
 
-       buf = (char *) match32;
-
-       while (size_left >= sizeof(*match32)) {
+       do {
                struct ebt_entry_match *match_kern;
                int ret;
 
+               if (size_left < sizeof(*match32))
+                       return -EINVAL;
+
                match_kern = (struct ebt_entry_match *) state->buf_kern_start;
                if (match_kern) {
                        char *tmp;
@@ -2031,22 +2032,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                if (match_kern)
                        match_kern->match_size = ret;
 
-               /* rule should have no remaining data after target */
-               if (type == EBT_COMPAT_TARGET && size_left)
-                       return -EINVAL;
-
                match32 = (struct compat_ebt_entry_mwt *) buf;
-       }
+       } while (size_left);
 
        return growth;
 }
 
 /* called for all ebt_entry structures. */
-static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
                          unsigned int *total,
                          struct ebt_entries_buf_state *state)
 {
-       unsigned int i, j, startoff, new_offset = 0;
+       unsigned int i, j, startoff, next_expected_off, new_offset = 0;
        /* stores match/watchers/targets & offset of next struct ebt_entry: */
        unsigned int offsets[4];
        unsigned int *offsets_update = NULL;
@@ -2132,11 +2129,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
                        return ret;
        }
 
-       startoff = state->buf_user_offset - startoff;
+       next_expected_off = state->buf_user_offset - startoff;
+       if (next_expected_off != entry->next_offset)
+               return -EINVAL;
 
-       if (WARN_ON(*total < startoff))
+       if (*total < entry->next_offset)
                return -EINVAL;
-       *total -= startoff;
+       *total -= entry->next_offset;
        return 0;
 }
 
index 0ad39c87b7fd20e199af9f4b49d5f9598dcef222..7e885d0697075cd78df86e47ef9cc08195013adb 100644 (file)
@@ -9177,22 +9177,10 @@ static void netdev_unregister_lockdep_key(struct net_device *dev)
 
 void netdev_update_lockdep_key(struct net_device *dev)
 {
-       struct netdev_queue *queue;
-       int i;
-
-       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
        lockdep_unregister_key(&dev->addr_list_lock_key);
-
-       lockdep_register_key(&dev->qdisc_xmit_lock_key);
        lockdep_register_key(&dev->addr_list_lock_key);
 
        lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
-       for (i = 0; i < dev->num_tx_queues; i++) {
-               queue = netdev_get_tx_queue(dev, i);
-
-               lockdep_set_class(&queue->_xmit_lock,
-                                 &dev->qdisc_xmit_lock_key);
-       }
 }
 EXPORT_SYMBOL(netdev_update_lockdep_key);
 
index 4c63c9a4c09e62fdc8b428ae5a990feb329c47f6..f76219bf0c21d778fe0a5f64d7a1b739bca922ef 100644 (file)
@@ -6406,7 +6406,7 @@ static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
               devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA;
 }
 
-#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 30)
+#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)
 
 static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
 {
@@ -7563,7 +7563,7 @@ void devlink_region_destroy(struct devlink_region *region)
 EXPORT_SYMBOL_GPL(devlink_region_destroy);
 
 /**
- *     devlink_region_shapshot_id_get - get snapshot ID
+ *     devlink_region_snapshot_id_get - get snapshot ID
  *
  *     This callback should be called when adding a new snapshot,
  *     Driver should use the same id for multiple snapshots taken
@@ -7571,7 +7571,7 @@ EXPORT_SYMBOL_GPL(devlink_region_destroy);
  *
  *     @devlink: devlink
  */
-u32 devlink_region_shapshot_id_get(struct devlink *devlink)
+u32 devlink_region_snapshot_id_get(struct devlink *devlink)
 {
        u32 id;
 
@@ -7581,7 +7581,7 @@ u32 devlink_region_shapshot_id_get(struct devlink *devlink)
 
        return id;
 }
-EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get);
+EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
 
 /**
  *     devlink_region_snapshot_create - create a new snapshot
index 28b3c258188c02baa4a2a3caec1dfd5d8a29c68c..538f6a735a19f017df8e10149cb578107ddc8cbb 100644 (file)
@@ -2231,10 +2231,10 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
+               offset += len;
                len = sk_msg_elem(msg, i)->length;
                if (start < offset + len)
                        break;
-               offset += len;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
 
@@ -2346,7 +2346,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
           u32, len, u64, flags)
 {
        struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
-       u32 new, i = 0, l, space, copy = 0, offset = 0;
+       u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
        u8 *raw, *to, *from;
        struct page *page;
 
@@ -2356,11 +2356,11 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
+               offset += l;
                l = sk_msg_elem(msg, i)->length;
 
                if (start < offset + l)
                        break;
-               offset += l;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
 
@@ -2415,6 +2415,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
 
                sk_msg_iter_var_next(i);
                sg_unmark_end(psge);
+               sg_unmark_end(&rsge);
                sk_msg_iter_next(msg, end);
        }
 
@@ -2506,7 +2507,7 @@ static void sk_msg_shift_right(struct sk_msg *msg, int i)
 BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
           u32, len, u64, flags)
 {
-       u32 i = 0, l, space, offset = 0;
+       u32 i = 0, l = 0, space, offset = 0;
        u64 last = start + len;
        int pop;
 
@@ -2516,11 +2517,11 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
+               offset += l;
                l = sk_msg_elem(msg, i)->length;
 
                if (start < offset + l)
                        break;
-               offset += l;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
 
@@ -5318,8 +5319,7 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
        if (sk) {
                sk = sk_to_full_sk(sk);
                if (!sk_fullsock(sk)) {
-                       if (!sock_flag(sk, SOCK_RCU_FREE))
-                               sock_gen_put(sk);
+                       sock_gen_put(sk);
                        return NULL;
                }
        }
@@ -5356,8 +5356,7 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
        if (sk) {
                sk = sk_to_full_sk(sk);
                if (!sk_fullsock(sk)) {
-                       if (!sock_flag(sk, SOCK_RCU_FREE))
-                               sock_gen_put(sk);
+                       sock_gen_put(sk);
                        return NULL;
                }
        }
@@ -5424,7 +5423,8 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
 
 BPF_CALL_1(bpf_sk_release, struct sock *, sk)
 {
-       if (!sock_flag(sk, SOCK_RCU_FREE))
+       /* Only full sockets have sk->sk_flags. */
+       if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE))
                sock_gen_put(sk);
        return 0;
 }
index ded2d52276786d2845d0b82d6d0f95ccf76992cc..3866d7e20c07af5b6439af315c1d4fc6ecbf0132 100644 (file)
@@ -594,6 +594,8 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {
+       sock_owned_by_me(sk);
+
        sk_psock_cork_free(psock);
        sk_psock_zap_ingress(psock);
 
index eb114ee419b65a9c9dc282bcfe3f03ea23f49727..8998e356f423247d96cefcab054be37b1051b5b4 100644 (file)
@@ -241,8 +241,11 @@ static void sock_map_free(struct bpf_map *map)
                struct sock *sk;
 
                sk = xchg(psk, NULL);
-               if (sk)
+               if (sk) {
+                       lock_sock(sk);
                        sock_map_unref(sk, psk);
+                       release_sock(sk);
+               }
        }
        raw_spin_unlock_bh(&stab->lock);
        rcu_read_unlock();
@@ -862,7 +865,9 @@ static void sock_hash_free(struct bpf_map *map)
                raw_spin_lock_bh(&bucket->lock);
                hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
                        hlist_del_rcu(&elem->node);
+                       lock_sock(elem->sk);
                        sock_map_unref(elem->sk, elem);
+                       release_sock(elem->sk);
                }
                raw_spin_unlock_bh(&bucket->lock);
        }
index aea918135ec370e3b12cea1ea14319c1ce5cb64b..08c3dc45f1a4344ca919b0fd1a6c73ec04247d05 100644 (file)
@@ -110,7 +110,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                              struct sk_buff *skb , u32 mtu);
+                              struct sk_buff *skb , u32 mtu,
+                              bool confirm_neigh);
 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
                            struct sk_buff *skb);
 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
@@ -251,7 +252,8 @@ static int dn_dst_gc(struct dst_ops *ops)
  * advertise to the other end).
  */
 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                              struct sk_buff *skb, u32 mtu)
+                              struct sk_buff *skb, u32 mtu,
+                              bool confirm_neigh)
 {
        struct dn_route *rt = (struct dn_route *) dst;
        struct neighbour *n = rt->n;
index b678160bbd66e5992c7b7b28a819c25f6dd3ec50..408d4af390a0ee8bfd369790ca3428db7a79b156 100644 (file)
@@ -104,7 +104,7 @@ static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
 }
 
 static const struct dsa_device_ops gswip_netdev_ops = {
-       .name = "gwsip",
+       .name = "gswip",
        .proto  = DSA_TAG_PROTO_GSWIP,
        .xmit = gswip_tag_xmit,
        .rcv = gswip_tag_rcv,
index c95885215525193517a6745040e39a046bf6c494..c8a128c9e5e0f6c8ab048ae6f90aa12e1087cdd6 100644 (file)
@@ -33,9 +33,6 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
        struct dsa_port *dp = dsa_slave_to_port(dev);
        u16 *phdr, hdr;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        if (skb_cow_head(skb, 0) < 0)
                return NULL;
 
index 94447974a3c05d735cad2d26648f432a45e3697a..d5f709b940ffd058deb2870ea559dfb2f0db035a 100644 (file)
@@ -20,6 +20,8 @@
 #include "hsr_main.h"
 #include "hsr_framereg.h"
 
+static struct dentry *hsr_debugfs_root_dir;
+
 static void print_mac_address(struct seq_file *sfp, unsigned char *mac)
 {
        seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:",
@@ -63,8 +65,20 @@ hsr_node_table_open(struct inode *inode, struct file *filp)
        return single_open(filp, hsr_node_table_show, inode->i_private);
 }
 
+void hsr_debugfs_rename(struct net_device *dev)
+{
+       struct hsr_priv *priv = netdev_priv(dev);
+       struct dentry *d;
+
+       d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
+                          hsr_debugfs_root_dir, dev->name);
+       if (IS_ERR(d))
+               netdev_warn(dev, "failed to rename\n");
+       else
+               priv->node_tbl_root = d;
+}
+
 static const struct file_operations hsr_fops = {
-       .owner  = THIS_MODULE,
        .open   = hsr_node_table_open,
        .read   = seq_read,
        .llseek = seq_lseek,
@@ -78,15 +92,14 @@ static const struct file_operations hsr_fops = {
  * When debugfs is configured this routine sets up the node_table file per
  * hsr device for dumping the node_table entries
  */
-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
 {
-       int rc = -1;
        struct dentry *de = NULL;
 
-       de = debugfs_create_dir(hsr_dev->name, NULL);
-       if (!de) {
-               pr_err("Cannot create hsr debugfs root\n");
-               return rc;
+       de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir);
+       if (IS_ERR(de)) {
+               pr_err("Cannot create hsr debugfs directory\n");
+               return;
        }
 
        priv->node_tbl_root = de;
@@ -94,13 +107,13 @@ int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
        de = debugfs_create_file("node_table", S_IFREG | 0444,
                                 priv->node_tbl_root, priv,
                                 &hsr_fops);
-       if (!de) {
-               pr_err("Cannot create hsr node_table directory\n");
-               return rc;
+       if (IS_ERR(de)) {
+               pr_err("Cannot create hsr node_table file\n");
+               debugfs_remove(priv->node_tbl_root);
+               priv->node_tbl_root = NULL;
+               return;
        }
        priv->node_tbl_file = de;
-
-       return 0;
 }
 
 /* hsr_debugfs_term - Tear down debugfs intrastructure
@@ -117,3 +130,18 @@ hsr_debugfs_term(struct hsr_priv *priv)
        debugfs_remove(priv->node_tbl_root);
        priv->node_tbl_root = NULL;
 }
+
+void hsr_debugfs_create_root(void)
+{
+       hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL);
+       if (IS_ERR(hsr_debugfs_root_dir)) {
+               pr_err("Cannot create hsr debugfs root directory\n");
+               hsr_debugfs_root_dir = NULL;
+       }
+}
+
+void hsr_debugfs_remove_root(void)
+{
+       /* debugfs_remove() internally checks NULL and ERROR */
+       debugfs_remove(hsr_debugfs_root_dir);
+}
index b01e1bae4ddc079d60ac319991fd3867216bff9a..c7bd6c49fadff47daaeccd57cc0ce835e84696f4 100644 (file)
@@ -272,6 +272,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
                            skb->dev->dev_addr, skb->len) <= 0)
                goto out;
        skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
 
        if (hsr_ver > 0) {
                hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
@@ -368,7 +370,7 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
        del_timer_sync(&hsr->prune_timer);
        del_timer_sync(&hsr->announce_timer);
 
-       hsr_del_self_node(&hsr->self_node_db);
+       hsr_del_self_node(hsr);
        hsr_del_nodes(&hsr->node_db);
 }
 
@@ -440,11 +442,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        INIT_LIST_HEAD(&hsr->ports);
        INIT_LIST_HEAD(&hsr->node_db);
        INIT_LIST_HEAD(&hsr->self_node_db);
+       spin_lock_init(&hsr->list_lock);
 
        ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
 
        /* Make sure we recognize frames from ourselves in hsr_rcv() */
-       res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
+       res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
                                   slave[1]->dev_addr);
        if (res < 0)
                return res;
@@ -477,31 +480,32 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 
        res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
        if (res)
-               goto err_add_port;
+               goto err_add_master;
 
        res = register_netdevice(hsr_dev);
        if (res)
-               goto fail;
+               goto err_unregister;
 
        res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
        if (res)
-               goto fail;
+               goto err_add_slaves;
+
        res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
        if (res)
-               goto fail;
+               goto err_add_slaves;
 
+       hsr_debugfs_init(hsr, hsr_dev);
        mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
-       res = hsr_debugfs_init(hsr, hsr_dev);
-       if (res)
-               goto fail;
 
        return 0;
 
-fail:
+err_add_slaves:
+       unregister_netdevice(hsr_dev);
+err_unregister:
        list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
                hsr_del_port(port);
-err_add_port:
-       hsr_del_self_node(&hsr->self_node_db);
+err_add_master:
+       hsr_del_self_node(hsr);
 
        return res;
 }
index 292be446007b481d3b88ec1d06ffb494059aa677..27dc65d7de67a17d362985960b83850c894dfac6 100644 (file)
@@ -75,10 +75,11 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
 /* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
  * frames from self that's been looped over the HSR ring.
  */
-int hsr_create_self_node(struct list_head *self_node_db,
+int hsr_create_self_node(struct hsr_priv *hsr,
                         unsigned char addr_a[ETH_ALEN],
                         unsigned char addr_b[ETH_ALEN])
 {
+       struct list_head *self_node_db = &hsr->self_node_db;
        struct hsr_node *node, *oldnode;
 
        node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -88,33 +89,33 @@ int hsr_create_self_node(struct list_head *self_node_db,
        ether_addr_copy(node->macaddress_A, addr_a);
        ether_addr_copy(node->macaddress_B, addr_b);
 
-       rcu_read_lock();
+       spin_lock_bh(&hsr->list_lock);
        oldnode = list_first_or_null_rcu(self_node_db,
                                         struct hsr_node, mac_list);
        if (oldnode) {
                list_replace_rcu(&oldnode->mac_list, &node->mac_list);
-               rcu_read_unlock();
-               synchronize_rcu();
-               kfree(oldnode);
+               spin_unlock_bh(&hsr->list_lock);
+               kfree_rcu(oldnode, rcu_head);
        } else {
-               rcu_read_unlock();
                list_add_tail_rcu(&node->mac_list, self_node_db);
+               spin_unlock_bh(&hsr->list_lock);
        }
 
        return 0;
 }
 
-void hsr_del_self_node(struct list_head *self_node_db)
+void hsr_del_self_node(struct hsr_priv *hsr)
 {
+       struct list_head *self_node_db = &hsr->self_node_db;
        struct hsr_node *node;
 
-       rcu_read_lock();
+       spin_lock_bh(&hsr->list_lock);
        node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
-       rcu_read_unlock();
        if (node) {
                list_del_rcu(&node->mac_list);
-               kfree(node);
+               kfree_rcu(node, rcu_head);
        }
+       spin_unlock_bh(&hsr->list_lock);
 }
 
 void hsr_del_nodes(struct list_head *node_db)
@@ -130,30 +131,43 @@ void hsr_del_nodes(struct list_head *node_db)
  * seq_out is used to initialize filtering of outgoing duplicate frames
  * originating from the newly added node.
  */
-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
-                             u16 seq_out)
+static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+                                    struct list_head *node_db,
+                                    unsigned char addr[],
+                                    u16 seq_out)
 {
-       struct hsr_node *node;
+       struct hsr_node *new_node, *node;
        unsigned long now;
        int i;
 
-       node = kzalloc(sizeof(*node), GFP_ATOMIC);
-       if (!node)
+       new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+       if (!new_node)
                return NULL;
 
-       ether_addr_copy(node->macaddress_A, addr);
+       ether_addr_copy(new_node->macaddress_A, addr);
 
        /* We are only interested in time diffs here, so use current jiffies
         * as initialization. (0 could trigger an spurious ring error warning).
         */
        now = jiffies;
        for (i = 0; i < HSR_PT_PORTS; i++)
-               node->time_in[i] = now;
+               new_node->time_in[i] = now;
        for (i = 0; i < HSR_PT_PORTS; i++)
-               node->seq_out[i] = seq_out;
-
-       list_add_tail_rcu(&node->mac_list, node_db);
+               new_node->seq_out[i] = seq_out;
 
+       spin_lock_bh(&hsr->list_lock);
+       list_for_each_entry_rcu(node, node_db, mac_list) {
+               if (ether_addr_equal(node->macaddress_A, addr))
+                       goto out;
+               if (ether_addr_equal(node->macaddress_B, addr))
+                       goto out;
+       }
+       list_add_tail_rcu(&new_node->mac_list, node_db);
+       spin_unlock_bh(&hsr->list_lock);
+       return new_node;
+out:
+       spin_unlock_bh(&hsr->list_lock);
+       kfree(new_node);
        return node;
 }
 
@@ -163,6 +177,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
                              bool is_sup)
 {
        struct list_head *node_db = &port->hsr->node_db;
+       struct hsr_priv *hsr = port->hsr;
        struct hsr_node *node;
        struct ethhdr *ethhdr;
        u16 seq_out;
@@ -196,7 +211,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
                seq_out = HSR_SEQNR_START;
        }
 
-       return hsr_add_node(node_db, ethhdr->h_source, seq_out);
+       return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out);
 }
 
 /* Use the Supervision frame's info about an eventual macaddress_B for merging
@@ -206,10 +221,11 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
                          struct hsr_port *port_rcv)
 {
-       struct ethhdr *ethhdr;
-       struct hsr_node *node_real;
+       struct hsr_priv *hsr = port_rcv->hsr;
        struct hsr_sup_payload *hsr_sp;
+       struct hsr_node *node_real;
        struct list_head *node_db;
+       struct ethhdr *ethhdr;
        int i;
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -231,7 +247,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
        node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
        if (!node_real)
                /* No frame received from AddrA of this node yet */
-               node_real = hsr_add_node(node_db, hsr_sp->macaddress_A,
+               node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
                                         HSR_SEQNR_START - 1);
        if (!node_real)
                goto done; /* No mem */
@@ -252,7 +268,9 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
        }
        node_real->addr_B_port = port_rcv->type;
 
+       spin_lock_bh(&hsr->list_lock);
        list_del_rcu(&node_curr->mac_list);
+       spin_unlock_bh(&hsr->list_lock);
        kfree_rcu(node_curr, rcu_head);
 
 done:
@@ -368,12 +386,13 @@ void hsr_prune_nodes(struct timer_list *t)
 {
        struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
        struct hsr_node *node;
+       struct hsr_node *tmp;
        struct hsr_port *port;
        unsigned long timestamp;
        unsigned long time_a, time_b;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(node, &hsr->node_db, mac_list) {
+       spin_lock_bh(&hsr->list_lock);
+       list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
                /* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
                 * nor time_in[HSR_PT_SLAVE_B], will ever be updated for
                 * the master port. Thus the master node will be repeatedly
@@ -421,7 +440,7 @@ void hsr_prune_nodes(struct timer_list *t)
                        kfree_rcu(node, rcu_head);
                }
        }
-       rcu_read_unlock();
+       spin_unlock_bh(&hsr->list_lock);
 
        /* Restart timer */
        mod_timer(&hsr->prune_timer,
index 89a3ce38151d187faa595b242f6ea0e75d535985..0f0fa12b432937fdae50dc2f57953fffe238c20b 100644 (file)
 
 struct hsr_node;
 
-void hsr_del_self_node(struct list_head *self_node_db);
+void hsr_del_self_node(struct hsr_priv *hsr);
 void hsr_del_nodes(struct list_head *node_db);
-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
-                             u16 seq_out);
 struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
                              bool is_sup);
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
@@ -33,7 +31,7 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
 
 void hsr_prune_nodes(struct timer_list *t);
 
-int hsr_create_self_node(struct list_head *self_node_db,
+int hsr_create_self_node(struct hsr_priv *hsr,
                         unsigned char addr_a[ETH_ALEN],
                         unsigned char addr_b[ETH_ALEN]);
 
index b9988a662ee1ac14c3ad8b96ea18a2dca92f5254..9e389accbfc7e52bc677b5f3d8cb59e93f4d9bf0 100644 (file)
@@ -45,6 +45,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
        case NETDEV_CHANGE:     /* Link (carrier) state changes */
                hsr_check_carrier_and_operstate(hsr);
                break;
+       case NETDEV_CHANGENAME:
+               if (is_hsr_master(dev))
+                       hsr_debugfs_rename(dev);
+               break;
        case NETDEV_CHANGEADDR:
                if (port->type == HSR_PT_MASTER) {
                        /* This should not happen since there's no
@@ -64,7 +68,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
 
                /* Make sure we recognize frames from ourselves in hsr_rcv() */
                port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
-               res = hsr_create_self_node(&hsr->self_node_db,
+               res = hsr_create_self_node(hsr,
                                           master->dev->dev_addr,
                                           port ?
                                                port->dev->dev_addr :
@@ -123,6 +127,7 @@ static void __exit hsr_exit(void)
 {
        unregister_netdevice_notifier(&hsr_nb);
        hsr_netlink_exit();
+       hsr_debugfs_remove_root();
 }
 
 module_init(hsr_init);
index 96fac696a1e1a602fdf46c4df74016b7baf50008..d40de84a637f3af801bc4d317b396a992b7e2340 100644 (file)
@@ -160,8 +160,9 @@ struct hsr_priv {
        int announce_count;
        u16 sequence_nr;
        u16 sup_sequence_nr;    /* For HSRv1 separate seq_nr for supervision */
-       u8 prot_version;                /* Indicate if HSRv0 or HSRv1. */
-       spinlock_t seqnr_lock;                  /* locking for sequence_nr */
+       u8 prot_version;        /* Indicate if HSRv0 or HSRv1. */
+       spinlock_t seqnr_lock;  /* locking for sequence_nr */
+       spinlock_t list_lock;   /* locking for node list */
        unsigned char           sup_multicast_addr[ETH_ALEN];
 #ifdef CONFIG_DEBUG_FS
        struct dentry *node_tbl_root;
@@ -184,17 +185,24 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
 }
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
+void hsr_debugfs_rename(struct net_device *dev);
+void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
 void hsr_debugfs_term(struct hsr_priv *priv);
+void hsr_debugfs_create_root(void);
+void hsr_debugfs_remove_root(void);
 #else
-static inline int hsr_debugfs_init(struct hsr_priv *priv,
-                                  struct net_device *hsr_dev)
+static inline void hsr_debugfs_rename(struct net_device *dev)
 {
-       return 0;
 }
-
+static inline void hsr_debugfs_init(struct hsr_priv *priv,
+                                   struct net_device *hsr_dev)
+{}
 static inline void hsr_debugfs_term(struct hsr_priv *priv)
 {}
+static inline void hsr_debugfs_create_root(void)
+{}
+static inline void hsr_debugfs_remove_root(void)
+{}
 #endif
 
 #endif /*  __HSR_PRIVATE_H */
index 8f8337f893badcbe854108fba737ac9cf45a32ee..8dc0547f01d0bfc296fb61775f083cf82d8e33b1 100644 (file)
@@ -476,6 +476,7 @@ int __init hsr_netlink_init(void)
        if (rc)
                goto fail_genl_register_family;
 
+       hsr_debugfs_create_root();
        return 0;
 
 fail_genl_register_family:
index b9df9c09b84e5e6af9d6fcd1386ea669ba14a560..195469a133713a73bccf1d970121ef9866cc3766 100644 (file)
@@ -2193,6 +2193,12 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
        int count = cb->args[2];
        t_key key = cb->args[3];
 
+       /* First time here, count and key are both always 0. Count > 0
+        * and key == 0 means the dump has wrapped around and we are done.
+        */
+       if (count && !key)
+               return skb->len;
+
        while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
                int err;
 
index e4c6e8b4049063f5239a5e99a185016ad3bb5790..18c0d5bffe12b04b48faedc247bb3e491a7abcea 100644 (file)
@@ -1086,7 +1086,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
                if (!dst)
                        goto out;
        }
-       dst->ops->update_pmtu(dst, sk, NULL, mtu);
+       dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
 
        dst = __sk_dst_check(sk, 0);
        if (!dst)
index 38c02bb62e2cdd8b379d01e3db6e2fc2cedc3004..0fe2a5d3e258fb4220155690016d28761fdde14c 100644 (file)
@@ -505,7 +505,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
                mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
        if (skb_valid_dst(skb))
-               skb_dst_update_pmtu(skb, mtu);
+               skb_dst_update_pmtu_no_confirm(skb, mtu);
 
        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
index 9b153c7fcbb4d8ecd786bbe2349258eeebd8e693..e90b600c7a25fe397214e721c806c358da31a76a 100644 (file)
@@ -214,7 +214,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 
        mtu = dst_mtu(dst);
        if (skb->len > mtu) {
-               skb_dst_update_pmtu(skb, mtu);
+               skb_dst_update_pmtu_no_confirm(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
index 214154b47d56c7af594496abf719252a13a5594a..f1f78a742b36a18962a7a66884607e2aaa0a28bb 100644 (file)
@@ -384,10 +384,11 @@ next:             ;
        return 1;
 }
 
-static inline int check_target(struct arpt_entry *e, const char *name)
+static int check_target(struct arpt_entry *e, struct net *net, const char *name)
 {
        struct xt_entry_target *t = arpt_get_target(e);
        struct xt_tgchk_param par = {
+               .net       = net,
                .table     = name,
                .entryinfo = e,
                .target    = t->u.kernel.target,
@@ -399,8 +400,9 @@ static inline int check_target(struct arpt_entry *e, const char *name)
        return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
 }
 
-static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
+static int
+find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
+                unsigned int size,
                 struct xt_percpu_counter_alloc_state *alloc_state)
 {
        struct xt_entry_target *t;
@@ -419,7 +421,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
        }
        t->u.kernel.target = target;
 
-       ret = check_target(e, name);
+       ret = check_target(e, net, name);
        if (ret)
                goto err;
        return 0;
@@ -494,12 +496,13 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
        return 0;
 }
 
-static inline void cleanup_entry(struct arpt_entry *e)
+static void cleanup_entry(struct arpt_entry *e, struct net *net)
 {
        struct xt_tgdtor_param par;
        struct xt_entry_target *t;
 
        t = arpt_get_target(e);
+       par.net      = net;
        par.target   = t->u.kernel.target;
        par.targinfo = t->data;
        par.family   = NFPROTO_ARP;
@@ -512,7 +515,9 @@ static inline void cleanup_entry(struct arpt_entry *e)
 /* Checks and translates the user-supplied table segment (held in
  * newinfo).
  */
-static int translate_table(struct xt_table_info *newinfo, void *entry0,
+static int translate_table(struct net *net,
+                          struct xt_table_info *newinfo,
+                          void *entry0,
                           const struct arpt_replace *repl)
 {
        struct xt_percpu_counter_alloc_state alloc_state = { 0 };
@@ -569,7 +574,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
        /* Finally, each sanity check must pass */
        i = 0;
        xt_entry_foreach(iter, entry0, newinfo->size) {
-               ret = find_check_entry(iter, repl->name, repl->size,
+               ret = find_check_entry(iter, net, repl->name, repl->size,
                                       &alloc_state);
                if (ret != 0)
                        break;
@@ -580,7 +585,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
                xt_entry_foreach(iter, entry0, newinfo->size) {
                        if (i-- == 0)
                                break;
-                       cleanup_entry(iter);
+                       cleanup_entry(iter, net);
                }
                return ret;
        }
@@ -923,7 +928,7 @@ static int __do_replace(struct net *net, const char *name,
        /* Decrease module usage counts and free resource */
        loc_cpu_old_entry = oldinfo->entries;
        xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
-               cleanup_entry(iter);
+               cleanup_entry(iter, net);
 
        xt_free_table_info(oldinfo);
        if (copy_to_user(counters_ptr, counters,
@@ -974,7 +979,7 @@ static int do_replace(struct net *net, const void __user *user,
                goto free_newinfo;
        }
 
-       ret = translate_table(newinfo, loc_cpu_entry, &tmp);
+       ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
        if (ret != 0)
                goto free_newinfo;
 
@@ -986,7 +991,7 @@ static int do_replace(struct net *net, const void __user *user,
 
  free_newinfo_untrans:
        xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
-               cleanup_entry(iter);
+               cleanup_entry(iter, net);
  free_newinfo:
        xt_free_table_info(newinfo);
        return ret;
@@ -1149,7 +1154,8 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
        }
 }
 
-static int translate_compat_table(struct xt_table_info **pinfo,
+static int translate_compat_table(struct net *net,
+                                 struct xt_table_info **pinfo,
                                  void **pentry0,
                                  const struct compat_arpt_replace *compatr)
 {
@@ -1217,7 +1223,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
        repl.num_counters = 0;
        repl.counters = NULL;
        repl.size = newinfo->size;
-       ret = translate_table(newinfo, entry1, &repl);
+       ret = translate_table(net, newinfo, entry1, &repl);
        if (ret)
                goto free_newinfo;
 
@@ -1270,7 +1276,7 @@ static int compat_do_replace(struct net *net, void __user *user,
                goto free_newinfo;
        }
 
-       ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+       ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
        if (ret != 0)
                goto free_newinfo;
 
@@ -1282,7 +1288,7 @@ static int compat_do_replace(struct net *net, void __user *user,
 
  free_newinfo_untrans:
        xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
-               cleanup_entry(iter);
+               cleanup_entry(iter, net);
  free_newinfo:
        xt_free_table_info(newinfo);
        return ret;
@@ -1509,7 +1515,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
        return ret;
 }
 
-static void __arpt_unregister_table(struct xt_table *table)
+static void __arpt_unregister_table(struct net *net, struct xt_table *table)
 {
        struct xt_table_info *private;
        void *loc_cpu_entry;
@@ -1521,7 +1527,7 @@ static void __arpt_unregister_table(struct xt_table *table)
        /* Decrease module usage counts and free resources */
        loc_cpu_entry = private->entries;
        xt_entry_foreach(iter, loc_cpu_entry, private->size)
-               cleanup_entry(iter);
+               cleanup_entry(iter, net);
        if (private->number > private->initial_entries)
                module_put(table_owner);
        xt_free_table_info(private);
@@ -1546,7 +1552,7 @@ int arpt_register_table(struct net *net,
        loc_cpu_entry = newinfo->entries;
        memcpy(loc_cpu_entry, repl->entries, repl->size);
 
-       ret = translate_table(newinfo, loc_cpu_entry, repl);
+       ret = translate_table(net, newinfo, loc_cpu_entry, repl);
        if (ret != 0)
                goto out_free;
 
@@ -1561,7 +1567,7 @@ int arpt_register_table(struct net *net,
 
        ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
        if (ret != 0) {
-               __arpt_unregister_table(new_table);
+               __arpt_unregister_table(net, new_table);
                *res = NULL;
        }
 
@@ -1576,7 +1582,7 @@ void arpt_unregister_table(struct net *net, struct xt_table *table,
                           const struct nf_hook_ops *ops)
 {
        nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
-       __arpt_unregister_table(table);
+       __arpt_unregister_table(net, table);
 }
 
 /* The built-in targets: standard (NULL) and error. */
index f88c93c38f1173614363b10cab7362771dd9ed8c..87e979f2b74a5fd3b4f3b70250ffd6de4ef6212f 100644 (file)
@@ -139,7 +139,8 @@ static unsigned int  ipv4_mtu(const struct dst_entry *dst);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void             ipv4_link_failure(struct sk_buff *skb);
 static void             ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                          struct sk_buff *skb, u32 mtu);
+                                          struct sk_buff *skb, u32 mtu,
+                                          bool confirm_neigh);
 static void             ip_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
 static void            ipv4_dst_destroy(struct dst_entry *dst);
@@ -1043,7 +1044,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 }
 
 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                             struct sk_buff *skb, u32 mtu)
+                             struct sk_buff *skb, u32 mtu,
+                             bool confirm_neigh)
 {
        struct rtable *rt = (struct rtable *) dst;
        struct flowi4 fl4;
@@ -2687,7 +2689,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
 }
 
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                         struct sk_buff *skb, u32 mtu)
+                                         struct sk_buff *skb, u32 mtu,
+                                         bool confirm_neigh)
 {
 }
 
index e38705165ac9bd48910348169fc9904c41fb48dc..8a01428f80c1c4b23e2ae0568efb2aa0e0f71ccb 100644 (file)
@@ -121,14 +121,14 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        struct sk_psock *psock;
        int copied, ret;
 
-       if (unlikely(flags & MSG_ERRQUEUE))
-               return inet_recv_error(sk, msg, len, addr_len);
-       if (!skb_queue_empty(&sk->sk_receive_queue))
-               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-
        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+       if (unlikely(flags & MSG_ERRQUEUE))
+               return inet_recv_error(sk, msg, len, addr_len);
+       if (!skb_queue_empty(&sk->sk_receive_queue) &&
+           sk_psock_queue_empty(psock))
+               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
        lock_sock(sk);
 msg_bytes_ready:
        copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
@@ -139,7 +139,7 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                timeo = sock_rcvtimeo(sk, nonblock);
                data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
                if (data) {
-                       if (skb_queue_empty(&sk->sk_receive_queue))
+                       if (!sk_psock_queue_empty(psock))
                                goto msg_bytes_ready;
                        release_sock(sk);
                        sk_psock_put(sk, psock);
@@ -315,10 +315,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
                 */
                delta = msg->sg.size;
                psock->eval = sk_psock_msg_verdict(sk, psock, msg);
-               if (msg->sg.size < delta)
-                       delta -= msg->sg.size;
-               else
-                       delta = 0;
+               delta -= msg->sg.size;
        }
 
        if (msg->cork_bytes &&
index 88b987ca9ebbbbf0f28ea4560e3731837aea7fb3..5347ab2c9c58b312d3f3a969141a835a8c4b6657 100644 (file)
@@ -915,9 +915,10 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
 /* This must be called before lost_out is incremented */
 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       if (!tp->retransmit_skb_hint ||
-           before(TCP_SKB_CB(skb)->seq,
-                  TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
+       if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
+           (tp->retransmit_skb_hint &&
+            before(TCP_SKB_CB(skb)->seq,
+                   TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
                tp->retransmit_skb_hint = skb;
 }
 
@@ -1727,8 +1728,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                }
 
                /* Ignore very old stuff early */
-               if (!after(sp[used_sacks].end_seq, prior_snd_una))
+               if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
+                       if (i == 0)
+                               first_sack_index = -1;
                        continue;
+               }
 
                used_sacks++;
        }
index 1f7735ca8f22d453c1e687fdd5d973df1891cb1c..58c92a7d671c54564479db061dcec186a8a7bb34 100644 (file)
@@ -72,6 +72,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
        __skb_unlink(skb, &sk->sk_write_queue);
        tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
 
+       if (tp->highest_sack == NULL)
+               tp->highest_sack = skb;
+
        tp->packets_out += tcp_skb_pcount(skb);
        if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
                tcp_rearm_rto(sk);
index 12ab5db2b71cb5781fd9517d944138e1cecbd5dc..38d3ad1411611ef6002a9843ec7699a860f1485a 100644 (file)
@@ -99,17 +99,19 @@ void tcp_get_available_ulp(char *buf, size_t maxlen)
        rcu_read_unlock();
 }
 
-void tcp_update_ulp(struct sock *sk, struct proto *proto)
+void tcp_update_ulp(struct sock *sk, struct proto *proto,
+                   void (*write_space)(struct sock *sk))
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
 
        if (!icsk->icsk_ulp_ops) {
+               sk->sk_write_space = write_space;
                sk->sk_prot = proto;
                return;
        }
 
        if (icsk->icsk_ulp_ops->update)
-               icsk->icsk_ulp_ops->update(sk, proto);
+               icsk->icsk_ulp_ops->update(sk, proto, write_space);
 }
 
 void tcp_cleanup_ulp(struct sock *sk)
index 4da5758cc718739b63c7dda65cf889869b3b6141..93a355b6b09242e9adc597e49589c6d0e4c71f71 100644 (file)
@@ -1475,7 +1475,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
         * queue contains some other skb
         */
        rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
-       if (rmem > (size + sk->sk_rcvbuf))
+       if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
                goto uncharge_drop;
 
        spin_lock(&list->lock);
index 35b84b52b7027b35f65b36d6a0160913e8df2c5a..9ebd54752e03b81a01f6c53cc17cebbccd928137 100644 (file)
@@ -100,12 +100,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 }
 
 static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                             struct sk_buff *skb, u32 mtu)
+                             struct sk_buff *skb, u32 mtu,
+                             bool confirm_neigh)
 {
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct dst_entry *path = xdst->route;
 
-       path->ops->update_pmtu(path, sk, skb, mtu);
+       path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
 }
 
 static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
index fe9cb8d1adca069b39990307ac8ab1f7972e598a..e315526fa244ab83363495d4cfa0def7e3decf85 100644 (file)
@@ -146,7 +146,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
 
        if (IS_ERR(dst))
                return NULL;
-       dst->ops->update_pmtu(dst, sk, NULL, mtu);
+       dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
 
        dst = inet6_csk_route_socket(sk, &fl6);
        return IS_ERR(dst) ? NULL : dst;
index 9d0965252ddf723a0f4e64b8f4ef18675bdd55a3..ee968d9807463fc59ccde4500a7f64070f66e69b 100644 (file)
@@ -1040,7 +1040,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 
        /* TooBig packet may have updated dst->dev's mtu */
        if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
-               dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+               dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
 
        err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
                           NEXTHDR_GRE);
index 754a484d35df6eb03b82593cbb66a78718e30326..2f376dbc37d52ea6a16cfb352f7f68d5254df732 100644 (file)
@@ -640,7 +640,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (rel_info > dst_mtu(skb_dst(skb2)))
                        goto out;
 
-               skb_dst_update_pmtu(skb2, rel_info);
+               skb_dst_update_pmtu_no_confirm(skb2, rel_info);
        }
 
        icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -1132,7 +1132,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
        mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
                       IPV6_MIN_MTU : IPV4_MIN_MTU);
 
-       skb_dst_update_pmtu(skb, mtu);
+       skb_dst_update_pmtu_no_confirm(skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
                *pmtu = mtu;
                err = -EMSGSIZE;
index 024db17386d2ff8043269bd6f7a46f4535fba67f..6f08b760c2a7c1aeaae00c58568e38e2806b99f3 100644 (file)
@@ -479,7 +479,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
        mtu = dst_mtu(dst);
        if (skb->len > mtu) {
-               skb_dst_update_pmtu(skb, mtu);
+               skb_dst_update_pmtu_no_confirm(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        if (mtu < IPV6_MIN_MTU)
index b59940416cb5776c7a77910fef029aef8e9491e7..affb51c11a25b0beaddae3c13b1d74b86dfec042 100644 (file)
@@ -95,7 +95,8 @@ static int            ip6_pkt_prohibit(struct sk_buff *skb);
 static int             ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static void            ip6_link_failure(struct sk_buff *skb);
 static void            ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                          struct sk_buff *skb, u32 mtu);
+                                          struct sk_buff *skb, u32 mtu,
+                                          bool confirm_neigh);
 static void            rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
@@ -264,7 +265,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
 }
 
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                        struct sk_buff *skb, u32 mtu)
+                                        struct sk_buff *skb, u32 mtu,
+                                        bool confirm_neigh)
 {
 }
 
@@ -2692,7 +2694,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
 }
 
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
-                                const struct ipv6hdr *iph, u32 mtu)
+                                const struct ipv6hdr *iph, u32 mtu,
+                                bool confirm_neigh)
 {
        const struct in6_addr *daddr, *saddr;
        struct rt6_info *rt6 = (struct rt6_info *)dst;
@@ -2710,7 +2713,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
                daddr = NULL;
                saddr = NULL;
        }
-       dst_confirm_neigh(dst, daddr);
+
+       if (confirm_neigh)
+               dst_confirm_neigh(dst, daddr);
+
        mtu = max_t(u32, mtu, IPV6_MIN_MTU);
        if (mtu >= dst_mtu(dst))
                return;
@@ -2764,9 +2770,11 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 }
 
 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                              struct sk_buff *skb, u32 mtu)
+                              struct sk_buff *skb, u32 mtu,
+                              bool confirm_neigh)
 {
-       __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
+       __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
+                            confirm_neigh);
 }
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
@@ -2785,7 +2793,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 
        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
-               __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
+               __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
        dst_release(dst);
 }
 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
index b2ccbc4731277d5215d2c82c0d29d1387ae81e13..98954830c40baed84c6c3d123d299a227c2d1dee 100644 (file)
@@ -944,7 +944,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                }
 
                if (tunnel->parms.iph.daddr)
-                       skb_dst_update_pmtu(skb, mtu);
+                       skb_dst_update_pmtu_no_confirm(skb, mtu);
 
                if (skb->len > mtu && !skb_is_gso(skb)) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
index 699e0730ce8e86545ef2812da316f8e726e4a75d..af7a4b8b1e9c4a46a0b0cf4245a981efa24b9152 100644 (file)
@@ -98,12 +98,13 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 }
 
 static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                             struct sk_buff *skb, u32 mtu)
+                             struct sk_buff *skb, u32 mtu,
+                             bool confirm_neigh)
 {
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct dst_entry *path = xdst->route;
 
-       path->ops->update_pmtu(path, sk, skb, mtu);
+       path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
 }
 
 static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
index 4fb7f1f1210980df126c0560fd6cc824fc1388c9..000c742d05279a8bebb23b54b061e47669b5a4ae 100644 (file)
@@ -2954,6 +2954,28 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
        return err;
 }
 
+static void ieee80211_end_cac(struct wiphy *wiphy,
+                             struct net_device *dev)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+
+       mutex_lock(&local->mtx);
+       list_for_each_entry(sdata, &local->interfaces, list) {
+               /* it might be waiting for the local->mtx, but then
+                * by the time it gets it, sdata->wdev.cac_started
+                * will no longer be true
+                */
+               cancel_delayed_work(&sdata->dfs_cac_timer_work);
+
+               if (sdata->wdev.cac_started) {
+                       ieee80211_vif_release_channel(sdata);
+                       sdata->wdev.cac_started = false;
+               }
+       }
+       mutex_unlock(&local->mtx);
+}
+
 static struct cfg80211_beacon_data *
 cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
 {
@@ -4023,6 +4045,7 @@ const struct cfg80211_ops mac80211_config_ops = {
 #endif
        .get_channel = ieee80211_cfg_get_channel,
        .start_radar_detection = ieee80211_start_radar_detection,
+       .end_cac = ieee80211_end_cac,
        .channel_switch = ieee80211_channel_switch,
        .set_qos_map = ieee80211_set_qos_map,
        .set_ap_chanwidth = ieee80211_set_ap_chanwidth,
index 68af6230638588961d3839b028f027cf690949bc..d699833703819af130c54803528aa7808e4f2c54 100644 (file)
@@ -328,6 +328,9 @@ u32 airtime_link_metric_get(struct ieee80211_local *local,
        unsigned long fail_avg =
                ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);
 
+       if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+               return MAX_METRIC;
+
        /* Try to get rate based on HW/SW RC algorithm.
         * Rate is returned in units of Kbps, correct this
         * to comply with airtime calculation units
index 727dc9f3f3b3a769d2764c529d407073e1d5d825..e7f57bb18f6e00255e75a465cd7ddb1c7282e6f4 100644 (file)
@@ -263,9 +263,21 @@ int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx,
        if ((keyid >> 6) != key->conf.keyidx)
                return TKIP_DECRYPT_INVALID_KEYIDX;
 
-       if (rx_ctx->ctx.state != TKIP_STATE_NOT_INIT &&
-           (iv32 < rx_ctx->iv32 ||
-            (iv32 == rx_ctx->iv32 && iv16 <= rx_ctx->iv16)))
+       /* Reject replays if the received TSC is smaller than or equal to the
+        * last received value in a valid message, but with an exception for
+        * the case where a new key has been set and no valid frame using that
+        * key has yet received and the local RSC was initialized to 0. This
+        * exception allows the very first frame sent by the transmitter to be
+        * accepted even if that transmitter were to use TSC 0 (IEEE 802.11
+        * described TSC to be initialized to 1 whenever a new key is taken into
+        * use).
+        */
+       if (iv32 < rx_ctx->iv32 ||
+           (iv32 == rx_ctx->iv32 &&
+            (iv16 < rx_ctx->iv16 ||
+             (iv16 == rx_ctx->iv16 &&
+              (rx_ctx->iv32 || rx_ctx->iv16 ||
+               rx_ctx->ctx.state != TKIP_STATE_NOT_INIT)))))
                return TKIP_DECRYPT_REPLAY;
 
        if (only_iv) {
index 1abd6f0dc227f072c5d09aa03b4e71fdfe8d466f..077a2cb65fcb38ce8f3d40414289dc341c9da236 100644 (file)
@@ -60,9 +60,9 @@ mtype_destroy(struct ip_set *set)
        if (SET_WITH_TIMEOUT(set))
                del_timer_sync(&map->gc);
 
-       ip_set_free(map->members);
        if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
                mtype_ext_cleanup(set);
+       ip_set_free(map->members);
        ip_set_free(map);
 
        set->data = NULL;
index 169e0a04f814b2187f909723975f946cf76ed8dd..cf895bc808713c677e5dcf1d2139b51e30dacc9a 100644 (file)
@@ -1848,6 +1848,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
        struct ip_set *set;
        struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
        int ret = 0;
+       u32 lineno;
 
        if (unlikely(protocol_min_failed(attr) ||
                     !attr[IPSET_ATTR_SETNAME] ||
@@ -1864,7 +1865,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
                return -IPSET_ERR_PROTOCOL;
 
        rcu_read_lock_bh();
-       ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
+       ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
        rcu_read_unlock_bh();
        /* Userspace can't trigger element to be re-added */
        if (ret == -EAGAIN)
index b1e300f8881b20dd8bc0c9bb2206ca911ab6bd73..b00866d777fe0e9ed8018087ebc664c56f29b5c9 100644 (file)
@@ -208,7 +208,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
        struct rtable *ort = skb_rtable(skb);
 
        if (!skb->dev && sk && sk_fullsock(sk))
-               ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
+               ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true);
 }
 
 static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
index b6b14db3955bfb55a523534ceb8cb7d3716f685a..b3f4a334f9d78d77d5912315290bf32f5390232d 100644 (file)
@@ -677,6 +677,9 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
        unsigned int *timeouts = data;
        int i;
 
+       if (!timeouts)
+                timeouts = dn->dccp_timeout;
+
        /* set default DCCP timeouts. */
        for (i=0; i<CT_DCCP_MAX; i++)
                timeouts[i] = dn->dccp_timeout[i];
index fce3d93f154181e9b5a884f2afedde818fa09bc2..0399ae8f1188fb3c73d60f8c509991d8194a1791 100644 (file)
@@ -594,6 +594,9 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
        struct nf_sctp_net *sn = nf_sctp_pernet(net);
        int i;
 
+       if (!timeouts)
+               timeouts = sn->timeouts;
+
        /* set default SCTP timeouts. */
        for (i=0; i<SCTP_CONNTRACK_MAX; i++)
                timeouts[i] = sn->timeouts[i];
index 9889d52eda8203a5e247ef2e5d2c477f05721e6d..e33a73cb1f42ed92e2dc3056d3f3cf807f23d449 100644 (file)
@@ -134,11 +134,6 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
 #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT        (120 * HZ)
 #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT        (30 * HZ)
 
-static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
-{
-       return (__s32)(timeout - (u32)jiffies);
-}
-
 static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
 {
        const struct nf_conntrack_l4proto *l4proto;
@@ -232,7 +227,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
        int err;
 
-       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+       flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
 
        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
index b9e7dd6e60ce2b1327709fb38a0eab5e7e2d1af6..7ea2ddc2aa930ced6efda1d02b2a1c5dc092966e 100644 (file)
@@ -280,7 +280,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
        if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
                return NF_DROP;
 
-       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+       flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
        iph = ip_hdr(skb);
        ip_decrease_ttl(iph);
        skb->tstamp = 0;
@@ -509,7 +509,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
        if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
                return NF_DROP;
 
-       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+       flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
        ip6h = ipv6_hdr(skb);
        ip6h->hop_limit--;
        skb->tstamp = 0;
index de7a0d1e15c88adb9a94fa2aeca106ea88fc3805..d06969af1085e571fadd5940c028b64685b9f6c9 100644 (file)
@@ -88,7 +88,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
        switch (tuple->l4proto) {
        case IPPROTO_TCP:
                key->tcp.flags = 0;
-               mask->tcp.flags = TCP_FLAG_RST | TCP_FLAG_FIN;
+               mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
                match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
                break;
        case IPPROTO_UDP:
@@ -166,24 +166,38 @@ static int flow_offload_eth_dst(struct net *net,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
 {
-       const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+       const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
+       const struct dst_entry *dst_cache;
+       unsigned char ha[ETH_ALEN];
        struct neighbour *n;
        u32 mask, val;
+       u8 nud_state;
        u16 val16;
 
-       n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4);
+       dst_cache = flow->tuplehash[dir].tuple.dst_cache;
+       n = dst_neigh_lookup(dst_cache, daddr);
        if (!n)
                return -ENOENT;
 
+       read_lock_bh(&n->lock);
+       nud_state = n->nud_state;
+       ether_addr_copy(ha, n->ha);
+       read_unlock_bh(&n->lock);
+
+       if (!(nud_state & NUD_VALID)) {
+               neigh_release(n);
+               return -ENOENT;
+       }
+
        mask = ~0xffffffff;
-       memcpy(&val, n->ha, 4);
+       memcpy(&val, ha, 4);
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
                            &val, &mask);
 
        mask = ~0x0000ffff;
-       memcpy(&val16, n->ha + 4, 2);
+       memcpy(&val16, ha + 4, 2);
        val = val16;
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                            &val, &mask);
@@ -335,22 +349,26 @@ static void flow_offload_port_snat(struct net *net,
                                   struct nf_flow_rule *flow_rule)
 {
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-       u32 mask = ~htonl(0xffff0000), port;
+       u32 mask, port;
        u32 offset;
 
        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
                offset = 0; /* offsetof(struct tcphdr, source); */
+               port = htonl(port << 16);
+               mask = ~htonl(0xffff0000);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
                offset = 0; /* offsetof(struct tcphdr, dest); */
+               port = htonl(port);
+               mask = ~htonl(0xffff);
                break;
        default:
                return;
        }
-       port = htonl(port << 16);
+
        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            &port, &mask);
 }
@@ -361,22 +379,26 @@ static void flow_offload_port_dnat(struct net *net,
                                   struct nf_flow_rule *flow_rule)
 {
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-       u32 mask = ~htonl(0xffff), port;
+       u32 mask, port;
        u32 offset;
 
        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
-               port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
-               offset = 0; /* offsetof(struct tcphdr, source); */
+               port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
+               offset = 0; /* offsetof(struct tcphdr, dest); */
+               port = htonl(port);
+               mask = ~htonl(0xffff);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
-               port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
-               offset = 0; /* offsetof(struct tcphdr, dest); */
+               port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
+               offset = 0; /* offsetof(struct tcphdr, source); */
+               port = htonl(port << 16);
+               mask = ~htonl(0xffff0000);
                break;
        default:
                return;
        }
-       port = htonl(port);
+
        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            &port, &mask);
 }
@@ -759,9 +781,9 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
                           struct flow_offload *flow)
 {
        struct flow_offload_work *offload;
-       s64 delta;
+       __s32 delta;
 
-       delta = flow->timeout - jiffies;
+       delta = nf_flow_timeout_delta(flow->timeout);
        if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
            flow->flags & FLOW_OFFLOAD_HW_DYING)
                return;
index 0a59c14b5177645f3bbfd847dc5db3500e3b1961..64eedc17037ad783d3a99851038e91394465617e 100644 (file)
@@ -233,6 +233,19 @@ icmp_manip_pkt(struct sk_buff *skb,
                return false;
 
        hdr = (struct icmphdr *)(skb->data + hdroff);
+       switch (hdr->type) {
+       case ICMP_ECHO:
+       case ICMP_ECHOREPLY:
+       case ICMP_TIMESTAMP:
+       case ICMP_TIMESTAMPREPLY:
+       case ICMP_INFO_REQUEST:
+       case ICMP_INFO_REPLY:
+       case ICMP_ADDRESS:
+       case ICMP_ADDRESSREPLY:
+               break;
+       default:
+               return true;
+       }
        inet_proto_csum_replace2(&hdr->checksum, skb,
                                 hdr->un.echo.id, tuple->src.u.icmp.id, false);
        hdr->un.echo.id = tuple->src.u.icmp.id;
index 273f3838318b0e2612a89a28d9b2b48a23602ce5..65f51a2e9c2a7343b2d0677526932f984eba0684 100644 (file)
@@ -22,6 +22,8 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 
+#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
+
 static LIST_HEAD(nf_tables_expressions);
 static LIST_HEAD(nf_tables_objects);
 static LIST_HEAD(nf_tables_flowtables);
@@ -564,33 +566,34 @@ __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
 }
 
 /*
- * Loading a module requires dropping mutex that guards the
- * transaction.
- * We first need to abort any pending transactions as once
- * mutex is unlocked a different client could start a new
- * transaction.  It must not see any 'future generation'
- * changes * as these changes will never happen.
+ * Loading a module requires dropping mutex that guards the transaction.
+ * A different client might race to start a new transaction meanwhile. Zap the
+ * list of pending transaction and then restore it once the mutex is grabbed
+ * again. Users of this function return EAGAIN which implicitly triggers the
+ * transaction abort path to clean up the list of pending transactions.
  */
 #ifdef CONFIG_MODULES
-static int __nf_tables_abort(struct net *net);
-
 static void nft_request_module(struct net *net, const char *fmt, ...)
 {
        char module_name[MODULE_NAME_LEN];
+       LIST_HEAD(commit_list);
        va_list args;
        int ret;
 
-       __nf_tables_abort(net);
+       list_splice_init(&net->nft.commit_list, &commit_list);
 
        va_start(args, fmt);
        ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
        va_end(args);
-       if (WARN(ret >= MODULE_NAME_LEN, "truncated: '%s' (len %d)", module_name, ret))
+       if (ret >= MODULE_NAME_LEN)
                return;
 
        mutex_unlock(&net->nft.commit_mutex);
        request_module("%s", module_name);
        mutex_lock(&net->nft.commit_mutex);
+
+       WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+       list_splice(&commit_list, &net->nft.commit_list);
 }
 #endif
 
@@ -1045,12 +1048,18 @@ static int nft_flush_table(struct nft_ctx *ctx)
        }
 
        list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) {
+               if (!nft_is_active_next(ctx->net, flowtable))
+                       continue;
+
                err = nft_delflowtable(ctx, flowtable);
                if (err < 0)
                        goto out;
        }
 
        list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) {
+               if (!nft_is_active_next(ctx->net, obj))
+                       continue;
+
                err = nft_delobj(ctx, obj);
                if (err < 0)
                        goto out;
@@ -1241,7 +1250,8 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
                                    .len = NFT_CHAIN_MAXNAMELEN - 1 },
        [NFTA_CHAIN_HOOK]       = { .type = NLA_NESTED },
        [NFTA_CHAIN_POLICY]     = { .type = NLA_U32 },
-       [NFTA_CHAIN_TYPE]       = { .type = NLA_STRING },
+       [NFTA_CHAIN_TYPE]       = { .type = NLA_STRING,
+                                   .len = NFT_MODULE_AUTOLOAD_LIMIT },
        [NFTA_CHAIN_COUNTERS]   = { .type = NLA_NESTED },
        [NFTA_CHAIN_FLAGS]      = { .type = NLA_U32 },
 };
@@ -1676,6 +1686,7 @@ static int nf_tables_parse_netdev_hooks(struct net *net,
                        goto err_hook;
                }
                if (nft_hook_list_find(hook_list, hook)) {
+                       kfree(hook);
                        err = -EEXIST;
                        goto err_hook;
                }
@@ -2355,7 +2366,8 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
 }
 
 static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
-       [NFTA_EXPR_NAME]        = { .type = NLA_STRING },
+       [NFTA_EXPR_NAME]        = { .type = NLA_STRING,
+                                   .len = NFT_MODULE_AUTOLOAD_LIMIT },
        [NFTA_EXPR_DATA]        = { .type = NLA_NESTED },
 };
 
@@ -4198,7 +4210,8 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
        [NFTA_SET_ELEM_USERDATA]        = { .type = NLA_BINARY,
                                            .len = NFT_USERDATA_MAXLEN },
        [NFTA_SET_ELEM_EXPR]            = { .type = NLA_NESTED },
-       [NFTA_SET_ELEM_OBJREF]          = { .type = NLA_STRING },
+       [NFTA_SET_ELEM_OBJREF]          = { .type = NLA_STRING,
+                                           .len = NFT_OBJ_MAXNAMELEN - 1 },
 };
 
 static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
@@ -5984,6 +5997,7 @@ nft_flowtable_type_get(struct net *net, u8 family)
        return ERR_PTR(-ENOENT);
 }
 
+/* Only called from error and netdev event paths. */
 static void nft_unregister_flowtable_hook(struct net *net,
                                          struct nft_flowtable *flowtable,
                                          struct nft_hook *hook)
@@ -5999,7 +6013,7 @@ static void nft_unregister_flowtable_net_hooks(struct net *net,
        struct nft_hook *hook;
 
        list_for_each_entry(hook, &flowtable->hook_list, list)
-               nft_unregister_flowtable_hook(net, flowtable, hook);
+               nf_unregister_net_hook(net, &hook->ops);
 }
 
 static int nft_register_flowtable_net_hooks(struct net *net,
@@ -6448,12 +6462,14 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
 {
        struct nft_hook *hook, *next;
 
+       flowtable->data.type->free(&flowtable->data);
        list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+               flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+                                           FLOW_BLOCK_UNBIND);
                list_del_rcu(&hook->list);
                kfree(hook);
        }
        kfree(flowtable->name);
-       flowtable->data.type->free(&flowtable->data);
        module_put(flowtable->data.type->owner);
        kfree(flowtable);
 }
@@ -6497,6 +6513,7 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
                if (hook->ops.dev != dev)
                        continue;
 
+               /* flow_offload_netdev_event() cleans up entries for us. */
                nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook);
                list_del_rcu(&hook->list);
                kfree_rcu(hook, rcu);
index dd82ff2ee19fef7fe62afcbd1443719075b7df0f..b70b48996801a64e1da3c03792bbfd67350d8c8f 100644 (file)
@@ -200,9 +200,6 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
 static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
                                     const struct nft_expr *expr)
 {
-       struct nft_flow_offload *priv = nft_expr_priv(expr);
-
-       priv->flowtable->use--;
        nf_ct_netns_put(ctx->net, ctx->family);
 }
 
index 4c33dfc9dab5058333366b90c730a43cddd5d32c..d67f83a0958d3661aa470a8892e1507386b87195 100644 (file)
@@ -50,7 +50,7 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
        taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);
 
        if (priv->sreg_port)
-               tport = regs->data[priv->sreg_port];
+               tport = nft_reg_load16(&regs->data[priv->sreg_port]);
        if (!tport)
                tport = hp->dest;
 
@@ -117,7 +117,7 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
        taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr);
 
        if (priv->sreg_port)
-               tport = regs->data[priv->sreg_port];
+               tport = nft_reg_load16(&regs->data[priv->sreg_port]);
        if (!tport)
                tport = hp->dest;
 
index 3d4c2ae605a8e2c3ceee669570775cd5ebbb3e1e..5284fcf16be73463f8ac679989298b6b5d520096 100644 (file)
@@ -76,7 +76,7 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
        struct nft_tunnel *priv = nft_expr_priv(expr);
        u32 len;
 
-       if (!tb[NFTA_TUNNEL_KEY] &&
+       if (!tb[NFTA_TUNNEL_KEY] ||
            !tb[NFTA_TUNNEL_DREG])
                return -EINVAL;
 
@@ -266,6 +266,9 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
        if (err < 0)
                return err;
 
+       if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
+                return -EINVAL;
+
        version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
        switch (version) {
        case ERSPAN_VERSION:
index 88f98f27ad88e5d786ca771a94801c63c9c419a1..3d24d45be5f446199779ddc7d52b1414c5e4e8f8 100644 (file)
@@ -196,7 +196,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
        hdr->size = cpu_to_le32(len);
        hdr->confirm_rx = 0;
 
-       skb_put_padto(skb, ALIGN(len, 4));
+       skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
 
        mutex_lock(&node->ep_lock);
        if (node->ep)
index 7c7d10f2e0c181776611cd54d6048a17ed512d5f..5e99df80e80a775b26e0de82eec5738454398abc 100644 (file)
@@ -209,6 +209,7 @@ struct rxrpc_skb_priv {
 struct rxrpc_security {
        const char              *name;          /* name of this service */
        u8                      security_index; /* security type provided */
+       u32                     no_key_abort;   /* Abort code indicating no key */
 
        /* Initialise a security service */
        int (*init)(void);
@@ -977,8 +978,9 @@ static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
 struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
                                                     struct sk_buff *);
 struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
-void rxrpc_new_incoming_connection(struct rxrpc_sock *,
-                                  struct rxrpc_connection *, struct sk_buff *);
+void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
+                                  const struct rxrpc_security *, struct key *,
+                                  struct sk_buff *);
 void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
 
 /*
@@ -1103,7 +1105,9 @@ extern const struct rxrpc_security rxkad;
 int __init rxrpc_init_security(void);
 void rxrpc_exit_security(void);
 int rxrpc_init_client_conn_security(struct rxrpc_connection *);
-int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *,
+                                  const struct rxrpc_security **, struct key **,
+                                  struct sk_buff *);
 
 /*
  * sendmsg.c
index 135bf5cd8dd51be0414ebb89975331d3f47d7b4e..70e44abf106c86c731162a7a40be9963ba9b25f4 100644 (file)
@@ -239,6 +239,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
        kfree(b);
 }
 
+/*
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
+ */
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       ktime_t now = skb->tstamp;
+
+       if (call->peer->rtt_usage < 3 ||
+           ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+               rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+                                 true, true,
+                                 rxrpc_propose_ack_ping_for_params);
+}
+
 /*
  * Allocate a new incoming call from the prealloc pool, along with a connection
  * and a peer as necessary.
@@ -247,6 +263,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
                                                    struct rxrpc_peer *peer,
                                                    struct rxrpc_connection *conn,
+                                                   const struct rxrpc_security *sec,
+                                                   struct key *key,
                                                    struct sk_buff *skb)
 {
        struct rxrpc_backlog *b = rx->backlog;
@@ -294,7 +312,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                conn->params.local = rxrpc_get_local(local);
                conn->params.peer = peer;
                rxrpc_see_connection(conn);
-               rxrpc_new_incoming_connection(rx, conn, skb);
+               rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
        } else {
                rxrpc_get_connection(conn);
        }
@@ -333,9 +351,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                                           struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       const struct rxrpc_security *sec = NULL;
        struct rxrpc_connection *conn;
        struct rxrpc_peer *peer = NULL;
-       struct rxrpc_call *call;
+       struct rxrpc_call *call = NULL;
+       struct key *key = NULL;
 
        _enter("");
 
@@ -346,9 +366,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
                skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
                skb->priority = RX_INVALID_OPERATION;
-               _leave(" = NULL [close]");
-               call = NULL;
-               goto out;
+               goto no_call;
        }
 
        /* The peer, connection and call may all have sprung into existence due
@@ -358,29 +376,19 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
         */
        conn = rxrpc_find_connection_rcu(local, skb, &peer);
 
-       call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
+       if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
+               goto no_call;
+
+       call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
+       key_put(key);
        if (!call) {
                skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
-               _leave(" = NULL [busy]");
-               call = NULL;
-               goto out;
+               goto no_call;
        }
 
        trace_rxrpc_receive(call, rxrpc_receive_incoming,
                            sp->hdr.serial, sp->hdr.seq);
 
-       /* Lock the call to prevent rxrpc_kernel_send/recv_data() and
-        * sendmsg()/recvmsg() inconveniently stealing the mutex once the
-        * notification is generated.
-        *
-        * The BUG should never happen because the kernel should be well
-        * behaved enough not to access the call before the first notification
-        * event and userspace is prevented from doing so until the state is
-        * appropriate.
-        */
-       if (!mutex_trylock(&call->user_mutex))
-               BUG();
-
        /* Make the call live. */
        rxrpc_incoming_call(rx, call, skb);
        conn = call->conn;
@@ -421,6 +429,9 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                BUG();
        }
        spin_unlock(&conn->state_lock);
+       spin_unlock(&rx->incoming_lock);
+
+       rxrpc_send_ping(call, skb);
 
        if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
                rxrpc_notify_socket(call);
@@ -433,9 +444,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
        rxrpc_put_call(call, rxrpc_call_put);
 
        _leave(" = %p{%d}", call, call->debug_id);
-out:
-       spin_unlock(&rx->incoming_lock);
        return call;
+
+no_call:
+       spin_unlock(&rx->incoming_lock);
+       _leave(" = NULL [%u]", skb->mark);
+       return NULL;
 }
 
 /*
index a1ceef4f5cd07754fff2fa5e1472ff60a50a272e..808a4723f8684c849da712808938df11f3ce4872 100644 (file)
@@ -376,21 +376,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
        _enter("{%d}", conn->debug_id);
 
        ASSERT(conn->security_ix != 0);
-
-       if (!conn->params.key) {
-               _debug("set up security");
-               ret = rxrpc_init_server_conn_security(conn);
-               switch (ret) {
-               case 0:
-                       break;
-               case -ENOENT:
-                       abort_code = RX_CALL_DEAD;
-                       goto abort;
-               default:
-                       abort_code = RXKADNOAUTH;
-                       goto abort;
-               }
-       }
+       ASSERT(conn->server_key);
 
        if (conn->security->issue_challenge(conn) < 0) {
                abort_code = RX_CALL_DEAD;
index 123d6ceab15cb0b00ccf65aec61370b64cda811b..21da48e3d2e5188b79370910753dac54cfd14760 100644 (file)
@@ -148,6 +148,8 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
  */
 void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
                                   struct rxrpc_connection *conn,
+                                  const struct rxrpc_security *sec,
+                                  struct key *key,
                                   struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -160,6 +162,8 @@ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
        conn->service_id        = sp->hdr.serviceId;
        conn->security_ix       = sp->hdr.securityIndex;
        conn->out_clientflag    = 0;
+       conn->security          = sec;
+       conn->server_key        = key_get(key);
        if (conn->security_ix)
                conn->state     = RXRPC_CONN_SERVICE_UNSECURED;
        else
index 157be1ff8697be438a7ee6636e37671169fff3df..86bd133b4fa0a858595eb2e259d99b31e58c47e7 100644 (file)
@@ -192,22 +192,6 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
        goto out_no_clear_ca;
 }
 
-/*
- * Ping the other end to fill our RTT cache and to retrieve the rwind
- * and MTU parameters.
- */
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
-{
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       ktime_t now = skb->tstamp;
-
-       if (call->peer->rtt_usage < 3 ||
-           ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
-               rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
-                                 true, true,
-                                 rxrpc_propose_ack_ping_for_params);
-}
-
 /*
  * Apply a hard ACK by advancing the Tx window.
  */
@@ -1396,8 +1380,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
                call = rxrpc_new_incoming_call(local, rx, skb);
                if (!call)
                        goto reject_packet;
-               rxrpc_send_ping(call, skb);
-               mutex_unlock(&call->user_mutex);
        }
 
        /* Process a call packet; this either discards or passes on the ref
index 8d8aa3c230b5515d8cde676432c77a733f14afe5..098f1f9ec53ba10642dc0c3d2c608a44de688e5b 100644 (file)
@@ -648,9 +648,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
        u32 serial;
        int ret;
 
-       _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key));
+       _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
 
-       ret = key_validate(conn->params.key);
+       ret = key_validate(conn->server_key);
        if (ret < 0)
                return ret;
 
@@ -1293,6 +1293,7 @@ static void rxkad_exit(void)
 const struct rxrpc_security rxkad = {
        .name                           = "rxkad",
        .security_index                 = RXRPC_SECURITY_RXKAD,
+       .no_key_abort                   = RXKADUNKNOWNKEY,
        .init                           = rxkad_init,
        .exit                           = rxkad_exit,
        .init_connection_security       = rxkad_init_connection_security,
index a4c47d2b705478eabf87b699da6852207f525f56..9b1fb9ed07177215aa533666d9af0fbc8bda791e 100644 (file)
@@ -101,62 +101,58 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
 }
 
 /*
- * initialise the security on a server connection
+ * Find the security key for a server connection.
  */
-int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
+bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx,
+                                  const struct rxrpc_security **_sec,
+                                  struct key **_key,
+                                  struct sk_buff *skb)
 {
        const struct rxrpc_security *sec;
-       struct rxrpc_local *local = conn->params.local;
-       struct rxrpc_sock *rx;
-       struct key *key;
-       key_ref_t kref;
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       key_ref_t kref = NULL;
        char kdesc[5 + 1 + 3 + 1];
 
        _enter("");
 
-       sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix);
+       sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex);
 
-       sec = rxrpc_security_lookup(conn->security_ix);
+       sec = rxrpc_security_lookup(sp->hdr.securityIndex);
        if (!sec) {
-               _leave(" = -ENOKEY [lookup]");
-               return -ENOKEY;
+               trace_rxrpc_abort(0, "SVS",
+                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                                 RX_INVALID_OPERATION, EKEYREJECTED);
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+               skb->priority = RX_INVALID_OPERATION;
+               return false;
        }
 
-       /* find the service */
-       read_lock(&local->services_lock);
-       rx = rcu_dereference_protected(local->service,
-                                      lockdep_is_held(&local->services_lock));
-       if (rx && (rx->srx.srx_service == conn->service_id ||
-                  rx->second_service == conn->service_id))
-               goto found_service;
+       if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE)
+               goto out;
 
-       /* the service appears to have died */
-       read_unlock(&local->services_lock);
-       _leave(" = -ENOENT");
-       return -ENOENT;
-
-found_service:
        if (!rx->securities) {
-               read_unlock(&local->services_lock);
-               _leave(" = -ENOKEY");
-               return -ENOKEY;
+               trace_rxrpc_abort(0, "SVR",
+                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                                 RX_INVALID_OPERATION, EKEYREJECTED);
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+               skb->priority = RX_INVALID_OPERATION;
+               return false;
        }
 
        /* look through the service's keyring */
        kref = keyring_search(make_key_ref(rx->securities, 1UL),
                              &key_type_rxrpc_s, kdesc, true);
        if (IS_ERR(kref)) {
-               read_unlock(&local->services_lock);
-               _leave(" = %ld [search]", PTR_ERR(kref));
-               return PTR_ERR(kref);
+               trace_rxrpc_abort(0, "SVK",
+                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                                 sec->no_key_abort, EKEYREJECTED);
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+               skb->priority = sec->no_key_abort;
+               return false;
        }
 
-       key = key_ref_to_ptr(kref);
-       read_unlock(&local->services_lock);
-
-       conn->server_key = key;
-       conn->security = sec;
-
-       _leave(" = 0");
-       return 0;
+out:
+       *_sec = sec;
+       *_key = key_ref_to_ptr(kref);
+       return true;
 }
index 40038c321b4a970dc940714ccda4b39f0d261d6a..19649623493b158b3008c82ce2409ae80ffa6dc6 100644 (file)
@@ -360,6 +360,16 @@ static int tcf_ctinfo_search(struct net *net, struct tc_action **a, u32 index)
        return tcf_idr_search(tn, a, index);
 }
 
+static void tcf_ctinfo_cleanup(struct tc_action *a)
+{
+       struct tcf_ctinfo *ci = to_ctinfo(a);
+       struct tcf_ctinfo_params *cp;
+
+       cp = rcu_dereference_protected(ci->params, 1);
+       if (cp)
+               kfree_rcu(cp, rcu);
+}
+
 static struct tc_action_ops act_ctinfo_ops = {
        .kind   = "ctinfo",
        .id     = TCA_ID_CTINFO,
@@ -367,6 +377,7 @@ static struct tc_action_ops act_ctinfo_ops = {
        .act    = tcf_ctinfo_act,
        .dump   = tcf_ctinfo_dump,
        .init   = tcf_ctinfo_init,
+       .cleanup= tcf_ctinfo_cleanup,
        .walk   = tcf_ctinfo_walker,
        .lookup = tcf_ctinfo_search,
        .size   = sizeof(struct tcf_ctinfo),
index 5e6379028fc392031f4b84599f666a2c61f071d2..c1fcd85719d6a7fa86e65ebb89e82e0d931b7ea0 100644 (file)
@@ -537,6 +537,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        }
 
        ife = to_ife(*a);
+       if (ret == ACT_P_CREATED)
+               INIT_LIST_HEAD(&ife->metalist);
+
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;
@@ -566,10 +569,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                p->eth_type = ife_type;
        }
 
-
-       if (ret == ACT_P_CREATED)
-               INIT_LIST_HEAD(&ife->metalist);
-
        if (tb[TCA_IFE_METALST]) {
                err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
                                                  tb[TCA_IFE_METALST], NULL,
index 1e3eb3a975324a2630f1b66f08655ea6de8d331c..1ad300e6dbc0a0bfad2c278101e6f3446752ec2e 100644 (file)
@@ -219,8 +219,10 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
        bool use_reinsert;
        bool want_ingress;
        bool is_redirect;
+       bool expects_nh;
        int m_eaction;
        int mac_len;
+       bool at_nh;
 
        rec_level = __this_cpu_inc_return(mirred_rec_level);
        if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
@@ -261,19 +263,19 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                        goto out;
        }
 
-       /* If action's target direction differs than filter's direction,
-        * and devices expect a mac header on xmit, then mac push/pull is
-        * needed.
-        */
        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
-       if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
-               if (!skb_at_tc_ingress(skb)) {
-                       /* caught at egress, act ingress: pull mac */
-                       mac_len = skb_network_header(skb) - skb_mac_header(skb);
+
+       expects_nh = want_ingress || !m_mac_header_xmit;
+       at_nh = skb->data == skb_network_header(skb);
+       if (at_nh != expects_nh) {
+               mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
+                         skb_network_header(skb) - skb_mac_header(skb);
+               if (expects_nh) {
+                       /* target device/action expect data at nh */
                        skb_pull_rcsum(skb2, mac_len);
                } else {
-                       /* caught at ingress, act egress: push mac */
-                       skb_push_rcsum(skb2, skb->mac_len);
+                       /* target device/action expect data at mac */
+                       skb_push_rcsum(skb2, mac_len);
                }
        }
 
index 6a0eacafdb19117701e70fbec63ca7028ecd14a9..76e0d122616aec71f56ded2b10f696dc1a87c105 100644 (file)
@@ -308,33 +308,12 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                tcf_proto_destroy(tp, rtnl_held, true, extack);
 }
 
-static int walker_check_empty(struct tcf_proto *tp, void *fh,
-                             struct tcf_walker *arg)
+static bool tcf_proto_check_delete(struct tcf_proto *tp)
 {
-       if (fh) {
-               arg->nonempty = true;
-               return -1;
-       }
-       return 0;
-}
-
-static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
-{
-       struct tcf_walker walker = { .fn = walker_check_empty, };
-
-       if (tp->ops->walk) {
-               tp->ops->walk(tp, &walker, rtnl_held);
-               return !walker.nonempty;
-       }
-       return true;
-}
+       if (tp->ops->delete_empty)
+               return tp->ops->delete_empty(tp);
 
-static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
-{
-       spin_lock(&tp->lock);
-       if (tcf_proto_is_empty(tp, rtnl_held))
-               tp->deleting = true;
-       spin_unlock(&tp->lock);
+       tp->deleting = true;
        return tp->deleting;
 }
 
@@ -1751,7 +1730,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
         * concurrently.
         * Mark tp for deletion if it is empty.
         */
-       if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
+       if (!tp_iter || !tcf_proto_check_delete(tp)) {
                mutex_unlock(&chain->filter_chain_lock);
                return;
        }
index 0d125de54285058591dcff0a6b400690a94679af..b0f42e62dd7607b4ee55ad39443ca075b9356bcf 100644 (file)
@@ -2773,6 +2773,17 @@ static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
                f->res.class = cl;
 }
 
+static bool fl_delete_empty(struct tcf_proto *tp)
+{
+       struct cls_fl_head *head = fl_head_dereference(tp);
+
+       spin_lock(&tp->lock);
+       tp->deleting = idr_is_empty(&head->handle_idr);
+       spin_unlock(&tp->lock);
+
+       return tp->deleting;
+}
+
 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
        .kind           = "flower",
        .classify       = fl_classify,
@@ -2782,6 +2793,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
        .put            = fl_put,
        .change         = fl_change,
        .delete         = fl_delete,
+       .delete_empty   = fl_delete_empty,
        .walk           = fl_walk,
        .reoffload      = fl_reoffload,
        .hw_add         = fl_hw_add,
index 66c6bcec16cbc2e06f1e356a3a114a8154bb55a4..a0e6fac613de215bd2ca588624e2f3edda7c19ff 100644 (file)
@@ -1108,33 +1108,10 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        return err;
 }
 
-static bool u32_hnode_empty(struct tc_u_hnode *ht, bool *non_root_ht)
-{
-       int i;
-
-       if (!ht)
-               return true;
-       if (!ht->is_root) {
-               *non_root_ht = true;
-               return false;
-       }
-       if (*non_root_ht)
-               return false;
-       if (ht->refcnt < 2)
-               return true;
-
-       for (i = 0; i <= ht->divisor; i++) {
-               if (rtnl_dereference(ht->ht[i]))
-                       return false;
-       }
-       return true;
-}
-
 static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
                     bool rtnl_held)
 {
        struct tc_u_common *tp_c = tp->data;
-       bool non_root_ht = false;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        unsigned int h;
@@ -1147,8 +1124,6 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
             ht = rtnl_dereference(ht->next)) {
                if (ht->prio != tp->prio)
                        continue;
-               if (u32_hnode_empty(ht, &non_root_ht))
-                       return;
                if (arg->count >= arg->skip) {
                        if (arg->fn(tp, ht, arg) < 0) {
                                arg->stop = 1;
index e0f40400f679c2ae40aecd43c2a4118fab515505..2277369feae58bfb9c70b449e7e096ae5f61dae1 100644 (file)
@@ -1769,7 +1769,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                                      q->avg_window_begin));
                        u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
 
-                       do_div(b, window_interval);
+                       b = div64_u64(b, window_interval);
                        q->avg_peak_bandwidth =
                                cake_ewma(q->avg_peak_bandwidth, b,
                                          b > q->avg_peak_bandwidth ? 2 : 8);
index b1c7e726ce5d1ae139f765c5b92dfdaea9bee258..a5a295477eccd52952e26e2ce121315341dddd0f 100644 (file)
@@ -301,6 +301,9 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
+                               if (q->rate_enable)
+                                       smp_store_release(&sk->sk_pacing_status,
+                                                         SK_PACING_FQ);
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
@@ -322,8 +325,12 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 
        fq_flow_set_detached(f);
        f->sk = sk;
-       if (skb->sk == sk)
+       if (skb->sk == sk) {
                f->socket_hash = sk->sk_hash;
+               if (q->rate_enable)
+                       smp_store_release(&sk->sk_pacing_status,
+                                         SK_PACING_FQ);
+       }
        f->credit = q->initial_quantum;
 
        rb_link_node(&f->fq_node, parent, p);
@@ -428,17 +435,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        f->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
-               struct sock *sk = skb->sk;
-
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
-               if (sk && q->rate_enable) {
-                       if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
-                                    SK_PACING_FQ))
-                               smp_store_release(&sk->sk_pacing_status,
-                                                 SK_PACING_FQ);
-               }
                q->inactive_flows--;
        }
 
@@ -787,10 +786,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
 
-               if (quantum > 0)
+               if (quantum > 0 && quantum <= (1 << 20)) {
                        q->quantum = quantum;
-               else
+               } else {
+                       NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
                        err = -EINVAL;
+               }
        }
 
        if (tb[TCA_FQ_INITIAL_QUANTUM])
index 18b884cfdfe8de12223dfa3f8058b8c31726a885..647941702f9fcaa47242382c8489a8ee179064f4 100644 (file)
@@ -292,8 +292,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
        struct tc_prio_qopt_offload graft_offload;
        unsigned long band = arg - 1;
 
-       if (new == NULL)
-               new = &noop_qdisc;
+       if (!new) {
+               new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                       TC_H_MAKE(sch->handle, arg), extack);
+               if (!new)
+                       new = &noop_qdisc;
+               else
+                       qdisc_hash_add(new, true);
+       }
 
        *old = qdisc_replace(sch, new, &q->queues[band]);
 
index acd737d4c0e0a938895ed10fc8aa39e3f908b64f..834e9f82afedc47e99fab3cd140d1d82795e93ac 100644 (file)
@@ -1363,8 +1363,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                        /* Generate an INIT ACK chunk.  */
                        new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
                                                     0);
-                       if (!new_obj)
-                               goto nomem;
+                       if (!new_obj) {
+                               error = -ENOMEM;
+                               break;
+                       }
 
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(new_obj));
@@ -1386,7 +1388,8 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                        if (!new_obj) {
                                if (cmd->obj.chunk)
                                        sctp_chunk_free(cmd->obj.chunk);
-                               goto nomem;
+                               error = -ENOMEM;
+                               break;
                        }
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(new_obj));
@@ -1433,8 +1436,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
 
                        /* Generate a SHUTDOWN chunk.  */
                        new_obj = sctp_make_shutdown(asoc, chunk);
-                       if (!new_obj)
-                               goto nomem;
+                       if (!new_obj) {
+                               error = -ENOMEM;
+                               break;
+                       }
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(new_obj));
                        break;
@@ -1770,11 +1775,17 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                        break;
                }
 
-               if (error)
+               if (error) {
+                       cmd = sctp_next_cmd(commands);
+                       while (cmd) {
+                               if (cmd->verb == SCTP_CMD_REPLY)
+                                       sctp_chunk_free(cmd->obj.chunk);
+                               cmd = sctp_next_cmd(commands);
+                       }
                        break;
+               }
        }
 
-out:
        /* If this is in response to a received chunk, wait until
         * we are done with the packet to open the queue so that we don't
         * send multiple packets in response to a single request.
@@ -1789,7 +1800,4 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                sp->data_ready_signalled = 0;
 
        return error;
-nomem:
-       error = -ENOMEM;
-       goto out;
 }
index 6a30392068a04bfcefcb14c3d7f13fc092d59cd3..c1a100d2fed39c2d831487e05fcbf5e8d507d470 100644 (file)
@@ -84,10 +84,8 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
                return 0;
 
        ret = genradix_prealloc(&stream->out, outcnt, gfp);
-       if (ret) {
-               genradix_free(&stream->out);
+       if (ret)
                return ret;
-       }
 
        stream->outcnt = outcnt;
        return 0;
@@ -102,10 +100,8 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
                return 0;
 
        ret = genradix_prealloc(&stream->in, incnt, gfp);
-       if (ret) {
-               genradix_free(&stream->in);
+       if (ret)
                return ret;
-       }
 
        stream->incnt = incnt;
        return 0;
@@ -123,7 +119,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
         * a new one with new outcnt to save memory if needed.
         */
        if (outcnt == stream->outcnt)
-               goto in;
+               goto handle_in;
 
        /* Filter out chunks queued on streams that won't exist anymore */
        sched->unsched_all(stream);
@@ -132,24 +128,28 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 
        ret = sctp_stream_alloc_out(stream, outcnt, gfp);
        if (ret)
-               goto out;
+               goto out_err;
 
        for (i = 0; i < stream->outcnt; i++)
                SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
 
-in:
+handle_in:
        sctp_stream_interleave_init(stream);
        if (!incnt)
                goto out;
 
        ret = sctp_stream_alloc_in(stream, incnt, gfp);
-       if (ret) {
-               sched->free(stream);
-               genradix_free(&stream->out);
-               stream->outcnt = 0;
-               goto out;
-       }
+       if (ret)
+               goto in_err;
+
+       goto out;
 
+in_err:
+       sched->free(stream);
+       genradix_free(&stream->in);
+out_err:
+       genradix_free(&stream->out);
+       stream->outcnt = 0;
 out:
        return ret;
 }
index 7235a60326712186060ffc918b8cd14c596c2a3c..3bbe1a58ec876ec594256166c948ade2d9b4948b 100644 (file)
@@ -263,7 +263,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 
                pf->af->from_sk(&addr, sk);
                pf->to_sk_daddr(&t->ipaddr, sk);
-               dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+               dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
                pf->to_sk_daddr(&addr, sk);
 
                dst = sctp_transport_dst_check(t);
index 77c7dd7f05e8be8a9793eccee4ba77d4aa6ca15b..fda3889993cbbef28c909cbd334d2820ce0ff3a4 100644 (file)
@@ -77,7 +77,7 @@
 static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
                                       struct rpcrdma_sendctx *sc);
 static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
-static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
+static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
 static struct rpcrdma_regbuf *
@@ -244,6 +244,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
                        ia->ri_id->device->name,
                        rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
 #endif
+               init_completion(&ia->ri_remove_done);
                set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
                ep->rep_connected = -ENODEV;
                xprt_force_disconnect(xprt);
@@ -297,7 +298,6 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
        int rc;
 
        init_completion(&ia->ri_done);
-       init_completion(&ia->ri_remove_done);
 
        id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
                            xprt, RDMA_PS_TCP, IB_QPT_RC);
@@ -421,7 +421,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
        /* The ULP is responsible for ensuring all DMA
         * mappings and MRs are gone.
         */
-       rpcrdma_reps_destroy(buf);
+       rpcrdma_reps_unmap(r_xprt);
        list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
                rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
                rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
@@ -599,6 +599,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
                                    struct ib_qp_init_attr *qp_init_attr)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+       struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        int rc, err;
 
        trace_xprtrdma_reinsert(r_xprt);
@@ -613,6 +614,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
                pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
                goto out2;
        }
+       memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr));
 
        rc = -ENETUNREACH;
        err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
@@ -1090,6 +1092,7 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
        rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        rep->rr_recv_wr.num_sge = 1;
        rep->rr_temp = temp;
+       list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
        return rep;
 
 out_free:
@@ -1100,6 +1103,7 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
 
 static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
 {
+       list_del(&rep->rr_all);
        rpcrdma_regbuf_free(rep->rr_rdmabuf);
        kfree(rep);
 }
@@ -1118,10 +1122,16 @@ static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
 static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
                            struct rpcrdma_rep *rep)
 {
-       if (!rep->rr_temp)
-               llist_add(&rep->rr_node, &buf->rb_free_reps);
-       else
-               rpcrdma_rep_destroy(rep);
+       llist_add(&rep->rr_node, &buf->rb_free_reps);
+}
+
+static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
+{
+       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+       struct rpcrdma_rep *rep;
+
+       list_for_each_entry(rep, &buf->rb_all_reps, rr_all)
+               rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
 }
 
 static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
@@ -1152,6 +1162,7 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 
        INIT_LIST_HEAD(&buf->rb_send_bufs);
        INIT_LIST_HEAD(&buf->rb_allreqs);
+       INIT_LIST_HEAD(&buf->rb_all_reps);
 
        rc = -ENOMEM;
        for (i = 0; i < buf->rb_max_requests; i++) {
@@ -1504,6 +1515,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
        wr = NULL;
        while (needed) {
                rep = rpcrdma_rep_get_locked(buf);
+               if (rep && rep->rr_temp) {
+                       rpcrdma_rep_destroy(rep);
+                       continue;
+               }
                if (!rep)
                        rep = rpcrdma_rep_create(r_xprt, temp);
                if (!rep)
index 5d15140a026601819f851fa5f3b98682e7898167..d796d68609edbf9c0515b26f6488e095ba2c9983 100644 (file)
@@ -203,6 +203,7 @@ struct rpcrdma_rep {
        struct xdr_stream       rr_stream;
        struct llist_node       rr_node;
        struct ib_recv_wr       rr_recv_wr;
+       struct list_head        rr_all;
 };
 
 /* To reduce the rate at which a transport invokes ib_post_recv
@@ -368,6 +369,7 @@ struct rpcrdma_buffer {
 
        struct list_head        rb_allreqs;
        struct list_head        rb_all_mrs;
+       struct list_head        rb_all_reps;
 
        struct llist_head       rb_free_reps;
 
index 11255e970dd45b489450f1b746ec63e481853f49..ee49a9f1dd4fe071edf7e02aa6371f3fd3442ef0 100644 (file)
@@ -9,7 +9,7 @@ tipc-y  += addr.o bcast.o bearer.o \
           core.o link.o discover.o msg.o  \
           name_distr.o  subscr.o monitor.o name_table.o net.o  \
           netlink.o netlink_compat.o node.o socket.o eth_media.o \
-          topsrv.o socket.o group.o trace.o
+          topsrv.o group.o trace.o
 
 CFLAGS_trace.o += -I$(src)
 
@@ -20,5 +20,3 @@ tipc-$(CONFIG_TIPC_CRYPTO)    += crypto.o
 
 
 obj-$(CONFIG_TIPC_DIAG)        += diag.o
-
-tipc_diag-y    := diag.o
index 0254bb7e418bebe772bf46c7ccd997cf95528cb1..217516357ef26223a309be055cb8f6bbc7a78007 100644 (file)
@@ -204,8 +204,8 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
                return -ENOMEM;
        }
 
-       attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1,
-                               sizeof(struct nlattr *), GFP_KERNEL);
+       attrbuf = kcalloc(tipc_genl_family.maxattr + 1,
+                         sizeof(struct nlattr *), GFP_KERNEL);
        if (!attrbuf) {
                err = -ENOMEM;
                goto err_out;
index 6552f986774cadef123a6763deec5ff05ac7269a..f9b4fb92c0b1c98f373bc2346e5d5e77c86d48f8 100644 (file)
@@ -287,12 +287,12 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
  *
  * Caller must hold socket lock
  */
-static void tsk_rej_rx_queue(struct sock *sk)
+static void tsk_rej_rx_queue(struct sock *sk, int error)
 {
        struct sk_buff *skb;
 
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
-               tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
+               tipc_sk_respond(sk, skb, error);
 }
 
 static bool tipc_sk_connected(struct sock *sk)
@@ -545,34 +545,45 @@ static void __tipc_shutdown(struct socket *sock, int error)
        /* Remove pending SYN */
        __skb_queue_purge(&sk->sk_write_queue);
 
-       /* Reject all unreceived messages, except on an active connection
-        * (which disconnects locally & sends a 'FIN+' to peer).
-        */
-       while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-               if (TIPC_SKB_CB(skb)->bytes_read) {
-                       kfree_skb(skb);
-                       continue;
-               }
-               if (!tipc_sk_type_connectionless(sk) &&
-                   sk->sk_state != TIPC_DISCONNECTING) {
-                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
-                       tipc_node_remove_conn(net, dnode, tsk->portid);
-               }
-               tipc_sk_respond(sk, skb, error);
+       /* Remove partially received buffer if any */
+       skb = skb_peek(&sk->sk_receive_queue);
+       if (skb && TIPC_SKB_CB(skb)->bytes_read) {
+               __skb_unlink(skb, &sk->sk_receive_queue);
+               kfree_skb(skb);
        }
 
-       if (tipc_sk_type_connectionless(sk))
+       /* Reject all unreceived messages if connectionless */
+       if (tipc_sk_type_connectionless(sk)) {
+               tsk_rej_rx_queue(sk, error);
                return;
+       }
 
-       if (sk->sk_state != TIPC_DISCONNECTING) {
+       switch (sk->sk_state) {
+       case TIPC_CONNECTING:
+       case TIPC_ESTABLISHED:
+               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+               tipc_node_remove_conn(net, dnode, tsk->portid);
+               /* Send a FIN+/- to its peer */
+               skb = __skb_dequeue(&sk->sk_receive_queue);
+               if (skb) {
+                       __skb_queue_purge(&sk->sk_receive_queue);
+                       tipc_sk_respond(sk, skb, error);
+                       break;
+               }
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
                                      tsk_own_node(tsk), tsk_peer_port(tsk),
                                      tsk->portid, error);
                if (skb)
                        tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
-               tipc_node_remove_conn(net, dnode, tsk->portid);
-               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+               break;
+       case TIPC_LISTEN:
+               /* Reject all SYN messages */
+               tsk_rej_rx_queue(sk, error);
+               break;
+       default:
+               __skb_queue_purge(&sk->sk_receive_queue);
+               break;
        }
 }
 
@@ -2432,8 +2443,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
                        return sock_intr_errno(*timeo_p);
 
                add_wait_queue(sk_sleep(sk), &wait);
-               done = sk_wait_event(sk, timeo_p,
-                                    sk->sk_state != TIPC_CONNECTING, &wait);
+               done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
+                                    &wait);
                remove_wait_queue(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
@@ -2643,7 +2654,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
         * Reject any stray messages received by new socket
         * before the socket lock was taken (very, very unlikely)
         */
-       tsk_rej_rx_queue(new_sk);
+       tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
 
        /* Connect new socket to it's peer */
        tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
index dac24c7aa7d4bb45b3188dae3fb1f6030cd3c299..94774c0e5ff32706dc1a36ebc3133dadde218501 100644 (file)
@@ -732,15 +732,19 @@ static int tls_init(struct sock *sk)
        return rc;
 }
 
-static void tls_update(struct sock *sk, struct proto *p)
+static void tls_update(struct sock *sk, struct proto *p,
+                      void (*write_space)(struct sock *sk))
 {
        struct tls_context *ctx;
 
        ctx = tls_get_ctx(sk);
-       if (likely(ctx))
+       if (likely(ctx)) {
+               ctx->sk_write_space = write_space;
                ctx->sk_proto = p;
-       else
+       } else {
                sk->sk_prot = p;
+               sk->sk_write_space = write_space;
+       }
 }
 
 static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
index c6803a82b769b15a117fc444ad98861a25c16f28..c98e602a1a2ded6f90743723445dd4b4a6a3a4c2 100644 (file)
@@ -256,8 +256,6 @@ static int tls_do_decryption(struct sock *sk,
                        return ret;
 
                ret = crypto_wait_req(ret, &ctx->async_wait);
-       } else if (ret == -EBADMSG) {
-               TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
        }
 
        if (async)
@@ -682,12 +680,32 @@ static int tls_push_record(struct sock *sk, int flags,
 
        split_point = msg_pl->apply_bytes;
        split = split_point && split_point < msg_pl->sg.size;
+       if (unlikely((!split &&
+                     msg_pl->sg.size +
+                     prot->overhead_size > msg_en->sg.size) ||
+                    (split &&
+                     split_point +
+                     prot->overhead_size > msg_en->sg.size))) {
+               split = true;
+               split_point = msg_en->sg.size;
+       }
        if (split) {
                rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
                                           split_point, prot->overhead_size,
                                           &orig_end);
                if (rc < 0)
                        return rc;
+               /* This can happen if above tls_split_open_record allocates
+                * a single large encryption buffer instead of two smaller
+                * ones. In this case adjust pointers and continue without
+                * split.
+                */
+               if (!msg_pl->sg.size) {
+                       tls_merge_open_record(sk, rec, tmp, orig_end);
+                       msg_pl = &rec->msg_plaintext;
+                       msg_en = &rec->msg_encrypted;
+                       split = false;
+               }
                sk_msg_trim(sk, msg_en, msg_pl->sg.size +
                            prot->overhead_size);
        }
@@ -709,6 +727,12 @@ static int tls_push_record(struct sock *sk, int flags,
                sg_mark_end(sk_msg_elem(msg_pl, i));
        }
 
+       if (msg_pl->sg.end < msg_pl->sg.start) {
+               sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
+                        MAX_SKB_FRAGS - msg_pl->sg.start + 1,
+                        msg_pl->sg.data);
+       }
+
        i = msg_pl->sg.start;
        sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
 
@@ -772,7 +796,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
        psock = sk_psock_get(sk);
        if (!psock || !policy) {
                err = tls_push_record(sk, flags, record_type);
-               if (err) {
+               if (err && err != -EINPROGRESS) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
                }
@@ -783,10 +807,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
        if (psock->eval == __SK_NONE) {
                delta = msg->sg.size;
                psock->eval = sk_psock_msg_verdict(sk, psock, msg);
-               if (delta < msg->sg.size)
-                       delta -= msg->sg.size;
-               else
-                       delta = 0;
+               delta -= msg->sg.size;
        }
        if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
            !enospc && !full_record) {
@@ -801,7 +822,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
        switch (psock->eval) {
        case __SK_PASS:
                err = tls_push_record(sk, flags, record_type);
-               if (err < 0) {
+               if (err && err != -EINPROGRESS) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
                        goto out_err;
@@ -1515,7 +1536,9 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
                                if (err == -EINPROGRESS)
                                        tls_advance_record_sn(sk, prot,
                                                              &tls_ctx->rx);
-
+                               else if (err == -EBADMSG)
+                                       TLS_INC_STATS(sock_net(sk),
+                                                     LINUX_MIB_TLSDECRYPTERROR);
                                return err;
                        }
                } else {
index b3bdae74c2435e2445a8badd7f6d3d3561be8908..3492c021925f4b2163ff5aff46d334ae64bbdc41 100644 (file)
@@ -138,28 +138,15 @@ struct hvsock {
  ****************************************************************************
  * The only valid Service GUIDs, from the perspectives of both the host and *
  * Linux VM, that can be connected by the other end, must conform to this   *
- * format: <port>-facb-11e6-bd58-64006a7986d3, and the "port" must be in    *
- * this range [0, 0x7FFFFFFF].                                              *
+ * format: <port>-facb-11e6-bd58-64006a7986d3.                              *
  ****************************************************************************
  *
  * When we write apps on the host to connect(), the GUID ServiceID is used.
  * When we write apps in Linux VM to connect(), we only need to specify the
  * port and the driver will form the GUID and use that to request the host.
  *
- * From the perspective of Linux VM:
- * 1. the local ephemeral port (i.e. the local auto-bound port when we call
- * connect() without explicit bind()) is generated by __vsock_bind_stream(),
- * and the range is [1024, 0xFFFFFFFF).
- * 2. the remote ephemeral port (i.e. the auto-generated remote port for
- * a connect request initiated by the host's connect()) is generated by
- * hvs_remote_addr_init() and the range is [0x80000000, 0xFFFFFFFF).
  */
 
-#define MAX_LISTEN_PORT                        ((u32)0x7FFFFFFF)
-#define MAX_VM_LISTEN_PORT             MAX_LISTEN_PORT
-#define MAX_HOST_LISTEN_PORT           MAX_LISTEN_PORT
-#define MIN_HOST_EPHEMERAL_PORT                (MAX_HOST_LISTEN_PORT + 1)
-
 /* 00000000-facb-11e6-bd58-64006a7986d3 */
 static const guid_t srv_id_template =
        GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
@@ -184,34 +171,6 @@ static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id)
        vsock_addr_init(addr, VMADDR_CID_ANY, port);
 }
 
-static void hvs_remote_addr_init(struct sockaddr_vm *remote,
-                                struct sockaddr_vm *local)
-{
-       static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
-       struct sock *sk;
-
-       /* Remote peer is always the host */
-       vsock_addr_init(remote, VMADDR_CID_HOST, VMADDR_PORT_ANY);
-
-       while (1) {
-               /* Wrap around ? */
-               if (host_ephemeral_port < MIN_HOST_EPHEMERAL_PORT ||
-                   host_ephemeral_port == VMADDR_PORT_ANY)
-                       host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
-
-               remote->svm_port = host_ephemeral_port++;
-
-               sk = vsock_find_connected_socket(remote, local);
-               if (!sk) {
-                       /* Found an available ephemeral port */
-                       return;
-               }
-
-               /* Release refcnt got in vsock_find_connected_socket */
-               sock_put(sk);
-       }
-}
-
 static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
 {
        set_channel_pending_send_size(chan,
@@ -341,12 +300,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
        if_type = &chan->offermsg.offer.if_type;
        if_instance = &chan->offermsg.offer.if_instance;
        conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];
-
-       /* The host or the VM should only listen on a port in
-        * [0, MAX_LISTEN_PORT]
-        */
-       if (!is_valid_srv_id(if_type) ||
-           get_port_by_srv_id(if_type) > MAX_LISTEN_PORT)
+       if (!is_valid_srv_id(if_type))
                return;
 
        hvs_addr_init(&addr, conn_from_host ? if_type : if_instance);
@@ -371,8 +325,11 @@ static void hvs_open_connection(struct vmbus_channel *chan)
                vnew = vsock_sk(new);
 
                hvs_addr_init(&vnew->local_addr, if_type);
-               hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
 
+               /* Remote peer is always the host */
+               vsock_addr_init(&vnew->remote_addr,
+                               VMADDR_CID_HOST, VMADDR_PORT_ANY);
+               vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance);
                ret = vsock_assign_transport(vnew, vsock_sk(sk));
                /* Transport assigned (looking at remote_addr) must be the
                 * same where we received the request.
@@ -766,16 +723,6 @@ static bool hvs_stream_is_active(struct vsock_sock *vsk)
 
 static bool hvs_stream_allow(u32 cid, u32 port)
 {
-       /* The host's port range [MIN_HOST_EPHEMERAL_PORT, 0xFFFFFFFF) is
-        * reserved as ephemeral ports, which are used as the host's ports
-        * when the host initiates connections.
-        *
-        * Perform this check in the guest so an immediate error is produced
-        * instead of a timeout.
-        */
-       if (port > MAX_HOST_LISTEN_PORT)
-               return false;
-
        if (cid == VMADDR_CID_HOST)
                return true;
 
index da5262b2298bda73837facb23781a3dfb8f26ca3..1e97ac5435b236fdaa6b087a579e7e950a2bf669 100644 (file)
@@ -10843,6 +10843,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
                if (err)
                        return err;
 
+               cfg80211_sinfo_release_content(&sinfo);
                if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
                        wdev->cqm_config->last_rssi_event_value =
                                (s8) sinfo.rx_beacon_signal_avg;
@@ -13796,6 +13797,8 @@ static int nl80211_probe_mesh_link(struct sk_buff *skb, struct genl_info *info)
        if (err)
                return err;
 
+       cfg80211_sinfo_release_content(&sinfo);
+
        return rdev_probe_mesh_link(rdev, dev, dest, buf, len);
 }
 
index e853a4fe6f97f2cd716cdd5253a4d47f364b9d8a..e0d34f796d0b3ab934cf7f8945bb0b86440c0285 100644 (file)
@@ -538,6 +538,10 @@ static inline int
 rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
 {
        int ret;
+
+       if (!rdev->ops->set_wiphy_params)
+               return -EOPNOTSUPP;
+
        trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
        ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
        trace_rdev_return_int(&rdev->wiphy, ret);
@@ -1167,6 +1171,16 @@ rdev_start_radar_detection(struct cfg80211_registered_device *rdev,
        return ret;
 }
 
+static inline void
+rdev_end_cac(struct cfg80211_registered_device *rdev,
+            struct net_device *dev)
+{
+       trace_rdev_end_cac(&rdev->wiphy, dev);
+       if (rdev->ops->end_cac)
+               rdev->ops->end_cac(&rdev->wiphy, dev);
+       trace_rdev_return_void(&rdev->wiphy);
+}
+
 static inline int
 rdev_set_mcast_rate(struct cfg80211_registered_device *rdev,
                    struct net_device *dev,
index 446c76d44e65a04878ceeaabda4f428e59e5b0f9..fff9a74891fc433e5ac05970b23aa7f13adaad78 100644 (file)
@@ -2261,14 +2261,15 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
 
 static void handle_channel_custom(struct wiphy *wiphy,
                                  struct ieee80211_channel *chan,
-                                 const struct ieee80211_regdomain *regd)
+                                 const struct ieee80211_regdomain *regd,
+                                 u32 min_bw)
 {
        u32 bw_flags = 0;
        const struct ieee80211_reg_rule *reg_rule = NULL;
        const struct ieee80211_power_rule *power_rule = NULL;
        u32 bw;
 
-       for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+       for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
                reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(chan->center_freq),
                                              regd, bw);
                if (!IS_ERR(reg_rule))
@@ -2324,8 +2325,14 @@ static void handle_band_custom(struct wiphy *wiphy,
        if (!sband)
                return;
 
+       /*
+        * We currently assume that you always want at least 20 MHz,
+        * otherwise channel 12 might get enabled if this rule is
+        * compatible to US, which permits 2402 - 2472 MHz.
+        */
        for (i = 0; i < sband->n_channels; i++)
-               handle_channel_custom(wiphy, &sband->channels[i], regd);
+               handle_channel_custom(wiphy, &sband->channels[i], regd,
+                                     MHZ_TO_KHZ(20));
 }
 
 /* Used by drivers prior to wiphy registration */
@@ -3885,6 +3892,25 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(regulatory_pre_cac_allowed);
 
+static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
+{
+       struct wireless_dev *wdev;
+       /* If we finished CAC or received radar, we should end any
+        * CAC running on the same channels.
+        * the check !cfg80211_chandef_dfs_usable contain 2 options:
+        * either all channels are available - those the CAC_FINISHED
+        * event has effected another wdev state, or there is a channel
+        * in unavailable state in wdev chandef - those the RADAR_DETECTED
+        * event has effected another wdev state.
+        * In both cases we should end the CAC on the wdev.
+        */
+       list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+               if (wdev->cac_started &&
+                   !cfg80211_chandef_dfs_usable(&rdev->wiphy, &wdev->chandef))
+                       rdev_end_cac(rdev, wdev->netdev);
+       }
+}
+
 void regulatory_propagate_dfs_state(struct wiphy *wiphy,
                                    struct cfg80211_chan_def *chandef,
                                    enum nl80211_dfs_state dfs_state,
@@ -3911,8 +3937,10 @@ void regulatory_propagate_dfs_state(struct wiphy *wiphy,
                cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state);
 
                if (event == NL80211_RADAR_DETECTED ||
-                   event == NL80211_RADAR_CAC_FINISHED)
+                   event == NL80211_RADAR_CAC_FINISHED) {
                        cfg80211_sched_dfs_chan_update(rdev);
+                       cfg80211_check_and_end_cac(rdev);
+               }
 
                nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL);
        }
index 7a6c38ddc65adbaf60855749a22f120a86f05a2b..d32a2ec4d96ace3b26b53bc8e895ab305961ae4b 100644 (file)
@@ -1307,14 +1307,14 @@ void cfg80211_autodisconnect_wk(struct work_struct *work)
        if (wdev->conn_owner_nlportid) {
                switch (wdev->iftype) {
                case NL80211_IFTYPE_ADHOC:
-                       cfg80211_leave_ibss(rdev, wdev->netdev, false);
+                       __cfg80211_leave_ibss(rdev, wdev->netdev, false);
                        break;
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_P2P_GO:
-                       cfg80211_stop_ap(rdev, wdev->netdev, false);
+                       __cfg80211_stop_ap(rdev, wdev->netdev, false);
                        break;
                case NL80211_IFTYPE_MESH_POINT:
-                       cfg80211_leave_mesh(rdev, wdev->netdev);
+                       __cfg80211_leave_mesh(rdev, wdev->netdev);
                        break;
                case NL80211_IFTYPE_STATION:
                case NL80211_IFTYPE_P2P_CLIENT:
index d98ad2b3143b04dc1e75f69995a13f725c739984..8677d7ab7d6921fee3b5245dc3ccef25757526ba 100644 (file)
@@ -646,6 +646,11 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa,
        TP_ARGS(wiphy, netdev)
 );
 
+DEFINE_EVENT(wiphy_netdev_evt, rdev_end_cac,
+            TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+            TP_ARGS(wiphy, netdev)
+);
+
 DECLARE_EVENT_CLASS(station_add_change,
        TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac,
                 struct station_parameters *params),
index 5b4ed5bbc542555a4ebd4e8fcfbb6dccf10e9f3f..8481e9ac33da5c71652186e8110b92e025eedae4 100644 (file)
@@ -564,7 +564,7 @@ __frame_add_frag(struct sk_buff *skb, struct page *page,
        struct skb_shared_info *sh = skb_shinfo(skb);
        int page_offset;
 
-       page_ref_inc(page);
+       get_page(page);
        page_offset = ptr - page_address(page);
        skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size);
 }
index 5e677dac2a0ceaa94b6f2d1548133b0fc33da8dc..69102fda9ebd46343a0444d7206799223e24a0fd 100644 (file)
@@ -657,7 +657,8 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
        return NULL;
 }
 
-static int iw_handler_get_iwstats(struct net_device *          dev,
+/* noinline to avoid a bogus warning with -O3 */
+static noinline int iw_handler_get_iwstats(struct net_device * dev,
                                  struct iw_request_info *      info,
                                  union iwreq_data *            wrqu,
                                  char *                        extra)
index 2efe44a346443d6715b271437d3c0d8de52f1f3e..d5b09bbff3754ff4138b6c6effe1c1e52901b266 100644 (file)
@@ -766,6 +766,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
        if (sk->sk_state == TCP_ESTABLISHED)
                goto out;
 
+       rc = -EALREADY; /* Do nothing if call is already in progress */
+       if (sk->sk_state == TCP_SYN_SENT)
+               goto out;
+
        sk->sk_state   = TCP_CLOSE;
        sock->state = SS_UNCONNECTED;
 
@@ -812,7 +816,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
        /* Now the loop */
        rc = -EINPROGRESS;
        if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
-               goto out_put_neigh;
+               goto out;
 
        rc = x25_wait_for_connection_establishment(sk);
        if (rc)
index 6d0125ca8af714e712babe7a425eab4695efed04..20291ec6489f31e4d3380c6d102711fc8e2c9692 100644 (file)
@@ -298,14 +298,14 @@ int main(void)
                req = malloc(sizes.seccomp_notif);
                if (!req)
                        goto out_close;
-               memset(req, 0, sizeof(*req));
 
                resp = malloc(sizes.seccomp_notif_resp);
                if (!resp)
                        goto out_req;
-               memset(resp, 0, sizeof(*resp));
+               memset(resp, 0, sizes.seccomp_notif_resp);
 
                while (1) {
+                       memset(req, 0, sizes.seccomp_notif);
                        if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) {
                                perror("ioctl recv");
                                goto out_resp;
index d33de0b9f4f5f6268460136611756673373e905c..e3569543bdac213f337b420b726daec4a1470009 100644 (file)
@@ -14,8 +14,8 @@ config HAVE_GCC_PLUGINS
          An arch should select this symbol if it supports building with
          GCC plugins.
 
-config GCC_PLUGINS
-       bool
+menuconfig GCC_PLUGINS
+       bool "GCC plugins"
        depends on HAVE_GCC_PLUGINS
        depends on PLUGIN_HOSTCC != ""
        default y
@@ -25,8 +25,7 @@ config GCC_PLUGINS
 
          See Documentation/core-api/gcc-plugins.rst for details.
 
-menu "GCC plugins"
-       depends on GCC_PLUGINS
+if GCC_PLUGINS
 
 config GCC_PLUGIN_CYC_COMPLEXITY
        bool "Compute the cyclomatic complexity of a function" if EXPERT
@@ -113,4 +112,4 @@ config GCC_PLUGIN_ARM_SSP_PER_TASK
        bool
        depends on GCC_PLUGINS && ARM
 
-endmenu
+endif
index 7c230016b08d155bf2abb906f2227a0586154e6c..357dc56bcf30dccc34318766c775c966273cb2ec 100755 (executable)
@@ -136,7 +136,7 @@ mkdir -p debian/source/
 echo "1.0" > debian/source/format
 
 echo $debarch > debian/arch
-extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)"
+extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
 extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
 
 # Generate a simple changelog template
index 09996f2552ee45b1503d3fae321032a90e03cc14..47aff8700547d2434e2b4beeffa89dc59f8b1e96 100644 (file)
@@ -623,7 +623,7 @@ static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
 
 void __aa_bump_ns_revision(struct aa_ns *ns)
 {
-       ns->revision++;
+       WRITE_ONCE(ns->revision, ns->revision + 1);
        wake_up_interruptible(&ns->wait);
 }
 
index 9be7ccb8379edff7b2261ed87c0ec90a4ebe2670..6ceb74e0f7895548c5faaa6f95b7eb8c11367e87 100644 (file)
@@ -317,6 +317,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
 
        if (!bprm || !profile->xattr_count)
                return 0;
+       might_sleep();
 
        /* transition from exec match to xattr set */
        state = aa_dfa_null_transition(profile->xmatch, state);
@@ -361,10 +362,11 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
 }
 
 /**
- * __attach_match_ - find an attachment match
+ * find_attach - do attachment search for unconfined processes
  * @bprm - binprm structure of transitioning task
- * @name - to match against  (NOT NULL)
+ * @ns: the current namespace  (NOT NULL)
  * @head - profile list to walk  (NOT NULL)
+ * @name - to match against  (NOT NULL)
  * @info - info message if there was an error (NOT NULL)
  *
  * Do a linear search on the profiles in the list.  There is a matching
@@ -374,12 +376,11 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
  *
  * Requires: @head not be shared or have appropriate locks held
  *
- * Returns: profile or NULL if no match found
+ * Returns: label or NULL if no match found
  */
-static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
-                                        const char *name,
-                                        struct list_head *head,
-                                        const char **info)
+static struct aa_label *find_attach(const struct linux_binprm *bprm,
+                                   struct aa_ns *ns, struct list_head *head,
+                                   const char *name, const char **info)
 {
        int candidate_len = 0, candidate_xattrs = 0;
        bool conflict = false;
@@ -388,6 +389,8 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
        AA_BUG(!name);
        AA_BUG(!head);
 
+       rcu_read_lock();
+restart:
        list_for_each_entry_rcu(profile, head, base.list) {
                if (profile->label.flags & FLAG_NULL &&
                    &profile->label == ns_unconfined(profile->ns))
@@ -413,16 +416,32 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
                        perm = dfa_user_allow(profile->xmatch, state);
                        /* any accepting state means a valid match. */
                        if (perm & MAY_EXEC) {
-                               int ret;
+                               int ret = 0;
 
                                if (count < candidate_len)
                                        continue;
 
-                               ret = aa_xattrs_match(bprm, profile, state);
-                               /* Fail matching if the xattrs don't match */
-                               if (ret < 0)
-                                       continue;
-
+                               if (bprm && profile->xattr_count) {
+                                       long rev = READ_ONCE(ns->revision);
+
+                                       if (!aa_get_profile_not0(profile))
+                                               goto restart;
+                                       rcu_read_unlock();
+                                       ret = aa_xattrs_match(bprm, profile,
+                                                             state);
+                                       rcu_read_lock();
+                                       aa_put_profile(profile);
+                                       if (rev !=
+                                           READ_ONCE(ns->revision))
+                                               /* policy changed */
+                                               goto restart;
+                                       /*
+                                        * Fail matching if the xattrs don't
+                                        * match
+                                        */
+                                       if (ret < 0)
+                                               continue;
+                               }
                                /*
                                 * TODO: allow for more flexible best match
                                 *
@@ -445,43 +464,28 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
                                candidate_xattrs = ret;
                                conflict = false;
                        }
-               } else if (!strcmp(profile->base.name, name))
+               } else if (!strcmp(profile->base.name, name)) {
                        /*
                         * old exact non-re match, without conditionals such
                         * as xattrs. no more searching required
                         */
-                       return profile;
+                       candidate = profile;
+                       goto out;
+               }
        }
 
-       if (conflict) {
-               *info = "conflicting profile attachments";
+       if (!candidate || conflict) {
+               if (conflict)
+                       *info = "conflicting profile attachments";
+               rcu_read_unlock();
                return NULL;
        }
 
-       return candidate;
-}
-
-/**
- * find_attach - do attachment search for unconfined processes
- * @bprm - binprm structure of transitioning task
- * @ns: the current namespace  (NOT NULL)
- * @list: list to search  (NOT NULL)
- * @name: the executable name to match against  (NOT NULL)
- * @info: info message if there was an error
- *
- * Returns: label or NULL if no match found
- */
-static struct aa_label *find_attach(const struct linux_binprm *bprm,
-                                   struct aa_ns *ns, struct list_head *list,
-                                   const char *name, const char **info)
-{
-       struct aa_profile *profile;
-
-       rcu_read_lock();
-       profile = aa_get_profile(__attach_match(bprm, name, list, info));
+out:
+       candidate = aa_get_newest_profile(candidate);
        rcu_read_unlock();
 
-       return profile ? &profile->label : NULL;
+       return &candidate->label;
 }
 
 static const char *next_name(int xtype, const char *name)
index fe2ebe5e865efcb14a4b84352e006b247f5ec2f2..f1caf3674e1c1cedf39c01fb800d40397218753a 100644 (file)
@@ -618,8 +618,7 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
        fctx = file_ctx(file);
 
        rcu_read_lock();
-       flabel  = aa_get_newest_label(rcu_dereference(fctx->label));
-       rcu_read_unlock();
+       flabel  = rcu_dereference(fctx->label);
        AA_BUG(!flabel);
 
        /* revalidate access, if task is unconfined, or the cached cred
@@ -631,9 +630,13 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
         */
        denied = request & ~fctx->allow;
        if (unconfined(label) || unconfined(flabel) ||
-           (!denied && aa_label_is_subset(flabel, label)))
+           (!denied && aa_label_is_subset(flabel, label))) {
+               rcu_read_unlock();
                goto done;
+       }
 
+       flabel  = aa_get_newest_label(flabel);
+       rcu_read_unlock();
        /* TODO: label cross check */
 
        if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
@@ -643,8 +646,9 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
        else if (S_ISSOCK(file_inode(file)->i_mode))
                error = __file_sock_perm(op, label, flabel, file, request,
                                         denied);
-done:
        aa_put_label(flabel);
+
+done:
        return error;
 }
 
index 4ed6688f9d404c1d73d6da7134b572db760ebb21..e0828ee7a34573edb6519836363d5ab4e462197e 100644 (file)
@@ -442,7 +442,7 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
        buffer = aa_get_buffer(false);
        old_buffer = aa_get_buffer(false);
        error = -ENOMEM;
-       if (!buffer || old_buffer)
+       if (!buffer || !old_buffer)
                goto out;
 
        error = fn_for_each_confined(label, profile,
index 03104830c9132a38574407a32a2c87536dbea8f6..269f2f53c0b115405eda0046249151b605d1d116 100644 (file)
@@ -1125,8 +1125,8 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
        if (!name) {
                /* remove namespace - can only happen if fqname[0] == ':' */
                mutex_lock_nested(&ns->parent->lock, ns->level);
-               __aa_remove_ns(ns);
                __aa_bump_ns_revision(ns);
+               __aa_remove_ns(ns);
                mutex_unlock(&ns->parent->lock);
        } else {
                /* remove profile */
@@ -1138,9 +1138,9 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
                        goto fail_ns_lock;
                }
                name = profile->base.hname;
+               __aa_bump_ns_revision(ns);
                __remove_profile(profile);
                __aa_labelset_update_subtree(ns);
-               __aa_bump_ns_revision(ns);
                mutex_unlock(&ns->lock);
        }
 
index dd3d5942e6692457b505e137e0bd30ef6cb90dab..c36bafbcd77ee4fc29396cbd657c600589448e94 100644 (file)
@@ -951,7 +951,8 @@ static bool tomoyo_manager(void)
        exe = tomoyo_get_exe();
        if (!exe)
                return false;
-       list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) {
+       list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (!ptr->head.is_deleted &&
                    (!tomoyo_pathcmp(domainname, ptr->manager) ||
                     !strcmp(exe, ptr->manager->name))) {
@@ -1095,7 +1096,8 @@ static int tomoyo_delete_domain(char *domainname)
        if (mutex_lock_interruptible(&tomoyo_policy_lock))
                return -EINTR;
        /* Is there an active domain? */
-       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                /* Never delete tomoyo_kernel_domain */
                if (domain == &tomoyo_kernel_domain)
                        continue;
@@ -2778,7 +2780,8 @@ void tomoyo_check_profile(void)
 
        tomoyo_policy_loaded = true;
        pr_info("TOMOYO: 2.6.0\n");
-       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                const u8 profile = domain->profile;
                struct tomoyo_policy_namespace *ns = domain->ns;
 
index 8526a0a74023855bbf383e56528e7ebf8107a8a9..7869d6a9980bfc4e7c0cf70a826d7b5661aaef67 100644 (file)
@@ -41,7 +41,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
 
        if (mutex_lock_interruptible(&tomoyo_policy_lock))
                return -ENOMEM;
-       list_for_each_entry_rcu(entry, list, list) {
+       list_for_each_entry_rcu(entry, list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
                        continue;
                if (!check_duplicate(entry, new_entry))
@@ -119,7 +120,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
        }
        if (mutex_lock_interruptible(&tomoyo_policy_lock))
                goto out;
-       list_for_each_entry_rcu(entry, list, list) {
+       list_for_each_entry_rcu(entry, list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
                        continue;
                if (!tomoyo_same_acl_head(entry, new_entry) ||
@@ -166,7 +168,8 @@ void tomoyo_check_acl(struct tomoyo_request_info *r,
        u16 i = 0;
 
 retry:
-       list_for_each_entry_rcu(ptr, list, list) {
+       list_for_each_entry_rcu(ptr, list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (ptr->is_deleted || ptr->type != r->param_type)
                        continue;
                if (!check_entry(r, ptr))
@@ -298,7 +301,8 @@ static inline bool tomoyo_scan_transition
 {
        const struct tomoyo_transition_control *ptr;
 
-       list_for_each_entry_rcu(ptr, list, head.list) {
+       list_for_each_entry_rcu(ptr, list, head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (ptr->head.is_deleted || ptr->type != type)
                        continue;
                if (ptr->domainname) {
@@ -735,7 +739,8 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
 
                /* Check 'aggregator' directive. */
                candidate = &exename;
-               list_for_each_entry_rcu(ptr, list, head.list) {
+               list_for_each_entry_rcu(ptr, list, head.list,
+                                       srcu_read_lock_held(&tomoyo_ss)) {
                        if (ptr->head.is_deleted ||
                            !tomoyo_path_matches_pattern(&exename,
                                                         ptr->original_name))
index a37c7dc66e4448e41a498b5005950e6d127833e3..1cecdd7975971224683fc5f660eaf5e29d2fdacf 100644 (file)
@@ -133,7 +133,8 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
 {
        struct tomoyo_path_group *member;
 
-       list_for_each_entry_rcu(member, &group->member_list, head.list) {
+       list_for_each_entry_rcu(member, &group->member_list, head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (member->head.is_deleted)
                        continue;
                if (!tomoyo_path_matches_pattern(pathname, member->member_name))
@@ -161,7 +162,8 @@ bool tomoyo_number_matches_group(const unsigned long min,
        struct tomoyo_number_group *member;
        bool matched = false;
 
-       list_for_each_entry_rcu(member, &group->member_list, head.list) {
+       list_for_each_entry_rcu(member, &group->member_list, head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (member->head.is_deleted)
                        continue;
                if (min > member->number.values[1] ||
@@ -191,7 +193,8 @@ bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
        bool matched = false;
        const u8 size = is_ipv6 ? 16 : 4;
 
-       list_for_each_entry_rcu(member, &group->member_list, head.list) {
+       list_for_each_entry_rcu(member, &group->member_list, head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (member->head.is_deleted)
                        continue;
                if (member->address.is_ipv6 != is_ipv6)
index e7832448d721d5acaa502b3a47f17a4fa0a94e86..bf38fc1b59b2878b42533c349a37646fb2630e0b 100644 (file)
@@ -217,31 +217,6 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
        return ERR_PTR(-ENOMEM);
 }
 
-/**
- * tomoyo_get_socket_name - Get the name of a socket.
- *
- * @path:   Pointer to "struct path".
- * @buffer: Pointer to buffer to return value in.
- * @buflen: Sizeof @buffer.
- *
- * Returns the buffer.
- */
-static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
-                                   const int buflen)
-{
-       struct inode *inode = d_backing_inode(path->dentry);
-       struct socket *sock = inode ? SOCKET_I(inode) : NULL;
-       struct sock *sk = sock ? sock->sk : NULL;
-
-       if (sk) {
-               snprintf(buffer, buflen, "socket:[family=%u:type=%u:protocol=%u]",
-                        sk->sk_family, sk->sk_type, sk->sk_protocol);
-       } else {
-               snprintf(buffer, buflen, "socket:[unknown]");
-       }
-       return buffer;
-}
-
 /**
  * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
  *
@@ -279,12 +254,7 @@ char *tomoyo_realpath_from_path(const struct path *path)
                        break;
                /* To make sure that pos is '\0' terminated. */
                buf[buf_len - 1] = '\0';
-               /* Get better name for socket. */
-               if (sb->s_magic == SOCKFS_MAGIC) {
-                       pos = tomoyo_get_socket_name(path, buf, buf_len - 1);
-                       goto encode;
-               }
-               /* For "pipe:[\$]". */
+               /* For "pipe:[\$]" and "socket:[\$]". */
                if (dentry->d_op && dentry->d_op->d_dname) {
                        pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
                        goto encode;
index 52752e1a84ed8f9131d303896e3a3af7504247da..eba0b3395851e0ae4a3be8be16dc37f28dccd726 100644 (file)
@@ -594,7 +594,8 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
 
        name.name = domainname;
        tomoyo_fill_path_info(&name);
-       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (!domain->is_deleted &&
                    !tomoyo_pathcmp(&name, domain->domainname))
                        return domain;
@@ -1028,7 +1029,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
                return false;
        if (!domain)
                return true;
-       list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+       list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                u16 perm;
                u8 i;
 
index 63dc7bdb622df37ef1827121459bc9f9d4f78782..be59b59c9be40c233995817f3ddb3e5d71fbb6d7 100644 (file)
@@ -471,15 +471,19 @@ void snd_seq_info_timer_read(struct snd_info_entry *entry,
                q = queueptr(idx);
                if (q == NULL)
                        continue;
-               if ((tmr = q->timer) == NULL ||
-                   (ti = tmr->timeri) == NULL) {
-                       queuefree(q);
-                       continue;
-               }
+               mutex_lock(&q->timer_mutex);
+               tmr = q->timer;
+               if (!tmr)
+                       goto unlock;
+               ti = tmr->timeri;
+               if (!ti)
+                       goto unlock;
                snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
                resolution = snd_timer_resolution(ti) * tmr->ticks;
                snd_iprintf(buffer, "  Period time : %lu.%09lu\n", resolution / 1000000000, resolution % 1000000000);
                snd_iprintf(buffer, "  Skew : %u / %u\n", tmr->skew, tmr->skew_base);
+unlock:
+               mutex_unlock(&q->timer_mutex);
                queuefree(q);
        }
 }
index a63fcbc875adc3b9906f90d13fb3f5c8ea4ebbec..02f4a8318e38e7309da42b3274a59fee95d8349f 100644 (file)
@@ -159,8 +159,11 @@ int snd_dice_detect_extension_formats(struct snd_dice *dice)
                int j;
 
                for (j = i + 1; j < 9; ++j) {
-                       if (pointers[i * 2] == pointers[j * 2])
+                       if (pointers[i * 2] == pointers[j * 2]) {
+                               // Fallback to limited functionality.
+                               err = -ENXIO;
                                goto end;
+                       }
                }
        }
 
index e80bb84c43f6780fc7006fd3930b2c546e80b2e3..f823a2ab3544bf769101e5c263b7e7d3ca96dd74 100644 (file)
@@ -157,14 +157,15 @@ static void read_status_messages(struct amdtp_stream *s,
                        if ((before ^ after) & mask) {
                                struct snd_firewire_tascam_change *entry =
                                                &tscm->queue[tscm->push_pos];
+                               unsigned long flag;
 
-                               spin_lock_irq(&tscm->lock);
+                               spin_lock_irqsave(&tscm->lock, flag);
                                entry->index = index;
                                entry->before = before;
                                entry->after = after;
                                if (++tscm->push_pos >= SND_TSCM_QUEUE_COUNT)
                                        tscm->push_pos = 0;
-                               spin_unlock_irq(&tscm->lock);
+                               spin_unlock_irqrestore(&tscm->lock, flag);
 
                                wake_up(&tscm->hwdep_wait);
                        }
index 906b1e20bae01204c5a3bd3cacf9f38bd6318630..286361ecd6404dd5714ea1175a941ce732f890fc 100644 (file)
@@ -363,7 +363,6 @@ static const struct regmap_config hda_regmap_cfg = {
        .reg_write = hda_reg_write,
        .use_single_read = true,
        .use_single_write = true,
-       .disable_locking = true,
 };
 
 /**
index b856b89378ac556d1dead5ec23b875542ac6d736..8ef223aa1e37ef245ca41c34b7dbe363ecca9a2c 100644 (file)
@@ -125,7 +125,7 @@ static char *patch[SNDRV_CARDS];
 static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
                                        CONFIG_SND_HDA_INPUT_BEEP_MODE};
 #endif
-static bool dsp_driver = 1;
+static bool dmic_detect = 1;
 
 module_param_array(index, int, NULL, 0444);
 MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
@@ -160,9 +160,10 @@ module_param_array(beep_mode, bool, NULL, 0444);
 MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "
                            "(0=off, 1=on) (default=1).");
 #endif
-module_param(dsp_driver, bool, 0444);
-MODULE_PARM_DESC(dsp_driver, "Allow DSP driver selection (bypass this driver) "
-                            "(0=off, 1=on) (default=1)");
+module_param(dmic_detect, bool, 0444);
+MODULE_PARM_DESC(dmic_detect, "Allow DSP driver selection (bypass this driver) "
+                            "(0=off, 1=on) (default=1); "
+                "deprecated, use snd-intel-dspcfg.dsp_driver option instead");
 
 #ifdef CONFIG_PM
 static int param_set_xint(const char *val, const struct kernel_param *kp);
@@ -282,12 +283,13 @@ enum {
 
 /* quirks for old Intel chipsets */
 #define AZX_DCAPS_INTEL_ICH \
-       (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE)
+       (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE |\
+        AZX_DCAPS_SYNC_WRITE)
 
 /* quirks for Intel PCH */
 #define AZX_DCAPS_INTEL_PCH_BASE \
        (AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\
-        AZX_DCAPS_SNOOP_TYPE(SCH))
+        AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
 
 /* PCH up to IVB; no runtime PM; bind with i915 gfx */
 #define AZX_DCAPS_INTEL_PCH_NOPM \
@@ -302,13 +304,13 @@ enum {
 #define AZX_DCAPS_INTEL_HASWELL \
        (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\
         AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
-        AZX_DCAPS_SNOOP_TYPE(SCH))
+        AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
 
 /* Broadwell HDMI can't use position buffer reliably, force to use LPIB */
 #define AZX_DCAPS_INTEL_BROADWELL \
        (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\
         AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
-        AZX_DCAPS_SNOOP_TYPE(SCH))
+        AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
 
 #define AZX_DCAPS_INTEL_BAYTRAIL \
        (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT)
@@ -1410,7 +1412,17 @@ static bool atpx_present(void)
        acpi_handle dhandle, atpx_handle;
        acpi_status status;
 
-       while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+               dhandle = ACPI_HANDLE(&pdev->dev);
+               if (dhandle) {
+                       status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+                       if (!ACPI_FAILURE(status)) {
+                               pci_dev_put(pdev);
+                               return true;
+                       }
+               }
+       }
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
                dhandle = ACPI_HANDLE(&pdev->dev);
                if (dhandle) {
                        status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
@@ -2088,11 +2100,13 @@ static int azx_probe(struct pci_dev *pci,
        /*
         * stop probe if another Intel's DSP driver should be activated
         */
-       if (dsp_driver) {
+       if (dmic_detect) {
                err = snd_intel_dsp_driver_probe(pci);
                if (err != SND_INTEL_DSP_DRIVER_ANY &&
                    err != SND_INTEL_DSP_DRIVER_LEGACY)
                        return -ENODEV;
+       } else {
+               dev_warn(&pci->dev, "dmic_detect option is deprecated, pass snd-intel-dspcfg.dsp_driver=1 option instead\n");
        }
 
        err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
index dbfafee97931a42952dd33690bc4f80209e39cde..f2ea3528bfb1d3eb91227aa8cb8b2cc393b50c81 100644 (file)
@@ -412,6 +412,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0672:
                alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
                break;
+       case 0x10ec0222:
        case 0x10ec0623:
                alc_update_coef_idx(codec, 0x19, 1<<13, 0);
                break;
@@ -430,6 +431,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
                break;
        case 0x10ec0899:
        case 0x10ec0900:
+       case 0x10ec0b00:
        case 0x10ec1168:
        case 0x10ec1220:
                alc_update_coef_idx(codec, 0x7, 1<<1, 0);
@@ -501,6 +503,7 @@ static void alc_shutup_pins(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
 
        switch (codec->core.vendor_id) {
+       case 0x10ec0283:
        case 0x10ec0286:
        case 0x10ec0288:
        case 0x10ec0298:
@@ -2525,6 +2528,7 @@ static int patch_alc882(struct hda_codec *codec)
        case 0x10ec0882:
        case 0x10ec0885:
        case 0x10ec0900:
+       case 0x10ec0b00:
        case 0x10ec1220:
                break;
        default:
@@ -5904,9 +5908,12 @@ enum {
        ALC256_FIXUP_ASUS_HEADSET_MIC,
        ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC299_FIXUP_PREDATOR_SPK,
-       ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
        ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
-       ALC294_FIXUP_ASUS_INTSPK_GPIO,
+       ALC289_FIXUP_DELL_SPK2,
+       ALC289_FIXUP_DUAL_SPK,
+       ALC294_FIXUP_SPK2_TO_DAC1,
+       ALC294_FIXUP_ASUS_DUAL_SPK,
+
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6981,33 +6988,45 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                }
        },
-       [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
+       [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
-                       { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
-                       { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
+                       { 0x19, 0x04a11040 },
+                       { 0x21, 0x04211020 },
                        { }
                },
                .chained = true,
-               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
        },
-       [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
+       [ALC289_FIXUP_DELL_SPK2] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
-                       { 0x19, 0x04a11040 },
-                       { 0x21, 0x04211020 },
+                       { 0x17, 0x90170130 }, /* bass spk */
                        { }
                },
                .chained = true,
-               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+               .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
        },
-       [ALC294_FIXUP_ASUS_INTSPK_GPIO] = {
+       [ALC289_FIXUP_DUAL_SPK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_speaker2_to_dac1,
+               .chained = true,
+               .chain_id = ALC289_FIXUP_DELL_SPK2
+       },
+       [ALC294_FIXUP_SPK2_TO_DAC1] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_speaker2_to_dac1,
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+       },
+       [ALC294_FIXUP_ASUS_DUAL_SPK] = {
                .type = HDA_FIXUP_FUNC,
                /* The GPIO must be pulled to initialize the AMP */
                .v.func = alc_fixup_gpio4,
                .chained = true,
-               .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC
+               .chain_id = ALC294_FIXUP_SPK2_TO_DAC1
        },
+
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7080,6 +7099,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -7167,7 +7188,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
-       SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO),
+       SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
@@ -7239,6 +7260,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
        SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -9237,6 +9259,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
+       HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882),
        {} /* terminator */
index c80a16ee6e76171ed5123e0c374b83afbd1e6545..242542e23d283b31e01a1154fdf4103d9c9a3132 100644 (file)
@@ -647,6 +647,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
        unsigned long flags;
        unsigned char mclk_change;
        unsigned int i, old_rate;
+       bool call_set_rate = false;
 
        if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
                return -EINVAL;
@@ -670,7 +671,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
                 * setting clock rate for internal clock mode */
                old_rate = ice->get_rate(ice);
                if (force || (old_rate != rate))
-                       ice->set_rate(ice, rate);
+                       call_set_rate = true;
                else if (rate == ice->cur_rate) {
                        spin_unlock_irqrestore(&ice->reg_lock, flags);
                        return 0;
@@ -678,12 +679,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
        }
 
        ice->cur_rate = rate;
+       spin_unlock_irqrestore(&ice->reg_lock, flags);
+
+       if (call_set_rate)
+               ice->set_rate(ice, rate);
 
        /* setting master clock */
        mclk_change = ice->set_mclk(ice, rate);
 
-       spin_unlock_irqrestore(&ice->reg_lock, flags);
-
        if (mclk_change && ice->gpio.i2s_mclk_changed)
                ice->gpio.i2s_mclk_changed(ice);
        if (ice->gpio.set_pro_rate)
index 7b17f39a6a102f9a1ceadfe6fcf3e39c995b4008..ce3ed056ea8be950e12cda094381f1a6a2b150eb 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <crypto/hash.h>
 #include <crypto/sha.h>
+#include <linux/acpi.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/io.h>
@@ -1047,10 +1048,17 @@ static const struct of_device_id cros_ec_codec_of_match[] = {
 MODULE_DEVICE_TABLE(of, cros_ec_codec_of_match);
 #endif
 
+static const struct acpi_device_id cros_ec_codec_acpi_id[] = {
+       { "GOOG0013", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_codec_acpi_id);
+
 static struct platform_driver cros_ec_codec_platform_driver = {
        .driver = {
                .name = "cros-ec-codec",
                .of_match_table = of_match_ptr(cros_ec_codec_of_match),
+               .acpi_match_table = ACPI_PTR(cros_ec_codec_acpi_id),
        },
        .probe = cros_ec_codec_platform_probe,
 };
index 6803d39e09a5fed0bb922cf9d66a0eb2ca6de32c..43110151e928d345708394c6ae3a266441dd441f 100644 (file)
@@ -588,7 +588,9 @@ static int hdac_hda_dev_remove(struct hdac_device *hdev)
        struct hdac_hda_priv *hda_pvt;
 
        hda_pvt = dev_get_drvdata(&hdev->dev);
-       cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work);
+       if (hda_pvt && hda_pvt->codec.registered)
+               cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work);
+
        return 0;
 }
 
index f53235be77d94f94179f7c1a87829f61b57181b4..1f7964beb20c20b1224eb3a55e7d9c3d8b4de069 100644 (file)
@@ -396,9 +396,6 @@ static int pm8916_wcd_analog_enable_micbias_int(struct snd_soc_component
 
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
-               snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS,
-                                   MICB_1_INT_TX2_INT_RBIAS_EN_MASK,
-                                   MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE);
                snd_soc_component_update_bits(component, reg, MICB_1_EN_PULL_DOWN_EN_MASK, 0);
                snd_soc_component_update_bits(component, CDC_A_MICB_1_EN,
                                    MICB_1_EN_OPA_STG2_TAIL_CURR_MASK,
@@ -448,6 +445,14 @@ static int pm8916_wcd_analog_enable_micbias_int1(struct
        struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
        struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component);
 
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS,
+                                   MICB_1_INT_TX1_INT_RBIAS_EN_MASK,
+                                   MICB_1_INT_TX1_INT_RBIAS_EN_ENABLE);
+               break;
+       }
+
        return pm8916_wcd_analog_enable_micbias_int(component, event, w->reg,
                                                     wcd->micbias1_cap_mode);
 }
@@ -558,6 +563,11 @@ static int pm8916_wcd_analog_enable_micbias_int2(struct
        struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component);
 
        switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS,
+                                   MICB_1_INT_TX2_INT_RBIAS_EN_MASK,
+                                   MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE);
+               break;
        case SND_SOC_DAPM_POST_PMU:
                pm8916_mbhc_configure_bias(wcd, true);
                break;
@@ -938,10 +948,10 @@ static const struct snd_soc_dapm_widget pm8916_wcd_analog_dapm_widgets[] = {
 
        SND_SOC_DAPM_SUPPLY("MIC BIAS External1", CDC_A_MICB_1_EN, 7, 0,
                            pm8916_wcd_analog_enable_micbias_ext1,
-                           SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+                           SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("MIC BIAS External2", CDC_A_MICB_2_EN, 7, 0,
                            pm8916_wcd_analog_enable_micbias_ext2,
-                           SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+                           SND_SOC_DAPM_POST_PMU),
 
        SND_SOC_DAPM_ADC_E("ADC1", NULL, CDC_A_TX_1_EN, 7, 0,
                           pm8916_wcd_analog_enable_adc,
index 58b2468fb2a7139b8d7dbb35085ecbe5acb8215f..09fccacadd6b1ec38c7d06d115c6e9df7f11d70f 100644 (file)
@@ -586,6 +586,12 @@ static int msm8916_wcd_digital_enable_interpolator(
                snd_soc_component_write(component, rx_gain_reg[w->shift],
                              snd_soc_component_read32(component, rx_gain_reg[w->shift]));
                break;
+       case SND_SOC_DAPM_POST_PMD:
+               snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL,
+                                             1 << w->shift, 1 << w->shift);
+               snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL,
+                                             1 << w->shift, 0x0);
+               break;
        }
        return 0;
 }
index adbae1f36a8afb303b85ebdb5617e33ca6a4620c..747ca248bf10c9e4b1dc763ab25e3cffdefcde3c 100644 (file)
@@ -2432,6 +2432,13 @@ static void rt5640_disable_jack_detect(struct snd_soc_component *component)
 {
        struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
 
+       /*
+        * soc_remove_component() force-disables jack and thus rt5640->jack
+        * could be NULL at the time of driver's module unloading.
+        */
+       if (!rt5640->jack)
+               return;
+
        disable_irq(rt5640->irq);
        rt5640_cancel_work(rt5640);
 
index a1db1bce330fa5c3874b52a925ef805ff87502d4..5faecbeb54970ee6e9f5a40b3f85c4ffe1bd7158 100644 (file)
@@ -505,15 +505,20 @@ static int fsl_audmix_probe(struct platform_device *pdev)
                                              ARRAY_SIZE(fsl_audmix_dai));
        if (ret) {
                dev_err(dev, "failed to register ASoC DAI\n");
-               return ret;
+               goto err_disable_pm;
        }
 
        priv->pdev = platform_device_register_data(dev, mdrv, 0, NULL, 0);
        if (IS_ERR(priv->pdev)) {
                ret = PTR_ERR(priv->pdev);
                dev_err(dev, "failed to register platform %s: %d\n", mdrv, ret);
+               goto err_disable_pm;
        }
 
+       return 0;
+
+err_disable_pm:
+       pm_runtime_disable(dev);
        return ret;
 }
 
@@ -521,6 +526,8 @@ static int fsl_audmix_remove(struct platform_device *pdev)
 {
        struct fsl_audmix *priv = dev_get_drvdata(&pdev->dev);
 
+       pm_runtime_disable(&pdev->dev);
+
        if (priv->pdev)
                platform_device_unregister(priv->pdev);
 
index 46612331f5ea7aa21bbf9e70b9945113cab67e5d..54e97455d7f6603b36e9df79f21264a212e3ed51 100644 (file)
@@ -442,7 +442,8 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "NB41"),
                },
-               .driver_data = (void *)(BYT_CHT_ES8316_INTMIC_IN2_MAP
+               .driver_data = (void *)(BYT_CHT_ES8316_SSP0
+                                       | BYT_CHT_ES8316_INTMIC_IN2_MAP
                                        | BYT_CHT_ES8316_JD_INVERTED),
        },
        {       /* Teclast X98 Plus II */
index a22f97234201175d0bc9fd2a9c65891cc847aa5a..5f1bf6d3800c6c9c7be395cdc87b3e20b24260bc 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/clk.h>
 #include <linux/dmi.h>
 #include <linux/slab.h>
-#include <asm/cpu_device_id.h>
 #include <linux/acpi.h>
 #include <sound/core.h>
 #include <sound/jack.h>
index 9054558ce38619faf28a5bc535d59608e912f7ea..b94680fb26fa76b904d767e96f588cb299244529 100644 (file)
@@ -539,6 +539,9 @@ void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd)
        struct snd_soc_rtdcom_list *rtdcom;
        struct snd_soc_component *component;
 
+       if (!rtd->pcm)
+               return;
+
        for_each_rtd_components(rtd, rtdcom, component)
                if (component->driver->pcm_destruct)
                        component->driver->pcm_destruct(component, rtd->pcm);
index 1c84ff1a5bf96b09e303ec08d5d4c6e1e565aad9..8ef0efeed0a7e27ca683c86ae1e5535399f5008a 100644 (file)
@@ -479,6 +479,12 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
                goto free_rtd;
 
        rtd->dev = dev;
+       INIT_LIST_HEAD(&rtd->list);
+       INIT_LIST_HEAD(&rtd->component_list);
+       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
+       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
+       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
+       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
        dev_set_drvdata(dev, rtd);
        INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
 
@@ -494,12 +500,6 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
        /*
         * rtd remaining settings
         */
-       INIT_LIST_HEAD(&rtd->component_list);
-       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
-       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
-       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
-       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
-
        rtd->card = card;
        rtd->dai_link = dai_link;
        if (!rtd->dai_link->ops)
@@ -1871,6 +1871,8 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
 
                        /* convert non BE into BE */
                        dai_link->no_pcm = 1;
+                       dai_link->dpcm_playback = 1;
+                       dai_link->dpcm_capture = 1;
 
                        /* override any BE fixups */
                        dai_link->be_hw_params_fixup =
index b28613149b0cce2e4a2b1623091fac095808818b..92e4f4d08bfad6b7ff9e9904eac0fac7f98f8bd9 100644 (file)
@@ -548,12 +548,12 @@ static void remove_link(struct snd_soc_component *comp,
        if (dobj->ops && dobj->ops->link_unload)
                dobj->ops->link_unload(comp, dobj);
 
+       list_del(&dobj->list);
+       snd_soc_remove_dai_link(comp->card, link);
+
        kfree(link->name);
        kfree(link->stream_name);
        kfree(link->cpus->dai_name);
-
-       list_del(&dobj->list);
-       snd_soc_remove_dai_link(comp->card, link);
        kfree(link);
 }
 
index cfefcfd927986d537fd59476c1a1f479f4d5003c..aef6ca167b9c4b8aca519708d60d28fabff90519 100644 (file)
@@ -209,7 +209,7 @@ static int imx8_probe(struct snd_sof_dev *sdev)
 
        priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains,
                                          sizeof(*priv->pd_dev), GFP_KERNEL);
-       if (!priv)
+       if (!priv->pd_dev)
                return -ENOMEM;
 
        priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains,
@@ -304,6 +304,9 @@ static int imx8_probe(struct snd_sof_dev *sdev)
        }
        sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
 
+       /* set default mailbox offset for FW ready message */
+       sdev->dsp_box.offset = MBOX_OFFSET;
+
        return 0;
 
 exit_pdev_unregister:
index 827f84a0722e91d7266d5729f5bc6273cbd42f51..fbfa225d1c5afa905ced3138721575a92dc27567 100644 (file)
 #define IDISP_VID_INTEL        0x80860000
 
 /* load the legacy HDA codec driver */
-#ifdef MODULE
-static void hda_codec_load_module(struct hda_codec *codec)
+static int hda_codec_load_module(struct hda_codec *codec)
 {
+#ifdef MODULE
        char alias[MODULE_NAME_LEN];
        const char *module = alias;
 
        snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias));
        dev_dbg(&codec->core.dev, "loading codec module: %s\n", module);
        request_module(module);
-}
-#else
-static void hda_codec_load_module(struct hda_codec *codec) {}
 #endif
+       return device_attach(hda_codec_dev(codec));
+}
 
 /* enable controller wake up event for all codecs with jack connectors */
 void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
@@ -129,10 +128,16 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
        if ((mach_params && mach_params->common_hdmi_codec_drv) ||
            (resp & 0xFFFF0000) != IDISP_VID_INTEL) {
                hdev->type = HDA_DEV_LEGACY;
-               hda_codec_load_module(&hda_priv->codec);
+               ret = hda_codec_load_module(&hda_priv->codec);
+               /*
+                * handle ret==0 (no driver bound) as an error, but pass
+                * other return codes without modification
+                */
+               if (ret == 0)
+                       ret = -ENOENT;
        }
 
-       return 0;
+       return ret;
 #else
        hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
index 8796f385be76c19ce20c346cb29a6b1849edb35e..896d21984b735abcfe3ca9fea6fa0fce0d0ef9e5 100644 (file)
@@ -216,6 +216,8 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
                link_dev = hda_link_stream_assign(bus, substream);
                if (!link_dev)
                        return -EBUSY;
+
+               snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
        }
 
        stream_tag = hdac_stream(link_dev)->stream_tag;
@@ -228,8 +230,6 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
        if (ret < 0)
                return ret;
 
-       snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
-
        link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
        if (!link)
                return -EINVAL;
@@ -361,6 +361,13 @@ static int hda_link_hw_free(struct snd_pcm_substream *substream,
        bus = hstream->bus;
        rtd = snd_pcm_substream_chip(substream);
        link_dev = snd_soc_dai_get_dma_data(dai, substream);
+
+       if (!link_dev) {
+               dev_dbg(dai->dev,
+                       "%s: link_dev is not assigned\n", __func__);
+               return -EINVAL;
+       }
+
        hda_stream = hstream_to_sof_hda_stream(link_dev);
 
        /* free the link DMA channel in the FW */
index b1783360fe106bdbf33acffc4443d142bfd81240..bae7ac3581e5189d63652a12a00aa5cfde27b42a 100644 (file)
@@ -329,13 +329,13 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
                if (!ret)
                        break;
 
-               dev_err(sdev->dev, "error: Error code=0x%x: FW status=0x%x\n",
+               dev_dbg(sdev->dev, "iteration %d of Core En/ROM load failed: %d\n",
+                       i, ret);
+               dev_dbg(sdev->dev, "Error code=0x%x: FW status=0x%x\n",
                        snd_sof_dsp_read(sdev, HDA_DSP_BAR,
                                         HDA_DSP_SRAM_REG_ROM_ERROR),
                        snd_sof_dsp_read(sdev, HDA_DSP_BAR,
                                         HDA_DSP_SRAM_REG_ROM_STATUS));
-               dev_err(sdev->dev, "error: iteration %d of Core En/ROM load failed: %d\n",
-                       i, ret);
        }
 
        if (i == HDA_FW_BOOT_ATTEMPTS) {
index 5994e10733642afc2bcc710798c541ed7722884d..5fdfbaa8c4ed6a7832db085a664bc4d6aa1a1836 100644 (file)
@@ -826,6 +826,9 @@ void snd_sof_ipc_free(struct snd_sof_dev *sdev)
 {
        struct snd_sof_ipc *ipc = sdev->ipc;
 
+       if (!ipc)
+               return;
+
        /* disable sending of ipc's */
        mutex_lock(&ipc->tx_mutex);
        ipc->disable_ipc_tx = true;
index 48ea915b24ba25018e3357fe949a3a35276ac4b5..2ed92c990b97c1c0d663b9a4c9b023507db580f2 100644 (file)
@@ -226,7 +226,6 @@ static void uni_player_set_channel_status(struct uniperif *player,
         * sampling frequency. If no sample rate is already specified, then
         * set one.
         */
-       mutex_lock(&player->ctrl_lock);
        if (runtime) {
                switch (runtime->rate) {
                case 22050:
@@ -303,7 +302,6 @@ static void uni_player_set_channel_status(struct uniperif *player,
                player->stream_settings.iec958.status[3 + (n * 4)] << 24;
                SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status);
        }
-       mutex_unlock(&player->ctrl_lock);
 
        /* Update the channel status */
        if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
@@ -365,8 +363,10 @@ static int uni_player_prepare_iec958(struct uniperif *player,
 
        SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player);
 
+       mutex_lock(&player->ctrl_lock);
        /* Update the channel status */
        uni_player_set_channel_status(player, runtime);
+       mutex_unlock(&player->ctrl_lock);
 
        /* Clear the user validity user bits */
        SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
@@ -598,7 +598,6 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
        iec958->status[1] = ucontrol->value.iec958.status[1];
        iec958->status[2] = ucontrol->value.iec958.status[2];
        iec958->status[3] = ucontrol->value.iec958.status[3];
-       mutex_unlock(&player->ctrl_lock);
 
        spin_lock_irqsave(&player->irq_lock, flags);
        if (player->substream && player->substream->runtime)
@@ -608,6 +607,8 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
                uni_player_set_channel_status(player, NULL);
 
        spin_unlock_irqrestore(&player->irq_lock, flags);
+       mutex_unlock(&player->ctrl_lock);
+
        return 0;
 }
 
index 81c407da15c5e9c1d1d76ce415e3963ee9d23ce0..08696a4adb69a69a40629cb91a25da26e94b70bb 100644 (file)
@@ -153,13 +153,13 @@ static const struct snd_soc_component_driver stm32_adfsdm_dai_component = {
        .name = "stm32_dfsdm_audio",
 };
 
-static void memcpy_32to16(void *dest, const void *src, size_t n)
+static void stm32_memcpy_32to16(void *dest, const void *src, size_t n)
 {
        unsigned int i = 0;
        u16 *d = (u16 *)dest, *s = (u16 *)src;
 
        s++;
-       for (i = n; i > 0; i--) {
+       for (i = n >> 1; i > 0; i--) {
                *d++ = *s++;
                s++;
        }
@@ -186,8 +186,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
 
        if ((priv->pos + src_size) > buff_size) {
                if (format == SNDRV_PCM_FORMAT_S16_LE)
-                       memcpy_32to16(&pcm_buff[priv->pos], src_buff,
-                                     buff_size - priv->pos);
+                       stm32_memcpy_32to16(&pcm_buff[priv->pos], src_buff,
+                                           buff_size - priv->pos);
                else
                        memcpy(&pcm_buff[priv->pos], src_buff,
                               buff_size - priv->pos);
@@ -196,8 +196,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
        }
 
        if (format == SNDRV_PCM_FORMAT_S16_LE)
-               memcpy_32to16(&pcm_buff[priv->pos],
-                             &src_buff[src_size - cur_size], cur_size);
+               stm32_memcpy_32to16(&pcm_buff[priv->pos],
+                                   &src_buff[src_size - cur_size], cur_size);
        else
                memcpy(&pcm_buff[priv->pos], &src_buff[src_size - cur_size],
                       cur_size);
index 48e629ac2d88b630778004f652cd84b4f3b53dbd..30bcd5d3a32a8ffe4bcda256200c0ecbc7f4832b 100644 (file)
@@ -184,6 +184,56 @@ static bool stm32_sai_sub_writeable_reg(struct device *dev, unsigned int reg)
        }
 }
 
+static int stm32_sai_sub_reg_up(struct stm32_sai_sub_data *sai,
+                               unsigned int reg, unsigned int mask,
+                               unsigned int val)
+{
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_update_bits(sai->regmap, reg, mask, val);
+
+       clk_disable(sai->pdata->pclk);
+
+       return ret;
+}
+
+static int stm32_sai_sub_reg_wr(struct stm32_sai_sub_data *sai,
+                               unsigned int reg, unsigned int mask,
+                               unsigned int val)
+{
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write_bits(sai->regmap, reg, mask, val);
+
+       clk_disable(sai->pdata->pclk);
+
+       return ret;
+}
+
+static int stm32_sai_sub_reg_rd(struct stm32_sai_sub_data *sai,
+                               unsigned int reg, unsigned int *val)
+{
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_read(sai->regmap, reg, val);
+
+       clk_disable(sai->pdata->pclk);
+
+       return ret;
+}
+
 static const struct regmap_config stm32_sai_sub_regmap_config_f4 = {
        .reg_bits = 32,
        .reg_stride = 4,
@@ -295,7 +345,7 @@ static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai,
 
        mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version));
        cr1 = SAI_XCR1_MCKDIV_SET(div);
-       ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1);
+       ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, mask, cr1);
        if (ret < 0)
                dev_err(&sai->pdev->dev, "Failed to update CR1 register\n");
 
@@ -372,8 +422,8 @@ static int stm32_sai_mclk_enable(struct clk_hw *hw)
 
        dev_dbg(&sai->pdev->dev, "Enable master clock\n");
 
-       return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                 SAI_XCR1_MCKEN, SAI_XCR1_MCKEN);
+       return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                   SAI_XCR1_MCKEN, SAI_XCR1_MCKEN);
 }
 
 static void stm32_sai_mclk_disable(struct clk_hw *hw)
@@ -383,7 +433,7 @@ static void stm32_sai_mclk_disable(struct clk_hw *hw)
 
        dev_dbg(&sai->pdev->dev, "Disable master clock\n");
 
-       regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0);
+       stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0);
 }
 
 static const struct clk_ops mclk_ops = {
@@ -446,15 +496,15 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
        unsigned int sr, imr, flags;
        snd_pcm_state_t status = SNDRV_PCM_STATE_RUNNING;
 
-       regmap_read(sai->regmap, STM_SAI_IMR_REGX, &imr);
-       regmap_read(sai->regmap, STM_SAI_SR_REGX, &sr);
+       stm32_sai_sub_reg_rd(sai, STM_SAI_IMR_REGX, &imr);
+       stm32_sai_sub_reg_rd(sai, STM_SAI_SR_REGX, &sr);
 
        flags = sr & imr;
        if (!flags)
                return IRQ_NONE;
 
-       regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK,
-                         SAI_XCLRFR_MASK);
+       stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK,
+                            SAI_XCLRFR_MASK);
 
        if (!sai->substream) {
                dev_err(&pdev->dev, "Device stopped. Spurious IRQ 0x%x\n", sr);
@@ -503,8 +553,8 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
        int ret;
 
        if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
-               ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                        SAI_XCR1_NODIV,
+               ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                          SAI_XCR1_NODIV,
                                         freq ? 0 : SAI_XCR1_NODIV);
                if (ret < 0)
                        return ret;
@@ -583,7 +633,7 @@ static int stm32_sai_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
 
        slotr_mask |= SAI_XSLOTR_SLOTEN_MASK;
 
-       regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, slotr_mask, slotr);
+       stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX, slotr_mask, slotr);
 
        sai->slot_width = slot_width;
        sai->slots = slots;
@@ -665,7 +715,7 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
        cr1_mask |= SAI_XCR1_CKSTR;
        frcr_mask |= SAI_XFRCR_FSPOL;
 
-       regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr);
+       stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr);
 
        /* DAI clock master masks */
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -693,7 +743,7 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
        cr1_mask |= SAI_XCR1_SLAVE;
 
 conf_update:
-       ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
+       ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1);
        if (ret < 0) {
                dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
                return ret;
@@ -730,12 +780,12 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
        }
 
        /* Enable ITs */
-       regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX,
-                         SAI_XCLRFR_MASK, SAI_XCLRFR_MASK);
+       stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX,
+                            SAI_XCLRFR_MASK, SAI_XCLRFR_MASK);
 
        imr = SAI_XIMR_OVRUDRIE;
        if (STM_SAI_IS_CAPTURE(sai)) {
-               regmap_read(sai->regmap, STM_SAI_CR2_REGX, &cr2);
+               stm32_sai_sub_reg_rd(sai, STM_SAI_CR2_REGX, &cr2);
                if (cr2 & SAI_XCR2_MUTECNT_MASK)
                        imr |= SAI_XIMR_MUTEDETIE;
        }
@@ -745,8 +795,8 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
        else
                imr |= SAI_XIMR_AFSDETIE | SAI_XIMR_LFSDETIE;
 
-       regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX,
-                          SAI_XIMR_MASK, imr);
+       stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX,
+                            SAI_XIMR_MASK, imr);
 
        return 0;
 }
@@ -763,10 +813,10 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai,
         * SAI fifo threshold is set to half fifo, to keep enough space
         * for DMA incoming bursts.
         */
-       regmap_write_bits(sai->regmap, STM_SAI_CR2_REGX,
-                         SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK,
-                         SAI_XCR2_FFLUSH |
-                         SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF));
+       stm32_sai_sub_reg_wr(sai, STM_SAI_CR2_REGX,
+                            SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK,
+                            SAI_XCR2_FFLUSH |
+                            SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF));
 
        /* DS bits in CR1 not set for SPDIF (size forced to 24 bits).*/
        if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
@@ -795,7 +845,7 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai,
        if ((sai->slots == 2) && (params_channels(params) == 1))
                cr1 |= SAI_XCR1_MONO;
 
-       ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
+       ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1);
        if (ret < 0) {
                dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
                return ret;
@@ -809,7 +859,7 @@ static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai)
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        int slotr, slot_sz;
 
-       regmap_read(sai->regmap, STM_SAI_SLOTR_REGX, &slotr);
+       stm32_sai_sub_reg_rd(sai, STM_SAI_SLOTR_REGX, &slotr);
 
        /*
         * If SLOTSZ is set to auto in SLOTR, align slot width on data size
@@ -831,16 +881,16 @@ static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai)
                sai->slots = 2;
 
        /* The number of slots in the audio frame is equal to NBSLOT[3:0] + 1*/
-       regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX,
-                          SAI_XSLOTR_NBSLOT_MASK,
-                          SAI_XSLOTR_NBSLOT_SET((sai->slots - 1)));
+       stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX,
+                            SAI_XSLOTR_NBSLOT_MASK,
+                            SAI_XSLOTR_NBSLOT_SET((sai->slots - 1)));
 
        /* Set default slots mask if not already set from DT */
        if (!(slotr & SAI_XSLOTR_SLOTEN_MASK)) {
                sai->slot_mask = (1 << sai->slots) - 1;
-               regmap_update_bits(sai->regmap,
-                                  STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK,
-                                  SAI_XSLOTR_SLOTEN_SET(sai->slot_mask));
+               stm32_sai_sub_reg_up(sai,
+                                    STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK,
+                                    SAI_XSLOTR_SLOTEN_SET(sai->slot_mask));
        }
 
        dev_dbg(cpu_dai->dev, "Slots %d, slot width %d\n",
@@ -870,14 +920,14 @@ static void stm32_sai_set_frame(struct snd_soc_dai *cpu_dai)
        dev_dbg(cpu_dai->dev, "Frame length %d, frame active %d\n",
                sai->fs_length, fs_active);
 
-       regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr);
+       stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr);
 
        if ((sai->fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_LSB) {
                offset = sai->slot_width - sai->data_size;
 
-               regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX,
-                                  SAI_XSLOTR_FBOFF_MASK,
-                                  SAI_XSLOTR_FBOFF_SET(offset));
+               stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX,
+                                    SAI_XSLOTR_FBOFF_MASK,
+                                    SAI_XSLOTR_FBOFF_SET(offset));
        }
 }
 
@@ -994,9 +1044,9 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
                                        return -EINVAL;
                                }
 
-                               regmap_update_bits(sai->regmap,
-                                                  STM_SAI_CR1_REGX,
-                                                  SAI_XCR1_OSR, cr1);
+                               stm32_sai_sub_reg_up(sai,
+                                                    STM_SAI_CR1_REGX,
+                                                    SAI_XCR1_OSR, cr1);
 
                                div = stm32_sai_get_clk_div(sai, sai_clk_rate,
                                                            sai->mclk_rate);
@@ -1058,12 +1108,12 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd,
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                dev_dbg(cpu_dai->dev, "Enable DMA and SAI\n");
 
-               regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                  SAI_XCR1_DMAEN, SAI_XCR1_DMAEN);
+               stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                    SAI_XCR1_DMAEN, SAI_XCR1_DMAEN);
 
                /* Enable SAI */
-               ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                        SAI_XCR1_SAIEN, SAI_XCR1_SAIEN);
+               ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                          SAI_XCR1_SAIEN, SAI_XCR1_SAIEN);
                if (ret < 0)
                        dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
                break;
@@ -1072,16 +1122,16 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd,
        case SNDRV_PCM_TRIGGER_STOP:
                dev_dbg(cpu_dai->dev, "Disable DMA and SAI\n");
 
-               regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX,
-                                  SAI_XIMR_MASK, 0);
+               stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX,
+                                    SAI_XIMR_MASK, 0);
 
-               regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                  SAI_XCR1_SAIEN,
-                                  (unsigned int)~SAI_XCR1_SAIEN);
+               stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                    SAI_XCR1_SAIEN,
+                                    (unsigned int)~SAI_XCR1_SAIEN);
 
-               ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                        SAI_XCR1_DMAEN,
-                                        (unsigned int)~SAI_XCR1_DMAEN);
+               ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                          SAI_XCR1_DMAEN,
+                                          (unsigned int)~SAI_XCR1_DMAEN);
                if (ret < 0)
                        dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
 
@@ -1101,7 +1151,7 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        unsigned long flags;
 
-       regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
+       stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
 
        clk_disable_unprepare(sai->sai_ck);
 
@@ -1169,7 +1219,7 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
        cr1_mask |= SAI_XCR1_SYNCEN_MASK;
        cr1 |= SAI_XCR1_SYNCEN_SET(sai->sync);
 
-       return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
+       return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1);
 }
 
 static const struct snd_soc_dai_ops stm32_sai_pcm_dai_ops = {
@@ -1322,8 +1372,13 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
        if (STM_SAI_HAS_PDM(sai) && STM_SAI_IS_SUB_A(sai))
                sai->regmap_config = &stm32_sai_sub_regmap_config_h7;
 
-       sai->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "sai_ck",
-                                               base, sai->regmap_config);
+       /*
+        * Do not manage peripheral clock through regmap framework as this
+        * can lead to circular locking issue with sai master clock provider.
+        * Manage peripheral clock directly in driver instead.
+        */
+       sai->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+                                           sai->regmap_config);
        if (IS_ERR(sai->regmap)) {
                dev_err(&pdev->dev, "Failed to initialize MMIO\n");
                return PTR_ERR(sai->regmap);
@@ -1420,6 +1475,10 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
                return PTR_ERR(sai->sai_ck);
        }
 
+       ret = clk_prepare(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
+
        if (STM_SAI_IS_F4(sai->pdata))
                return 0;
 
@@ -1501,22 +1560,48 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int stm32_sai_sub_remove(struct platform_device *pdev)
+{
+       struct stm32_sai_sub_data *sai = dev_get_drvdata(&pdev->dev);
+
+       clk_unprepare(sai->pdata->pclk);
+
+       return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int stm32_sai_sub_suspend(struct device *dev)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
 
        regcache_cache_only(sai->regmap, true);
        regcache_mark_dirty(sai->regmap);
+
+       clk_disable(sai->pdata->pclk);
+
        return 0;
 }
 
 static int stm32_sai_sub_resume(struct device *dev)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
 
        regcache_cache_only(sai->regmap, false);
-       return regcache_sync(sai->regmap);
+       ret = regcache_sync(sai->regmap);
+
+       clk_disable(sai->pdata->pclk);
+
+       return ret;
 }
 #endif /* CONFIG_PM_SLEEP */
 
@@ -1531,6 +1616,7 @@ static struct platform_driver stm32_sai_sub_driver = {
                .pm = &stm32_sai_sub_pm_ops,
        },
        .probe = stm32_sai_sub_probe,
+       .remove = stm32_sai_sub_remove,
 };
 
 module_platform_driver(stm32_sai_sub_driver);
index 3fd28ee01675edcad8442d9bb2599a7b9c730f8c..3769d9ce5dbef9f546bd93c11c628d1c5c6b5439 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
-#include <linux/pinctrl/consumer.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
 
  * @slave_config: dma slave channel runtime config pointer
  * @phys_addr: SPDIFRX registers physical base address
  * @lock: synchronization enabling lock
+ * @irq_lock: prevent race condition with IRQ on stream state
  * @cs: channel status buffer
  * @ub: user data buffer
  * @irq: SPDIFRX interrupt line
@@ -240,6 +240,7 @@ struct stm32_spdifrx_data {
        struct dma_slave_config slave_config;
        dma_addr_t phys_addr;
        spinlock_t lock;  /* Sync enabling lock */
+       spinlock_t irq_lock; /* Prevent race condition on stream state */
        unsigned char cs[SPDIFRX_CS_BYTES_NB];
        unsigned char ub[SPDIFRX_UB_BYTES_NB];
        int irq;
@@ -320,6 +321,7 @@ static void stm32_spdifrx_dma_ctrl_stop(struct stm32_spdifrx_data *spdifrx)
 static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
 {
        int cr, cr_mask, imr, ret;
+       unsigned long flags;
 
        /* Enable IRQs */
        imr = SPDIFRX_IMR_IFEIE | SPDIFRX_IMR_SYNCDIE | SPDIFRX_IMR_PERRIE;
@@ -327,7 +329,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
        if (ret)
                return ret;
 
-       spin_lock(&spdifrx->lock);
+       spin_lock_irqsave(&spdifrx->lock, flags);
 
        spdifrx->refcount++;
 
@@ -362,7 +364,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
                                "Failed to start synchronization\n");
        }
 
-       spin_unlock(&spdifrx->lock);
+       spin_unlock_irqrestore(&spdifrx->lock, flags);
 
        return ret;
 }
@@ -370,11 +372,12 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
 static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
 {
        int cr, cr_mask, reg;
+       unsigned long flags;
 
-       spin_lock(&spdifrx->lock);
+       spin_lock_irqsave(&spdifrx->lock, flags);
 
        if (--spdifrx->refcount) {
-               spin_unlock(&spdifrx->lock);
+               spin_unlock_irqrestore(&spdifrx->lock, flags);
                return;
        }
 
@@ -393,7 +396,7 @@ static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
        regmap_read(spdifrx->regmap, STM32_SPDIFRX_DR, &reg);
        regmap_read(spdifrx->regmap, STM32_SPDIFRX_CSR, &reg);
 
-       spin_unlock(&spdifrx->lock);
+       spin_unlock_irqrestore(&spdifrx->lock, flags);
 }
 
 static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
@@ -480,8 +483,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx)
        memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB);
        memset(spdifrx->ub, 0, SPDIFRX_UB_BYTES_NB);
 
-       pinctrl_pm_select_default_state(&spdifrx->pdev->dev);
-
        ret = stm32_spdifrx_dma_ctrl_start(spdifrx);
        if (ret < 0)
                return ret;
@@ -513,7 +514,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx)
 
 end:
        clk_disable_unprepare(spdifrx->kclk);
-       pinctrl_pm_select_sleep_state(&spdifrx->pdev->dev);
 
        return ret;
 }
@@ -665,7 +665,6 @@ static const struct regmap_config stm32_h7_spdifrx_regmap_conf = {
 static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
 {
        struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)devid;
-       struct snd_pcm_substream *substream = spdifrx->substream;
        struct platform_device *pdev = spdifrx->pdev;
        unsigned int cr, mask, sr, imr;
        unsigned int flags, sync_state;
@@ -745,14 +744,19 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
                        return IRQ_HANDLED;
                }
 
-               if (substream)
-                       snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+               spin_lock(&spdifrx->irq_lock);
+               if (spdifrx->substream)
+                       snd_pcm_stop(spdifrx->substream,
+                                    SNDRV_PCM_STATE_DISCONNECTED);
+               spin_unlock(&spdifrx->irq_lock);
 
                return IRQ_HANDLED;
        }
 
-       if (err_xrun && substream)
-               snd_pcm_stop_xrun(substream);
+       spin_lock(&spdifrx->irq_lock);
+       if (err_xrun && spdifrx->substream)
+               snd_pcm_stop_xrun(spdifrx->substream);
+       spin_unlock(&spdifrx->irq_lock);
 
        return IRQ_HANDLED;
 }
@@ -761,9 +765,12 @@ static int stm32_spdifrx_startup(struct snd_pcm_substream *substream,
                                 struct snd_soc_dai *cpu_dai)
 {
        struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&spdifrx->irq_lock, flags);
        spdifrx->substream = substream;
+       spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
 
        ret = clk_prepare_enable(spdifrx->kclk);
        if (ret)
@@ -839,8 +846,12 @@ static void stm32_spdifrx_shutdown(struct snd_pcm_substream *substream,
                                   struct snd_soc_dai *cpu_dai)
 {
        struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+       unsigned long flags;
 
+       spin_lock_irqsave(&spdifrx->irq_lock, flags);
        spdifrx->substream = NULL;
+       spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
+
        clk_disable_unprepare(spdifrx->kclk);
 }
 
@@ -944,6 +955,7 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
        spdifrx->pdev = pdev;
        init_completion(&spdifrx->cs_completion);
        spin_lock_init(&spdifrx->lock);
+       spin_lock_init(&spdifrx->irq_lock);
 
        platform_set_drvdata(pdev, spdifrx);
 
index 2991b9986f6653c2e741dfb97af2577b3b30416e..395403a2d33f82b81481ca2bdfb401f58454bfba 100644 (file)
@@ -145,6 +145,7 @@ struct snd_usb_substream {
        struct snd_usb_endpoint *sync_endpoint;
        unsigned long flags;
        bool need_setup_ep;             /* (re)configure EP at prepare? */
+       bool need_setup_fmt;            /* (re)configure fmt after resume? */
        unsigned int speed;             /* USB_SPEED_XXX */
 
        u64 formats;                    /* format bitmasks (all or'ed) */
index 9c8930bb00c8a5039804078f04bdedaf5853ab1a..0e4eab96e23e0fcc0cdf9213617cffd6914a2423 100644 (file)
@@ -370,7 +370,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
 add_sync_ep_from_ifnum:
        iface = usb_ifnum_to_if(dev, ifnum);
 
-       if (!iface || iface->num_altsetting == 0)
+       if (!iface || iface->num_altsetting < 2)
                return -EINVAL;
 
        alts = &iface->altsetting[1];
@@ -506,15 +506,15 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        if (WARN_ON(!iface))
                return -EINVAL;
        alts = usb_altnum_to_altsetting(iface, fmt->altsetting);
-       altsd = get_iface_desc(alts);
-       if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting))
+       if (WARN_ON(!alts))
                return -EINVAL;
+       altsd = get_iface_desc(alts);
 
-       if (fmt == subs->cur_audiofmt)
+       if (fmt == subs->cur_audiofmt && !subs->need_setup_fmt)
                return 0;
 
        /* close the old interface */
-       if (subs->interface >= 0 && subs->interface != fmt->iface) {
+       if (subs->interface >= 0 && (subs->interface != fmt->iface || subs->need_setup_fmt)) {
                if (!subs->stream->chip->keep_iface) {
                        err = usb_set_interface(subs->dev, subs->interface, 0);
                        if (err < 0) {
@@ -528,6 +528,9 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
                subs->altset_idx = 0;
        }
 
+       if (subs->need_setup_fmt)
+               subs->need_setup_fmt = false;
+
        /* set interface */
        if (iface->cur_altsetting != alts) {
                err = snd_usb_select_mode_quirk(subs, fmt);
@@ -1728,6 +1731,13 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
                subs->data_endpoint->retire_data_urb = retire_playback_urb;
                subs->running = 0;
                return 0;
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+               if (subs->stream->chip->setup_fmt_after_resume_quirk) {
+                       stop_endpoints(subs, true);
+                       subs->need_setup_fmt = true;
+                       return 0;
+               }
+               break;
        }
 
        return -EINVAL;
@@ -1760,6 +1770,13 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
                subs->data_endpoint->retire_data_urb = retire_capture_urb;
                subs->running = 1;
                return 0;
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+               if (subs->stream->chip->setup_fmt_after_resume_quirk) {
+                       stop_endpoints(subs, true);
+                       subs->need_setup_fmt = true;
+                       return 0;
+               }
+               break;
        }
 
        return -EINVAL;
index 70c338f3ae241b278255e37775d34e216b1de4e2..d187aa6d50db0cd1f810495cdc84318019f61f16 100644 (file)
@@ -3466,7 +3466,8 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                .vendor_name = "Dell",
                .product_name = "WD19 Dock",
                .profile_name = "Dell-WD15-Dock",
-               .ifnum = QUIRK_NO_INTERFACE
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_SETUP_FMT_AFTER_RESUME
        }
 },
 /* MOTU Microbook II */
index 349e1e52996d8924c812f0177998d45fee970950..82184036437b50d203e9d1581b35cd760cd63bda 100644 (file)
@@ -508,6 +508,16 @@ static int create_standard_mixer_quirk(struct snd_usb_audio *chip,
        return snd_usb_create_mixer(chip, quirk->ifnum, 0);
 }
 
+
+static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip,
+                                      struct usb_interface *iface,
+                                      struct usb_driver *driver,
+                                      const struct snd_usb_audio_quirk *quirk)
+{
+       chip->setup_fmt_after_resume_quirk = 1;
+       return 1;       /* Continue with creating streams and mixer */
+}
+
 /*
  * audio-interface quirks
  *
@@ -546,6 +556,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
                [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
                [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
                [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
+               [QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk,
        };
 
        if (quirk->type < QUIRK_TYPE_COUNT) {
@@ -1386,6 +1397,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
        case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+       case USB_ID(0x05a7, 0x1020): /* Bose Companion 5 */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
        case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
        case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
index ff3cbf653de80df519891866969db4615a27e70e..6fe3ab582ec6ac31fe5c402a82c43e9db4b80b15 100644 (file)
@@ -33,7 +33,7 @@ struct snd_usb_audio {
        wait_queue_head_t shutdown_wait;
        unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
        unsigned int tx_length_quirk:1; /* Put length specifier in transfers */
-       
+       unsigned int setup_fmt_after_resume_quirk:1; /* setup the format to interface after resume */
        int num_interfaces;
        int num_suspended_intf;
        int sample_rate_read_error;
@@ -98,6 +98,7 @@ enum quirk_type {
        QUIRK_AUDIO_EDIROL_UAXX,
        QUIRK_AUDIO_ALIGN_TRANSFER,
        QUIRK_AUDIO_STANDARD_MIXER,
+       QUIRK_SETUP_FMT_AFTER_RESUME,
 
        QUIRK_TYPE_COUNT
 };
index d66131f696892065240e225e2c86d44278ea4e6e..397e5716ab6d869f51496d29bd218f1a30eadf10 100644 (file)
@@ -26,7 +26,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw,
                           bool is_plain_text)
 {
        if (is_plain_text)
-               jsonw_printf(jw, "%p", data);
+               jsonw_printf(jw, "%p", *(void **)data);
        else
                jsonw_printf(jw, "%lu", *(unsigned long *)data);
 }
index defae23a0169d8d4b653ab7305c1f3bf3c664742..97830e46d1a05e182b16b1baf049073638aa1ebf 100644 (file)
@@ -138,6 +138,7 @@ STATIC_OBJDIR       := $(OUTPUT)staticobjs/
 BPF_IN_SHARED  := $(SHARED_OBJDIR)libbpf-in.o
 BPF_IN_STATIC  := $(STATIC_OBJDIR)libbpf-in.o
 VERSION_SCRIPT := libbpf.map
+BPF_HELPER_DEFS        := $(OUTPUT)bpf_helper_defs.h
 
 LIB_TARGET     := $(addprefix $(OUTPUT),$(LIB_TARGET))
 LIB_FILE       := $(addprefix $(OUTPUT),$(LIB_FILE))
@@ -159,7 +160,7 @@ all: fixdep
 
 all_cmd: $(CMD_TARGETS) check
 
-$(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h
+$(BPF_IN_SHARED): force elfdep bpfdep $(BPF_HELPER_DEFS)
        @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
        (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@@ -177,12 +178,12 @@ $(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
        $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
 
-$(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h
+$(BPF_IN_STATIC): force elfdep bpfdep $(BPF_HELPER_DEFS)
        $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
 
-bpf_helper_defs.h: $(srctree)/tools/include/uapi/linux/bpf.h
+$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
        $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header              \
-               --file $(srctree)/tools/include/uapi/linux/bpf.h > bpf_helper_defs.h
+               --file $(srctree)/tools/include/uapi/linux/bpf.h > $(BPF_HELPER_DEFS)
 
 $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
 
@@ -243,7 +244,7 @@ install_lib: all_cmd
                $(call do_install_mkdir,$(libdir_SQ)); \
                cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
 
-install_headers: bpf_helper_defs.h
+install_headers: $(BPF_HELPER_DEFS)
        $(call QUIET_INSTALL, headers) \
                $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
                $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
@@ -251,7 +252,7 @@ install_headers: bpf_helper_defs.h
                $(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
                $(call do_install,xsk.h,$(prefix)/include/bpf,644); \
                $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
-               $(call do_install,bpf_helper_defs.h,$(prefix)/include/bpf,644); \
+               $(call do_install,$(BPF_HELPER_DEFS),$(prefix)/include/bpf,644); \
                $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \
                $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \
                $(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644);
@@ -271,7 +272,7 @@ config-clean:
 clean:
        $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
                *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
-               *.pc LIBBPF-CFLAGS bpf_helper_defs.h \
+               *.pc LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \
                $(SHARED_OBJDIR) $(STATIC_OBJDIR)
        $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
 
index f3cbf86e51acf6e46ce70423e76b767798248754..20eed719542e5fcb9388d33cb93635db11c30f5e 100644 (file)
@@ -1228,8 +1228,10 @@ filter_event(struct tep_event_filter *filter, struct tep_event *event,
        }
 
        filter_type = add_filter_type(filter, event->id);
-       if (filter_type == NULL)
+       if (filter_type == NULL) {
+               free_arg(arg);
                return TEP_ERRNO__MEM_ALLOC_FAILED;
+       }
 
        if (filter_type->filter)
                free_arg(filter_type->filter);
index 387311c6726407eed80ccd4dc013e1fb9a330338..de988589d99bd5f93d07f18ce39d85ed8387a240 100644 (file)
@@ -1076,6 +1076,7 @@ int cmd_report(int argc, const char **argv)
        struct stat st;
        bool has_br_stack = false;
        int branch_mode = -1;
+       int last_key = 0;
        bool branch_call_mode = false;
 #define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"
        static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
@@ -1450,7 +1451,8 @@ int cmd_report(int argc, const char **argv)
                sort_order = sort_tmp;
        }
 
-       if (setup_sorting(session->evlist) < 0) {
+       if ((last_key != K_SWITCH_INPUT_DATA) &&
+           (setup_sorting(session->evlist) < 0)) {
                if (sort_order)
                        parse_options_usage(report_usage, options, "s", 1);
                if (field_order)
@@ -1530,6 +1532,7 @@ int cmd_report(int argc, const char **argv)
        ret = __cmd_report(&report);
        if (ret == K_SWITCH_INPUT_DATA) {
                perf_session__delete(session);
+               last_key = K_SWITCH_INPUT_DATA;
                goto repeat;
        } else
                ret = 0;
index 45286900aacbff35c0ecb2d333f38ab0b81f3eb3..0aa63aeb58ecf048e67401846986000b27b12f34 100644 (file)
@@ -339,10 +339,10 @@ static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
        list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list)
 
 #define hists__for_each_format(hists, format) \
-       perf_hpp_list__for_each_format((hists)->hpp_list, fmt)
+       perf_hpp_list__for_each_format((hists)->hpp_list, format)
 
 #define hists__for_each_sort_list(hists, format) \
-       perf_hpp_list__for_each_sort_list((hists)->hpp_list, fmt)
+       perf_hpp_list__for_each_sort_list((hists)->hpp_list, format)
 
 extern struct perf_hpp_fmt perf_hpp__format[];
 
index 6658fbf196e6a36b578ddd1a6245c00456fd36aa..1965aefccb022b98f8362da8b03815a07d5c250c 100644 (file)
@@ -920,6 +920,9 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
                if (curr_map == NULL)
                        return -1;
 
+               if (curr_dso->kernel)
+                       map__kmap(curr_map)->kmaps = kmaps;
+
                if (adjust_kernel_syms) {
                        curr_map->start  = shdr->sh_addr + ref_reloc(kmap);
                        curr_map->end    = curr_map->start + shdr->sh_size;
index efe06d6219837c076f37924374ec18ecef0b7632..e59eb9e7f9236b5dc872b270b21b072b4886b6ef 100755 (executable)
@@ -31,15 +31,12 @@ class KunitStatus(Enum):
        TEST_FAILURE = auto()
 
 def create_default_kunitconfig():
-       if not os.path.exists(kunit_kernel.KUNITCONFIG_PATH):
+       if not os.path.exists(kunit_kernel.kunitconfig_path):
                shutil.copyfile('arch/um/configs/kunit_defconfig',
-                               kunit_kernel.KUNITCONFIG_PATH)
+                               kunit_kernel.kunitconfig_path)
 
 def run_tests(linux: kunit_kernel.LinuxSourceTree,
              request: KunitRequest) -> KunitResult:
-       if request.defconfig:
-               create_default_kunitconfig()
-
        config_start = time.time()
        success = linux.build_reconfig(request.build_dir)
        config_end = time.time()
@@ -108,15 +105,22 @@ def main(argv, linux=None):
        run_parser.add_argument('--build_dir',
                                help='As in the make command, it specifies the build '
                                'directory.',
-                               type=str, default=None, metavar='build_dir')
+                               type=str, default='', metavar='build_dir')
 
        run_parser.add_argument('--defconfig',
-                               help='Uses a default kunitconfig.',
+                               help='Uses a default .kunitconfig.',
                                action='store_true')
 
        cli_args = parser.parse_args(argv)
 
        if cli_args.subcommand == 'run':
+               if cli_args.build_dir:
+                       if not os.path.exists(cli_args.build_dir):
+                               os.mkdir(cli_args.build_dir)
+                       kunit_kernel.kunitconfig_path = os.path.join(
+                               cli_args.build_dir,
+                               kunit_kernel.kunitconfig_path)
+
                if cli_args.defconfig:
                        create_default_kunitconfig()
 
index bf3876835331327c7525fb3727647bd9070ec0b9..cc5d844ecca13bfe57f69f13b9c3180834e90a79 100644 (file)
@@ -14,7 +14,7 @@ import os
 import kunit_config
 
 KCONFIG_PATH = '.config'
-KUNITCONFIG_PATH = 'kunitconfig'
+kunitconfig_path = '.kunitconfig'
 
 class ConfigError(Exception):
        """Represents an error trying to configure the Linux kernel."""
@@ -82,7 +82,7 @@ class LinuxSourceTree(object):
 
        def __init__(self):
                self._kconfig = kunit_config.Kconfig()
-               self._kconfig.read_from_file(KUNITCONFIG_PATH)
+               self._kconfig.read_from_file(kunitconfig_path)
                self._ops = LinuxSourceTreeOperations()
 
        def clean(self):
@@ -111,7 +111,7 @@ class LinuxSourceTree(object):
                return True
 
        def build_reconfig(self, build_dir):
-               """Creates a new .config if it is not a subset of the kunitconfig."""
+               """Creates a new .config if it is not a subset of the .kunitconfig."""
                kconfig_path = get_kconfig_path(build_dir)
                if os.path.exists(kconfig_path):
                        existing_kconfig = kunit_config.Kconfig()
@@ -140,10 +140,10 @@ class LinuxSourceTree(object):
                        return False
                return True
 
-       def run_kernel(self, args=[], timeout=None, build_dir=None):
+       def run_kernel(self, args=[], timeout=None, build_dir=''):
                args.extend(['mem=256M'])
                process = self._ops.linux_bin(args, timeout, build_dir)
-               with open('test.log', 'w') as f:
+               with open(os.path.join(build_dir, 'test.log'), 'w') as f:
                        for line in process.stdout:
                                f.write(line.rstrip().decode('ascii') + '\n')
                                yield line.rstrip().decode('ascii')
index a2a8ea6beae34509aa7c08bb95ba71aef3b6c027..cba97756ac4a5479f6608dfa28062e2afcb56ae7 100755 (executable)
@@ -174,6 +174,7 @@ class KUnitMainTest(unittest.TestCase):
                kunit.main(['run'], self.linux_source_mock)
                assert self.linux_source_mock.build_reconfig.call_count == 1
                assert self.linux_source_mock.run_kernel.call_count == 1
+               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=300)
                self.print_mock.assert_any_call(StrContains('Testing complete.'))
 
        def test_run_passes_args_fail(self):
@@ -199,7 +200,14 @@ class KUnitMainTest(unittest.TestCase):
                timeout = 3453
                kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
                assert self.linux_source_mock.build_reconfig.call_count == 1
-               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=None, timeout=timeout)
+               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=timeout)
+               self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+       def test_run_builddir(self):
+               build_dir = '.kunit'
+               kunit.main(['run', '--build_dir', build_dir], self.linux_source_mock)
+               assert self.linux_source_mock.build_reconfig.call_count == 1
+               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300)
                self.print_mock.assert_any_call(StrContains('Testing complete.'))
 
 if __name__ == '__main__':
index 419652458da4ab1d7d855e430c502c7b55d82cd7..1ff0a9f49c01ca82ea14678fba4b476202557da9 100644 (file)
@@ -40,3 +40,4 @@ xdping
 test_cpp
 /no_alu32
 /bpf_gcc
+bpf_helper_defs.h
index e0fe01d9ec33b42be030487b8f930c534b023879..e2fd6f8d579cdd989c602340a93c97f9600cc780 100644 (file)
@@ -120,9 +120,9 @@ force:
 $(BPFOBJ): force
        $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
 
-BPF_HELPERS := $(BPFDIR)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h)
-$(BPFDIR)/bpf_helper_defs.h:
-       $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ bpf_helper_defs.h
+BPF_HELPERS := $(OUTPUT)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h)
+$(OUTPUT)/bpf_helper_defs.h:
+       $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ $(OUTPUT)/bpf_helper_defs.h
 
 # Get Clang's default includes on this system, as opposed to those seen by
 # '-target bpf'. This fixes "missing" files on some architectures/distros,
index 47315fe48d5af51aa0593b0e0546dfff142976ba..24dd8ed485802423be0279166d6735b33f261d71 100755 (executable)
@@ -232,7 +232,7 @@ test_mc_aware()
        stop_traffic
        local ucth1=${uc_rate[1]}
 
-       start_traffic $h1 own bc bc
+       start_traffic $h1 192.0.2.65 bc bc
 
        local d0=$(date +%s)
        local t0=$(ethtool_stats_get $h3 rx_octets_prio_0)
@@ -254,7 +254,11 @@ test_mc_aware()
                        ret = 100 * ($ucth1 - $ucth2) / $ucth1
                        if (ret > 0) { ret } else { 0 }
                    ")
-       check_err $(bc <<< "$deg > 25")
+
+       # Minimum shaper of 200Mbps on MC TCs should cause about 20% of
+       # degradation on 1Gbps link.
+       check_err $(bc <<< "$deg < 15") "Minimum shaper not in effect"
+       check_err $(bc <<< "$deg > 25") "MC traffic degrades UC performance too much"
 
        local interval=$((d1 - d0))
        local mc_ir=$(rate $u0 $u1 $interval)
index e62f3d4f68da484405a5ae869ef9a40b500c7b46..78ae4aaf7141ad285847b94adf52c87229208520 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 CFLAGS += -I../../../../../usr/include/
-LDFLAGS += -lpthread
+LDLIBS += -lpthread
 TEST_GEN_PROGS := epoll_wakeup_test
 
 include ../../lib.mk
index b879305a766d07f0f76f5ae5cfda4eb7f5ef0ef4..5b8c0fedee7613b77456409517bcdea1b76707c3 100755 (executable)
@@ -34,6 +34,12 @@ test_modprobe()
 
 check_mods()
 {
+       local uid=$(id -u)
+       if [ $uid -ne 0 ]; then
+               echo "skip all tests: must be run as root" >&2
+               exit $ksft_skip
+       fi
+
        trap "test_modprobe" EXIT
        if [ ! -d $DIR ]; then
                modprobe test_firmware
index 31eb09e38729520926ab784648af744d9d172eb1..a6e3d5517a6fa0378b164bbd41e2c8f754602f00 100644 (file)
@@ -7,6 +7,9 @@
 MAX_RETRIES=600
 RETRY_INTERVAL=".1"    # seconds
 
+# Kselftest framework requirement - SKIP code is 4
+ksft_skip=4
+
 # log(msg) - write message to kernel log
 #      msg - insightful words
 function log() {
@@ -18,7 +21,16 @@ function log() {
 function skip() {
        log "SKIP: $1"
        echo "SKIP: $1" >&2
-       exit 4
+       exit $ksft_skip
+}
+
+# root test
+function is_root() {
+       uid=$(id -u)
+       if [ $uid -ne 0 ]; then
+               echo "skip all tests: must be run as root" >&2
+               exit $ksft_skip
+       fi
 }
 
 # die(msg) - game over, man
@@ -62,6 +74,7 @@ function set_ftrace_enabled() {
 #               for verbose livepatching output and turn on
 #               the ftrace_enabled sysctl.
 function setup_config() {
+       is_root
        push_config
        set_dynamic_debug
        set_ftrace_enabled 1
index dc2908c22c265fc0f3c4b8b4022da94c839a1a03..a082127081157de7f04a1f91e29d4f43b795d54b 100755 (executable)
@@ -8,8 +8,7 @@ MOD_LIVEPATCH=test_klp_state
 MOD_LIVEPATCH2=test_klp_state2
 MOD_LIVEPATCH3=test_klp_state3
 
-set_dynamic_debug
-
+setup_config
 
 # TEST: Loading and removing a module that modifies the system state
 
index 6e4626ae71b0caf2d8ec6d3c3bfb02f5892fa69b..8f4057310b5b41a2c6b6bf52c448efdfaa4391e5 100755 (executable)
@@ -1,6 +1,9 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 ALL_TESTS="loopback_test"
 NUM_NETIFS=2
 source tc_common.sh
@@ -72,6 +75,11 @@ setup_prepare()
 
        h1_create
        h2_create
+
+       if ethtool -k $h1 | grep loopback | grep -q fixed; then
+               log_test "SKIP: dev $h1 does not support loopback feature"
+               exit $ksft_skip
+       fi
 }
 
 cleanup()
index 16571ac1dab40267fc2141f924839aa671784d1d..d3e0809ab3681fa40cff983c7e48be9d6643eca6 100755 (executable)
@@ -226,17 +226,19 @@ check_transfer()
        return 0
 }
 
-test_tcp_forwarding()
+test_tcp_forwarding_ip()
 {
        local nsa=$1
        local nsb=$2
+       local dstip=$3
+       local dstport=$4
        local lret=0
 
        ip netns exec $nsb nc -w 5 -l -p 12345 < "$ns2in" > "$ns2out" &
        lpid=$!
 
        sleep 1
-       ip netns exec $nsa nc -w 4 10.0.2.99 12345 < "$ns1in" > "$ns1out" &
+       ip netns exec $nsa nc -w 4 "$dstip" "$dstport" < "$ns1in" > "$ns1out" &
        cpid=$!
 
        sleep 3
@@ -258,6 +260,28 @@ test_tcp_forwarding()
        return $lret
 }
 
+test_tcp_forwarding()
+{
+       test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
+
+       return $?
+}
+
+test_tcp_forwarding_nat()
+{
+       local lret
+
+       test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
+       lret=$?
+
+       if [ $lret -eq 0 ] ; then
+               test_tcp_forwarding_ip "$1" "$2" 10.6.6.6 1666
+               lret=$?
+       fi
+
+       return $lret
+}
+
 make_file "$ns1in" "ns1"
 make_file "$ns2in" "ns2"
 
@@ -283,14 +307,19 @@ ip -net ns2 route add 192.168.10.1 via 10.0.2.1
 # Same, but with NAT enabled.
 ip netns exec nsr1 nft -f - <<EOF
 table ip nat {
+   chain prerouting {
+      type nat hook prerouting priority 0; policy accept;
+      meta iif "veth0" ip daddr 10.6.6.6 tcp dport 1666 counter dnat ip to 10.0.2.99:12345
+   }
+
    chain postrouting {
       type nat hook postrouting priority 0; policy accept;
-      meta oifname "veth1" masquerade
+      meta oifname "veth1" counter masquerade
    }
 }
 EOF
 
-test_tcp_forwarding ns1 ns2
+test_tcp_forwarding_nat ns1 ns2
 
 if [ $? -eq 0 ] ;then
        echo "PASS: flow offloaded for ns1/ns2 with NAT"
@@ -313,7 +342,7 @@ fi
 ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
 ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
 
-test_tcp_forwarding ns1 ns2
+test_tcp_forwarding_nat ns1 ns2
 if [ $? -eq 0 ] ;then
        echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery"
 else
index eec2663261f2ac8bfc09085049ea0d908f8851c5..e8a657a5f48a04db13532048daa64a454e8cc6d4 100644 (file)
@@ -15,7 +15,7 @@
 #include <errno.h>
 #include <stddef.h>
 
-static inline pid_t gettid(void)
+static inline pid_t rseq_gettid(void)
 {
        return syscall(__NR_gettid);
 }
@@ -373,11 +373,12 @@ void *test_percpu_spinlock_thread(void *arg)
                rseq_percpu_unlock(&data->lock, cpu);
 #ifndef BENCHMARK
                if (i != 0 && !(i % (reps / 10)))
-                       printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+                       printf_verbose("tid %d: count %lld\n",
+                                      (int) rseq_gettid(), i);
 #endif
        }
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && thread_data->reg &&
            rseq_unregister_current_thread())
                abort();
@@ -454,11 +455,12 @@ void *test_percpu_inc_thread(void *arg)
                } while (rseq_unlikely(ret));
 #ifndef BENCHMARK
                if (i != 0 && !(i % (reps / 10)))
-                       printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+                       printf_verbose("tid %d: count %lld\n",
+                                      (int) rseq_gettid(), i);
 #endif
        }
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && thread_data->reg &&
            rseq_unregister_current_thread())
                abort();
@@ -605,7 +607,7 @@ void *test_percpu_list_thread(void *arg)
        }
 
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && rseq_unregister_current_thread())
                abort();
 
@@ -796,7 +798,7 @@ void *test_percpu_buffer_thread(void *arg)
        }
 
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && rseq_unregister_current_thread())
                abort();
 
@@ -1011,7 +1013,7 @@ void *test_percpu_memcpy_buffer_thread(void *arg)
        }
 
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && rseq_unregister_current_thread())
                abort();
 
index d40d60e7499e8aa2f814f7282092692db84fcf73..3f63eb362b92fbd709332043c862f7d67390323d 100644 (file)
@@ -149,11 +149,13 @@ static inline void rseq_clear_rseq_cs(void)
 /*
  * rseq_prepare_unload() should be invoked by each thread executing a rseq
  * critical section at least once between their last critical section and
- * library unload of the library defining the rseq critical section
- * (struct rseq_cs). This also applies to use of rseq in code generated by
- * JIT: rseq_prepare_unload() should be invoked at least once by each
- * thread executing a rseq critical section before reclaim of the memory
- * holding the struct rseq_cs.
+ * library unload of the library defining the rseq critical section (struct
+ * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
+ * post_commit_offset fields. This also applies to use of rseq in code
+ * generated by JIT: rseq_prepare_unload() should be invoked at least once by
+ * each thread executing a rseq critical section before reclaim of the memory
+ * holding the struct rseq_cs or reclaim of the code pointed to by struct
+ * rseq_cs start_ip and post_commit_offset fields.
  */
 static inline void rseq_prepare_unload(void)
 {
diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings
new file mode 100644 (file)
index 0000000..e7b9417
--- /dev/null
@@ -0,0 +1 @@
+timeout=0
index 6944b898bb530929f8436873b9af395cd81e20cd..ee1b727ede045dfd920bf319d3729efd217ef137 100644 (file)
@@ -3158,7 +3158,18 @@ TEST(user_notification_basic)
        EXPECT_GT(poll(&pollfd, 1, -1), 0);
        EXPECT_EQ(pollfd.revents, POLLIN);
 
-       EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
+       /* Test that we can't pass garbage to the kernel. */
+       memset(&req, 0, sizeof(req));
+       req.pid = -1;
+       errno = 0;
+       ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);
+       EXPECT_EQ(-1, ret);
+       EXPECT_EQ(EINVAL, errno);
+
+       if (ret) {
+               req.pid = 0;
+               EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
+       }
 
        pollfd.fd = listener;
        pollfd.events = POLLIN | POLLOUT;
@@ -3278,6 +3289,7 @@ TEST(user_notification_signal)
 
        close(sk_pair[1]);
 
+       memset(&req, 0, sizeof(req));
        EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
 
        EXPECT_EQ(kill(pid, SIGUSR1), 0);
@@ -3296,6 +3308,7 @@ TEST(user_notification_signal)
        EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
        EXPECT_EQ(errno, ENOENT);
 
+       memset(&req, 0, sizeof(req));
        EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
 
        resp.id = req.id;
index 0aad760fcd8c3704a6445e1a5218082d3d069063..2bbac73e6477790795a8ad56c2da524cabcda9e3 100755 (executable)
@@ -128,7 +128,7 @@ parse() {
                        str="${ftype} ${name} ${location} ${str}"
                        ;;
                "nod")
-                       local dev=`LC_ALL=C ls -l "${location}"`
+                       local dev="`LC_ALL=C ls -l "${location}"`"
                        local maj=`field 5 ${dev}`
                        local min=`field 6 ${dev}`
                        maj=${maj%,}